path: root/gcc-4.8.3/libsanitizer/sanitizer_common
Diffstat (limited to 'gcc-4.8.3/libsanitizer/sanitizer_common')
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/Makefile.am | 75
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/Makefile.in | 551
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_allocator.cc | 82
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_allocator.h | 1169
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic.h | 63
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h | 122
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h | 158
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common.cc | 239
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common.h | 262
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc | 337
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc | 307
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_flags.cc | 96
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_flags.h | 25
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_internal_defs.h | 268
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_lfstack.h | 71
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_libc.cc | 225
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_libc.h | 87
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_linux.cc | 542
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_list.h | 122
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_mac.cc | 320
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_mutex.h | 121
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_placement_new.h | 31
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h | 46
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc | 68
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h | 41
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_posix.cc | 228
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_printf.cc | 217
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_procmaps.h | 110
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_quarantine.h | 170
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_report_decorator.h | 35
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc | 202
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stackdepot.h | 34
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc | 262
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stacktrace.h | 81
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc | 408
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer.h | 112
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_itanium.cc | 40
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_linux.cc | 180
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc | 29
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc | 35
-rw-r--r--  gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_win.cc | 294
41 files changed, 7865 insertions(+), 0 deletions(-)
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/Makefile.am b/gcc-4.8.3/libsanitizer/sanitizer_common/Makefile.am
new file mode 100644
index 000000000..c53a3c6bf
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/Makefile.am
@@ -0,0 +1,75 @@
+AM_CPPFLAGS = -I $(top_srcdir)/include
+
+# May be used by toolexeclibdir.
+gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
+
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
+AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ACLOCAL_AMFLAGS = -I m4
+
+noinst_LTLIBRARIES = libsanitizer_common.la
+
+sanitizer_common_files = \
+ sanitizer_allocator.cc \
+ sanitizer_common.cc \
+ sanitizer_flags.cc \
+ sanitizer_libc.cc \
+ sanitizer_linux.cc \
+ sanitizer_mac.cc \
+ sanitizer_platform_limits_posix.cc \
+ sanitizer_posix.cc \
+ sanitizer_printf.cc \
+ sanitizer_stackdepot.cc \
+ sanitizer_stacktrace.cc \
+ sanitizer_symbolizer.cc \
+ sanitizer_symbolizer_itanium.cc \
+ sanitizer_symbolizer_linux.cc \
+ sanitizer_symbolizer_mac.cc \
+ sanitizer_symbolizer_win.cc \
+ sanitizer_win.cc
+
+libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
+
+# Work around what appears to be a GNU make bug handling MAKEFLAGS
+# values defined in terms of make variables, as is the case for CC and
+# friends when we are called from the top level Makefile.
+AM_MAKEFLAGS = \
+ "AR_FLAGS=$(AR_FLAGS)" \
+ "CC_FOR_BUILD=$(CC_FOR_BUILD)" \
+ "CFLAGS=$(CFLAGS)" \
+ "CXXFLAGS=$(CXXFLAGS)" \
+ "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
+ "CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \
+ "INSTALL=$(INSTALL)" \
+ "INSTALL_DATA=$(INSTALL_DATA)" \
+ "INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
+ "INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \
+ "JC1FLAGS=$(JC1FLAGS)" \
+ "LDFLAGS=$(LDFLAGS)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \
+ "MAKE=$(MAKE)" \
+ "MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \
+ "PICFLAG=$(PICFLAG)" \
+ "PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \
+ "SHELL=$(SHELL)" \
+ "RUNTESTFLAGS=$(RUNTESTFLAGS)" \
+ "exec_prefix=$(exec_prefix)" \
+ "infodir=$(infodir)" \
+ "libdir=$(libdir)" \
+ "prefix=$(prefix)" \
+ "includedir=$(includedir)" \
+ "AR=$(AR)" \
+ "AS=$(AS)" \
+ "LD=$(LD)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "NM=$(NM)" \
+ "PICFLAG=$(PICFLAG)" \
+ "RANLIB=$(RANLIB)" \
+ "DESTDIR=$(DESTDIR)"
+
+MAKEOVERRIDES=
+
+## ################################################################
+
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/Makefile.in b/gcc-4.8.3/libsanitizer/sanitizer_common/Makefile.in
new file mode 100644
index 000000000..a3030836d
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/Makefile.in
@@ -0,0 +1,551 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = sanitizer_common
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \
+ $(top_srcdir)/../config/depstand.m4 \
+ $(top_srcdir)/../config/lead-dot.m4 \
+ $(top_srcdir)/../config/libstdc++-raw-cxx.m4 \
+ $(top_srcdir)/../config/multi.m4 \
+ $(top_srcdir)/../config/override.m4 \
+ $(top_srcdir)/../ltoptions.m4 $(top_srcdir)/../ltsugar.m4 \
+ $(top_srcdir)/../ltversion.m4 $(top_srcdir)/../lt~obsolete.m4 \
+ $(top_srcdir)/acinclude.m4 $(top_srcdir)/../libtool.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libsanitizer_common_la_LIBADD =
+am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
+ sanitizer_flags.lo sanitizer_libc.lo sanitizer_linux.lo \
+ sanitizer_mac.lo sanitizer_platform_limits_posix.lo \
+ sanitizer_posix.lo sanitizer_printf.lo sanitizer_stackdepot.lo \
+ sanitizer_stacktrace.lo sanitizer_symbolizer.lo \
+ sanitizer_symbolizer_itanium.lo sanitizer_symbolizer_linux.lo \
+ sanitizer_symbolizer_mac.lo sanitizer_symbolizer_win.lo \
+ sanitizer_win.lo
+am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
+libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
+DEFAULT_INCLUDES = -I.@am__isrc@
+depcomp = $(SHELL) $(top_srcdir)/../depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(libsanitizer_common_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSTDCXX_RAW_CXX_CXXFLAGS = @LIBSTDCXX_RAW_CXX_CXXFLAGS@
+LIBSTDCXX_RAW_CXX_LDFLAGS = @LIBSTDCXX_RAW_CXX_LDFLAGS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_shared = @enable_shared@
+enable_static = @enable_static@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+multi_basedir = @multi_basedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_noncanonical = @target_noncanonical@
+target_os = @target_os@
+target_vendor = @target_vendor@
+toolexecdir = @toolexecdir@
+toolexeclibdir = @toolexeclibdir@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AM_CPPFLAGS = -I $(top_srcdir)/include
+
+# May be used by toolexeclibdir.
+gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
+AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
+ -Wno-long-long -fPIC -fno-builtin -fno-exceptions \
+ -fomit-frame-pointer -funwind-tables -fvisibility=hidden \
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ACLOCAL_AMFLAGS = -I m4
+noinst_LTLIBRARIES = libsanitizer_common.la
+sanitizer_common_files = \
+ sanitizer_allocator.cc \
+ sanitizer_common.cc \
+ sanitizer_flags.cc \
+ sanitizer_libc.cc \
+ sanitizer_linux.cc \
+ sanitizer_mac.cc \
+ sanitizer_platform_limits_posix.cc \
+ sanitizer_posix.cc \
+ sanitizer_printf.cc \
+ sanitizer_stackdepot.cc \
+ sanitizer_stacktrace.cc \
+ sanitizer_symbolizer.cc \
+ sanitizer_symbolizer_itanium.cc \
+ sanitizer_symbolizer_linux.cc \
+ sanitizer_symbolizer_mac.cc \
+ sanitizer_symbolizer_win.cc \
+ sanitizer_win.cc
+
+libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
+
+# Work around what appears to be a GNU make bug handling MAKEFLAGS
+# values defined in terms of make variables, as is the case for CC and
+# friends when we are called from the top level Makefile.
+AM_MAKEFLAGS = \
+ "AR_FLAGS=$(AR_FLAGS)" \
+ "CC_FOR_BUILD=$(CC_FOR_BUILD)" \
+ "CFLAGS=$(CFLAGS)" \
+ "CXXFLAGS=$(CXXFLAGS)" \
+ "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
+ "CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \
+ "INSTALL=$(INSTALL)" \
+ "INSTALL_DATA=$(INSTALL_DATA)" \
+ "INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
+ "INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \
+ "JC1FLAGS=$(JC1FLAGS)" \
+ "LDFLAGS=$(LDFLAGS)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \
+ "MAKE=$(MAKE)" \
+ "MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \
+ "PICFLAG=$(PICFLAG)" \
+ "PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \
+ "SHELL=$(SHELL)" \
+ "RUNTESTFLAGS=$(RUNTESTFLAGS)" \
+ "exec_prefix=$(exec_prefix)" \
+ "infodir=$(infodir)" \
+ "libdir=$(libdir)" \
+ "prefix=$(prefix)" \
+ "includedir=$(includedir)" \
+ "AR=$(AR)" \
+ "AS=$(AS)" \
+ "LD=$(LD)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "NM=$(NM)" \
+ "PICFLAG=$(PICFLAG)" \
+ "RANLIB=$(RANLIB)" \
+ "DESTDIR=$(DESTDIR)"
+
+MAKEOVERRIDES =
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .cc .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign sanitizer_common/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign sanitizer_common/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+ -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+ @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
+ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+ test "$$dir" != "$$p" || dir=.; \
+ echo "rm -f \"$${dir}/so_locations\""; \
+ rm -f "$${dir}/so_locations"; \
+ done
+libsanitizer_common.la: $(libsanitizer_common_la_OBJECTS) $(libsanitizer_common_la_DEPENDENCIES)
+ $(CXXLINK) $(libsanitizer_common_la_OBJECTS) $(libsanitizer_common_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_posix.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_itanium.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
+
+.cc.o:
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $<
+
+.cc.obj:
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.cc.lo:
+@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+ mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+ clean-libtool clean-noinstLTLIBRARIES ctags distclean \
+ distclean-compile distclean-generic distclean-libtool \
+ distclean-tags dvi dvi-am html html-am info info-am install \
+ install-am install-data install-data-am install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+ pdf pdf-am ps ps-am tags uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_allocator.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_allocator.cc
new file mode 100644
index 000000000..a54de9d6f
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_allocator.cc
@@ -0,0 +1,82 @@
+//===-- sanitizer_allocator.cc --------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// This is the allocator that is used inside run-times.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common.h"
+
+// FIXME: We should probably use a more low-level allocator that would
+// mmap some pages and split them into chunks to fulfill requests.
+#if defined(__linux__) && !defined(__ANDROID__)
+extern "C" void *__libc_malloc(__sanitizer::uptr size);
+extern "C" void __libc_free(void *ptr);
+# define LIBC_MALLOC __libc_malloc
+# define LIBC_FREE __libc_free
+#else // __linux__ && !ANDROID
+# include <stdlib.h>
+# define LIBC_MALLOC malloc
+# define LIBC_FREE free
+#endif // __linux__ && !ANDROID
+
+namespace __sanitizer {
+
+const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
+
+void *InternalAlloc(uptr size) {
+ if (size + sizeof(u64) < size)
+ return 0;
+ void *p = LIBC_MALLOC(size + sizeof(u64));
+ if (p == 0)
+ return 0;
+ ((u64*)p)[0] = kBlockMagic;
+ return (char*)p + sizeof(u64);
+}
+
+void InternalFree(void *addr) {
+ if (addr == 0)
+ return;
+ addr = (char*)addr - sizeof(u64);
+ CHECK_EQ(((u64*)addr)[0], kBlockMagic);
+ ((u64*)addr)[0] = 0;
+ LIBC_FREE(addr);
+}
+
+// LowLevelAllocator
+static LowLevelAllocateCallback low_level_alloc_callback;
+
+void *LowLevelAllocator::Allocate(uptr size) {
+ // Align allocation size.
+ size = RoundUpTo(size, 8);
+ if (allocated_end_ - allocated_current_ < (sptr)size) {
+ uptr size_to_allocate = Max(size, GetPageSizeCached());
+ allocated_current_ =
+ (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
+ allocated_end_ = allocated_current_ + size_to_allocate;
+ if (low_level_alloc_callback) {
+ low_level_alloc_callback((uptr)allocated_current_,
+ size_to_allocate);
+ }
+ }
+ CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
+ void *res = allocated_current_;
+ allocated_current_ += size;
+ return res;
+}
+
+void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
+ low_level_alloc_callback = callback;
+}
+
+bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
+ if (!size) return false;
+ uptr max = (uptr)-1L;
+ return (max / size) < n;
+}
+
+} // namespace __sanitizer
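As a rough standalone illustration of the two ideas in the file above, the magic u64 word that InternalAlloc prepends to every block and the division-based overflow test in CallocShouldReturnNullDueToOverflow, here is a minimal sketch; the concrete sizes are arbitrary and the uptr typedef is only a stand-in for the runtime's own type:

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>

typedef uint64_t uptr;                              // stand-in for __sanitizer::uptr (64-bit case)
static const uint64_t kToyBlockMagic = 0x6A6CB03ABCEBC041ull;

// Same block layout as InternalAlloc/InternalFree above: [ magic (8 bytes) | user data ].
void *ToyAlloc(uptr size) {
  if (size + sizeof(uint64_t) < size) return 0;     // the request itself would overflow
  void *p = std::malloc(size + sizeof(uint64_t));
  if (!p) return 0;
  ((uint64_t *)p)[0] = kToyBlockMagic;
  return (char *)p + sizeof(uint64_t);
}

void ToyFree(void *addr) {
  if (!addr) return;
  addr = (char *)addr - sizeof(uint64_t);
  assert(((uint64_t *)addr)[0] == kToyBlockMagic);  // a foreign or corrupted pointer fails here
  std::free(addr);
}

// n * size overflows uptr exactly when n > max / size (for size != 0).
bool ToyCallocOverflows(uptr size, uptr n) {
  if (!size) return false;
  return ((uptr)-1) / size < n;
}

int main() {
  void *p = ToyAlloc(100);
  std::memcpy(p, "hello", 6);
  ToyFree(p);
  assert(ToyCallocOverflows(1ULL << 33, 1ULL << 31));   // 2^33 * 2^31 == 2^64 overflows u64
  assert(!ToyCallocOverflows(1ULL << 10, 1ULL << 10));  // 1 MB total is fine
  return 0;
}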
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_allocator.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_allocator.h
new file mode 100644
index 000000000..889281216
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_allocator.h
@@ -0,0 +1,1169 @@
+//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_H
+#define SANITIZER_ALLOCATOR_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_lfstack.h"
+
+namespace __sanitizer {
+
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 corresponds to size 0.
+// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
+// Next 8 classes: 256 + i * 32 (i = 1 to 8).
+// Next 8 classes: 512 + i * 64 (i = 1 to 8).
+// ...
+// Next 8 classes: 2^k + i * 2^(k-3) (i = 1 to 8).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+// - The difference between two consecutive size classes is between 6% and 12%.
+//
+// This class also gives a hint to a thread-caching allocator about the amount
+// of chunks that need to be cached per-thread:
+// - kMaxNumCached is the maximal number of chunks per size class.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
+// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
+// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
+// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
+// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
+// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
+// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
+//
+// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
+// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
+// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
+// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
+// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
+// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
+// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
+// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
+//
+// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
+// c17 => s: 288 diff: +32 12% l 8 cached: 227 65376; id 17
+// c18 => s: 320 diff: +32 11% l 8 cached: 204 65280; id 18
+// c19 => s: 352 diff: +32 10% l 8 cached: 186 65472; id 19
+// c20 => s: 384 diff: +32 09% l 8 cached: 170 65280; id 20
+// c21 => s: 416 diff: +32 08% l 8 cached: 157 65312; id 21
+// c22 => s: 448 diff: +32 07% l 8 cached: 146 65408; id 22
+// c23 => s: 480 diff: +32 07% l 8 cached: 136 65280; id 23
+//
+// c24 => s: 512 diff: +32 06% l 9 cached: 128 65536; id 24
+// c25 => s: 576 diff: +64 12% l 9 cached: 113 65088; id 25
+// c26 => s: 640 diff: +64 11% l 9 cached: 102 65280; id 26
+// c27 => s: 704 diff: +64 10% l 9 cached: 93 65472; id 27
+// c28 => s: 768 diff: +64 09% l 9 cached: 85 65280; id 28
+// c29 => s: 832 diff: +64 08% l 9 cached: 78 64896; id 29
+// c30 => s: 896 diff: +64 07% l 9 cached: 73 65408; id 30
+// c31 => s: 960 diff: +64 07% l 9 cached: 68 65280; id 31
+//
+// c32 => s: 1024 diff: +64 06% l 10 cached: 64 65536; id 32
+
+template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog,
+ uptr kMinBatchClassT>
+class SizeClassMap {
+ static const uptr kMinSizeLog = 4;
+ static const uptr kMidSizeLog = kMinSizeLog + 4;
+ static const uptr kMinSize = 1 << kMinSizeLog;
+ static const uptr kMidSize = 1 << kMidSizeLog;
+ static const uptr kMidClass = kMidSize / kMinSize;
+ static const uptr S = 3;
+ static const uptr M = (1 << S) - 1;
+
+ public:
+ static const uptr kMaxNumCached = kMaxNumCachedT;
+ struct TransferBatch {
+ TransferBatch *next;
+ uptr count;
+ void *batch[kMaxNumCached];
+ };
+
+ static const uptr kMinBatchClass = kMinBatchClassT;
+ static const uptr kMaxSize = 1 << kMaxSizeLog;
+ static const uptr kNumClasses =
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+ COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
+ static const uptr kNumClassesRounded =
+ kNumClasses == 32 ? 32 :
+ kNumClasses <= 64 ? 64 :
+ kNumClasses <= 128 ? 128 : 256;
+
+ static uptr Size(uptr class_id) {
+ if (class_id <= kMidClass)
+ return kMinSize * class_id;
+ class_id -= kMidClass;
+ uptr t = kMidSize << (class_id >> S);
+ return t + (t >> S) * (class_id & M);
+ }
+
+ static uptr ClassID(uptr size) {
+ if (size <= kMidSize)
+ return (size + kMinSize - 1) >> kMinSizeLog;
+ if (size > kMaxSize) return 0;
+ uptr l = MostSignificantSetBitIndex(size);
+ uptr hbits = (size >> (l - S)) & M;
+ uptr lbits = size & ((1 << (l - S)) - 1);
+ uptr l1 = l - kMidSizeLog;
+ return kMidClass + (l1 << S) + hbits + (lbits > 0);
+ }
+
+ static uptr MaxCached(uptr class_id) {
+ if (class_id == 0) return 0;
+ uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
+ return Max<uptr>(1, Min(kMaxNumCached, n));
+ }
+
+ static void Print() {
+ uptr prev_s = 0;
+ uptr total_cached = 0;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ uptr s = Size(i);
+ if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+ Printf("\n");
+ uptr d = s - prev_s;
+ uptr p = prev_s ? (d * 100 / prev_s) : 0;
+ uptr l = MostSignificantSetBitIndex(s);
+ uptr cached = MaxCached(i) * s;
+ Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+ "cached: %zd %zd; id %zd\n",
+ i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
+ total_cached += cached;
+ prev_s = s;
+ }
+ Printf("Total cached: %zd\n", total_cached);
+ }
+
+ static void Validate() {
+ for (uptr c = 1; c < kNumClasses; c++) {
+ // Printf("Validate: c%zd\n", c);
+ uptr s = Size(c);
+ CHECK_EQ(ClassID(s), c);
+ if (c != kNumClasses - 1)
+ CHECK_EQ(ClassID(s + 1), c + 1);
+ CHECK_EQ(ClassID(s - 1), c);
+ if (c)
+ CHECK_GT(Size(c), Size(c-1));
+ }
+ CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= kMaxSize; s++) {
+ uptr c = ClassID(s);
+ // Printf("s%zd => c%zd\n", s, c);
+ CHECK_LT(c, kNumClasses);
+ CHECK_GE(Size(c), s);
+ if (c > 0)
+ CHECK_LT(Size(c-1), s);
+ }
+
+ // TransferBatch for kMinBatchClass must fit into the block itself.
+ const uptr batch_size = sizeof(TransferBatch)
+ - sizeof(void*) // NOLINT
+ * (kMaxNumCached - MaxCached(kMinBatchClass));
+ CHECK_LE(batch_size, Size(kMinBatchClass));
+ // TransferBatch for kMinBatchClass-1 must not fit into the block itself.
+ const uptr batch_size1 = sizeof(TransferBatch)
+ - sizeof(void*) // NOLINT
+ * (kMaxNumCached - MaxCached(kMinBatchClass - 1));
+ CHECK_GT(batch_size1, Size(kMinBatchClass - 1));
+ }
+};
+
+typedef SizeClassMap<17, 256, 16, FIRST_32_SECOND_64(25, 28)>
+ DefaultSizeClassMap;
+typedef SizeClassMap<17, 64, 14, FIRST_32_SECOND_64(17, 20)>
+ CompactSizeClassMap;
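A worked, self-contained check of the class-to-size mapping above, specialized by hand to the DefaultSizeClassMap parameters (kMinSizeLog = 4, kMidSizeLog = 8, S = 3); the probe size 300 and the helper names are local to this sketch:

#include <cassert>
#include <cstdint>

static uint64_t MsbIndex(uint64_t x) {        // index of the most significant set bit
  uint64_t i = 0;
  while (x >>= 1) i++;
  return i;
}

static uint64_t ToyClassID(uint64_t size) {   // mirrors SizeClassMap::ClassID above
  const uint64_t kMinSize = 16, kMidSize = 256, kMidClass = 16, S = 3, M = 7;
  if (size <= kMidSize) return (size + kMinSize - 1) >> 4;
  uint64_t l = MsbIndex(size);
  uint64_t hbits = (size >> (l - S)) & M;
  uint64_t lbits = size & ((1ULL << (l - S)) - 1);
  return kMidClass + ((l - 8) << S) + hbits + (lbits > 0);
}

static uint64_t ToySize(uint64_t class_id) {  // mirrors SizeClassMap::Size above
  const uint64_t kMinSize = 16, kMidSize = 256, kMidClass = 16, S = 3;
  if (class_id <= kMidClass) return kMinSize * class_id;
  class_id -= kMidClass;
  uint64_t t = kMidSize << (class_id >> S);
  return t + (t >> S) * (class_id & ((1 << S) - 1));
}

int main() {
  assert(ToyClassID(16) == 1 && ToySize(1) == 16);   // c01 in the table above
  assert(ToyClassID(300) == 18);                     // 256 < 300 <= 320, so class 18
  assert(ToySize(18) == 320);                        // matches "c18 => s: 320" above
  return 0;
}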
+template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
+
+// Memory allocator statistics
+enum AllocatorStat {
+ AllocatorStatMalloced,
+ AllocatorStatFreed,
+ AllocatorStatMmapped,
+ AllocatorStatUnmapped,
+ AllocatorStatCount
+};
+
+typedef u64 AllocatorStatCounters[AllocatorStatCount];
+
+// Per-thread stats, live in per-thread cache.
+class AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+
+ void Add(AllocatorStat i, u64 v) {
+ v += atomic_load(&stats_[i], memory_order_relaxed);
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Set(AllocatorStat i, u64 v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ u64 Get(AllocatorStat i) const {
+ return atomic_load(&stats_[i], memory_order_relaxed);
+ }
+
+ private:
+ friend class AllocatorGlobalStats;
+ AllocatorStats *next_;
+ AllocatorStats *prev_;
+ atomic_uint64_t stats_[AllocatorStatCount];
+};
+
+// Global stats, used for aggregation and querying.
+class AllocatorGlobalStats : public AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ next_ = this;
+ prev_ = this;
+ }
+
+ void Register(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->next_ = next_;
+ s->prev_ = this;
+ next_->prev_ = s;
+ next_ = s;
+ }
+
+ void Unregister(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->prev_->next_ = s->next_;
+ s->next_->prev_ = s->prev_;
+ for (int i = 0; i < AllocatorStatCount; i++)
+ Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
+ }
+
+ void Get(AllocatorStatCounters s) const {
+ internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+ SpinMutexLock l(&mu_);
+ const AllocatorStats *stats = this;
+ for (;;) {
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] += stats->Get(AllocatorStat(i));
+ stats = stats->next_;
+ if (stats == this)
+ break;
+ }
+ }
+
+ private:
+ mutable SpinMutex mu_;
+};
+
+// Allocators call these callbacks on mmap/munmap.
+struct NoOpMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const { }
+};
+
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at
+// a fixed address (kSpaceBeg). Both constants are powers of two and
+// kSpaceBeg is kSpaceSize-aligned.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
+template <const uptr kSpaceBeg, const uptr kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator64 {
+ public:
+ typedef typename SizeClassMap::TransferBatch Batch;
+ typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
+
+ void Init() {
+ CHECK_EQ(kSpaceBeg,
+ reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
+ MapWithCallback(kSpaceEnd, AdditionalSize());
+ }
+
+ void MapWithCallback(uptr beg, uptr size) {
+ CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *region = GetRegionInfo(class_id);
+ Batch *b = region->free_list.Pop();
+ if (b == 0)
+ b = PopulateFreeList(stat, c, class_id, region);
+ region->n_allocated += b->count;
+ return b;
+ }
+
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ region->free_list.Push(b);
+ region->n_freed += b->count;
+ }
+
+ static bool PointerIsMine(void *p) {
+ return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
+ }
+
+ static uptr GetSizeClass(void *p) {
+ return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
+ }
+
+ void *GetBlockBegin(void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = SizeClassMap::Size(class_id);
+ uptr chunk_idx = GetChunkIdx((uptr)p, size);
+ uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
+ uptr beg = chunk_idx * size;
+ uptr next_beg = beg + size;
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user >= next_beg)
+ return reinterpret_cast<void*>(reg_beg + beg);
+ return 0;
+ }
+
+ static uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return SizeClassMap::Size(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = SizeClassMap::Size(class_id);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+ return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
+ }
+
+ void PrintStats() {
+ uptr total_mapped = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ total_mapped += region->mapped_user;
+ n_allocated += region->n_allocated;
+ n_freed += region->n_freed;
+ }
+ Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
+ "remains %zd\n",
+ total_mapped >> 20, n_allocated, n_allocated - n_freed);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) continue;
+ Printf(" %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
+ class_id,
+ SizeClassMap::Size(class_id),
+ region->mapped_user >> 10,
+ region->n_allocated,
+ region->n_allocated - region->n_freed);
+ }
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetRegionInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = (int)kNumClasses - 1; i >= 0; i--) {
+ GetRegionInfo(i)->mutex.Unlock();
+ }
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+
+ private:
+ static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+ static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
+ COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // Populate the free list with at most this number of bytes at once
+ // or with one element if its size is greater.
+ static const uptr kPopulateSize = 1 << 14;
+ // Call mmap for user memory with at least this size.
+ static const uptr kUserMapSize = 1 << 16;
+ // Call mmap for metadata memory with at least this size.
+ static const uptr kMetaMapSize = 1 << 16;
+
+ struct RegionInfo {
+ BlockingMutex mutex;
+ LFStack<Batch> free_list;
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ uptr mapped_user; // Bytes mapped for user memory.
+ uptr mapped_meta; // Bytes mapped for metadata.
+ uptr n_allocated, n_freed; // Just stats.
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
+
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
+ RegionInfo *GetRegionInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
+ return &regions[class_id];
+ }
+
+ static uptr GetChunkIdx(uptr chunk, uptr size) {
+ u32 offset = chunk % kRegionSize;
+ // Here we divide by a non-constant. This is costly.
+ // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
+ // We save 2x by using 32-bit div, but may need to use a 256-way switch.
+ return offset / (u32)size;
+ }
+
+ NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id, RegionInfo *region) {
+ BlockingMutexLock l(&region->mutex);
+ Batch *b = region->free_list.Pop();
+ if (b)
+ return b;
+ uptr size = SizeClassMap::Size(class_id);
+ uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
+ uptr beg_idx = region->allocated_user;
+ uptr end_idx = beg_idx + count * size;
+ uptr region_beg = kSpaceBeg + kRegionSize * class_id;
+ if (end_idx + size > region->mapped_user) {
+ // Do the mmap for the user memory.
+ uptr map_size = kUserMapSize;
+ while (end_idx + size > region->mapped_user + map_size)
+ map_size += kUserMapSize;
+ CHECK_GE(region->mapped_user + map_size, end_idx);
+ MapWithCallback(region_beg + region->mapped_user, map_size);
+ stat->Add(AllocatorStatMmapped, map_size);
+ region->mapped_user += map_size;
+ }
+ uptr total_count = (region->mapped_user - beg_idx - size)
+ / size / count * count;
+ region->allocated_meta += total_count * kMetadataSize;
+ if (region->allocated_meta > region->mapped_meta) {
+ uptr map_size = kMetaMapSize;
+ while (region->allocated_meta > region->mapped_meta + map_size)
+ map_size += kMetaMapSize;
+ // Do the mmap for the metadata.
+ CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
+ MapWithCallback(region_beg + kRegionSize -
+ region->mapped_meta - map_size, map_size);
+ region->mapped_meta += map_size;
+ }
+ CHECK_LE(region->allocated_meta, region->mapped_meta);
+ if (region->allocated_user + region->allocated_meta > kRegionSize) {
+ Printf("Out of memory. Dying.\n");
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize / 1024 / 1024, size);
+ Die();
+ }
+ for (;;) {
+ if (class_id < SizeClassMap::kMinBatchClass)
+ b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
+ else
+ b = (Batch*)(region_beg + beg_idx);
+ b->count = count;
+ for (uptr i = 0; i < count; i++)
+ b->batch[i] = (void*)(region_beg + beg_idx + i * size);
+ region->allocated_user += count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+ beg_idx += count * size;
+ if (beg_idx + count * size + size > region->mapped_user)
+ break;
+ region->free_list.Push(b);
+ }
+ return b;
+ }
+};
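To make the Region/UserChunk/MetaChunk picture above concrete, a small standalone sketch of the address arithmetic, with hypothetical template parameters (kSpaceBeg = 0x600000000000, kSpaceSize = 2^44, kMetadataSize = 16, 64 rounded classes); the numbers are illustrative, not the ones any particular sanitizer configures:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kSpaceBeg = 0x600000000000ULL;                  // kSpaceSize-aligned by construction
  const uint64_t kSpaceSize = 1ULL << 44;
  const uint64_t kNumClassesRounded = 64;
  const uint64_t kRegionSize = kSpaceSize / kNumClassesRounded;  // one region per size class
  const uint64_t kMetadataSize = 16;

  // Pretend chunk 5 of class 18 (chunk size 320) was handed out to the user.
  uint64_t class_id = 18, size = 320, chunk_idx = 5;
  uint64_t region_beg = kSpaceBeg + kRegionSize * class_id;
  uint64_t user_ptr = region_beg + chunk_idx * size;

  // GetSizeClass(): the region index modulo the rounded class count recovers the class.
  assert((user_ptr / kRegionSize) % kNumClassesRounded == class_id);

  // GetMetaData(): metadata chunks grow downward from the end of the region.
  uint64_t meta = kSpaceBeg + kRegionSize * (class_id + 1) - (1 + chunk_idx) * kMetadataSize;
  assert(meta > user_ptr && meta < kSpaceBeg + kRegionSize * (class_id + 1));
  return 0;
}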
+
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// an u8 array possible_regions[kNumPossibleRegions] to store the size classes.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing, the objects of this class should be
+// cache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator32 {
+ public:
+ typedef typename SizeClassMap::TransferBatch Batch;
+ typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
+
+ void Init() {
+ state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
+ }
+
+ void *MapWithCallback(uptr size) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ void *res = MmapOrDie(size, "SizeClassAllocator32");
+ MapUnmapCallback().OnMap((uptr)res, size);
+ return res;
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *GetMetaData(void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = SizeClassMap::Size(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return reinterpret_cast<void*>(meta);
+ }
+
+ NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ if (sci->free_list.empty())
+ PopulateFreeList(stat, c, sci, class_id);
+ CHECK(!sci->free_list.empty());
+ Batch *b = sci->free_list.front();
+ sci->free_list.pop_front();
+ return b;
+ }
+
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ sci->free_list.push_front(b);
+ }
+
+ bool PointerIsMine(void *p) {
+ return GetSizeClass(p) != 0;
+ }
+
+ uptr GetSizeClass(void *p) {
+ return state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ }
+
+ void *GetBlockBegin(void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = SizeClassMap::Size(GetSizeClass(p));
+ u32 offset = mem - beg;
+ u32 n = offset / (u32)size; // 32-bit division
+ uptr res = beg + (n * (u32)size);
+ return reinterpret_cast<void*>(res);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return SizeClassMap::Size(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (state_->possible_regions[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (state_->possible_regions[i])
+ UnmapWithCallback((i * kRegionSize), kRegionSize);
+ UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetSizeClassInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = kNumClasses - 1; i >= 0; i--) {
+ GetSizeClassInfo(i)->mutex.Unlock();
+ }
+ }
+
+ void PrintStats() {
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+ static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+ struct SizeClassInfo {
+ SpinMutex mutex;
+ IntrusiveList<Batch> free_list;
+ char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+ uptr ComputeRegionId(uptr mem) {
+ uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMmapped, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
+ state_->possible_regions[ComputeRegionId(res)] = class_id;
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ return &state_->size_class_info_array[class_id];
+ }
+
+ void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ SizeClassInfo *sci, uptr class_id) {
+ uptr size = SizeClassMap::Size(class_id);
+ uptr reg = AllocateRegion(stat, class_id);
+ uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ uptr max_count = SizeClassMap::MaxCached(class_id);
+ Batch *b = 0;
+ for (uptr i = reg; i < reg + n_chunks * size; i += size) {
+ if (b == 0) {
+ if (class_id < SizeClassMap::kMinBatchClass)
+ b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
+ else
+ b = (Batch*)i;
+ b->count = 0;
+ }
+ b->batch[b->count++] = (void*)i;
+ if (b->count == max_count) {
+ sci->free_list.push_back(b);
+ b = 0;
+ }
+ }
+ if (b)
+ sci->free_list.push_back(b);
+ }
+
+ struct State {
+ u8 possible_regions[kNumPossibleRegions];
+ SizeClassInfo size_class_info_array[kNumClasses];
+ };
+ State *state_;
+};
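The 32-bit allocator above replaces the fixed address-space carve-up with a byte map: possible_regions[] records, for every kRegionSize-aligned region, which size class it serves (0 means "not ours"). A standalone sketch with an illustrative region address; the 2^20 region size matches the 32-bit branch of kRegionSizeLog, the rest is made up:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kRegionSizeLog = 20;                   // 1 MB regions on 32-bit targets
  const uint32_t kRegionSize = 1u << kRegionSizeLog;
  static uint8_t possible_regions[1u << (32 - kRegionSizeLog)] = {};  // 4096 entries, zeroed

  uint32_t region_beg = 0x40e00000u;                    // pretend MmapAlignedOrDie returned this
  assert(region_beg % kRegionSize == 0);                // regions are kRegionSize-aligned
  possible_regions[region_beg >> kRegionSizeLog] = 18;  // this region now serves class 18

  uint32_t p = region_beg + 5u * 320u;                  // sixth chunk of a 320-byte class
  assert(possible_regions[p >> kRegionSizeLog] == 18);  // GetSizeClass()
  assert((p & ~(kRegionSize - 1)) == region_beg);       // ComputeRegionBeg()
  return 0;
}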
+
+// Objects of this type should be used as local caches for SizeClassAllocator64
+// or SizeClassAllocator32. Since the typical use of this class is to have one
+// object per thread in TLS, it has to be POD.
+template<class SizeClassAllocator>
+struct SizeClassAllocatorLocalCache {
+ typedef SizeClassAllocator Allocator;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(allocator, class_id);
+ void *res = c->batch[--c->count];
+ PREFETCH(c->batch[c->count - 1]);
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(allocator, class_id);
+ c->batch[c->count++] = p;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(allocator, class_id);
+ }
+ }
+
+ // private:
+ typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
+ typedef typename SizeClassMap::TransferBatch Batch;
+ struct PerClass {
+ uptr count;
+ uptr max_count;
+ void *batch[2 * SizeClassMap::kMaxNumCached];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache() {
+ if (per_class_[0].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * SizeClassMap::MaxCached(i);
+ }
+ }
+
+ NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ CHECK_GT(b->count, 0);
+ for (uptr i = 0; i < b->count; i++)
+ c->batch[i] = b->batch[i];
+ c->count = b->count;
+ if (class_id < SizeClassMap::kMinBatchClass)
+ Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
+ }
+
+ NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ Batch *b;
+ if (class_id < SizeClassMap::kMinBatchClass)
+ b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
+ else
+ b = (Batch*)c->batch[0];
+ uptr cnt = Min(c->max_count / 2, c->count);
+ for (uptr i = 0; i < cnt; i++) {
+ b->batch[i] = c->batch[i];
+ c->batch[i] = c->batch[i + c->max_count / 2];
+ }
+ b->count = cnt;
+ c->count -= cnt;
+ allocator->DeallocateBatch(&stats_, class_id, b);
+ }
+};
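How the pieces above are meant to fit together, written as a commented sketch because it only builds inside the sanitizer runtime itself; the Primary typedef and its constants are hypothetical, chosen only to show the shape of the calls:

// typedef SizeClassAllocator64<0x600000000000ULL, 1ULL << 44, 16,
//                              DefaultSizeClassMap> Primary;      // hypothetical parameters
// typedef SizeClassAllocatorLocalCache<Primary> Cache;
//
// static Primary primary;                // one global instance shared by all threads
// static AllocatorGlobalStats stats;
// static THREADLOCAL Cache cache;        // one POD cache per thread
//
// void Setup() { primary.Init(); stats.Init(); cache.Init(&stats); }
//
// void *Alloc(uptr size) {               // fast path: pop from the thread-local batch,
//   return cache.Allocate(&primary, primary.ClassID(size));       // Refill() on miss
// }
//
// void Free(void *p) {                   // fast path: push back locally,
//   cache.Deallocate(&primary, primary.GetSizeClass(p), p);       // Drain() when full
// }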
+
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
+class LargeMmapAllocator {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ page_size_ = GetPageSizeCached();
+ }
+
+ void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = RoundUpMapSize(size);
+ if (alignment > page_size_)
+ map_size += alignment;
+ if (map_size < size) return 0; // Overflow.
+ uptr map_beg = reinterpret_cast<uptr>(
+ MmapOrDie(map_size, "LargeMmapAllocator"));
+ MapUnmapCallback().OnMap(map_beg, map_size);
+ uptr map_end = map_beg + map_size;
+ uptr res = map_beg + page_size_;
+ if (res & (alignment - 1)) // Align.
+ res += alignment - (res & (alignment - 1));
+ CHECK_EQ(0, res & (alignment - 1));
+ CHECK_LE(res + size, map_end);
+ Header *h = GetHeader(res);
+ h->size = size;
+ h->map_beg = map_beg;
+ h->map_size = map_size;
+ uptr size_log = MostSignificantSetBitIndex(map_size);
+ CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = n_chunks_++;
+ CHECK_LT(idx, kMaxNumChunks);
+ h->chunk_idx = idx;
+ chunks_[idx] = h;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
+ stats.by_size_log[size_log]++;
+ stat->Add(AllocatorStatMalloced, map_size);
+ stat->Add(AllocatorStatMmapped, map_size);
+ }
+ return reinterpret_cast<void*>(res);
+ }
+
+ void Deallocate(AllocatorStats *stat, void *p) {
+ Header *h = GetHeader(p);
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = h->chunk_idx;
+ CHECK_EQ(chunks_[idx], h);
+ CHECK_LT(idx, n_chunks_);
+ chunks_[idx] = chunks_[n_chunks_ - 1];
+ chunks_[idx]->chunk_idx = idx;
+ n_chunks_--;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
+ stat->Add(AllocatorStatFreed, h->map_size);
+ stat->Add(AllocatorStatUnmapped, h->map_size);
+ }
+ MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
+ UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ SpinMutexLock l(&mutex_);
+ uptr res = 0;
+ for (uptr i = 0; i < n_chunks_; i++) {
+ Header *h = chunks_[i];
+ CHECK_EQ(h->chunk_idx, i);
+ res += RoundUpMapSize(h->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(void *p) {
+ return GetBlockBegin(p) != 0;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ return RoundUpTo(GetHeader(p)->size, page_size_);
+ }
+
+  // At least page_size_/2 metadata bytes are available.
+ void *GetMetaData(void *p) {
+ // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ return GetHeader(p) + 1;
+ }
+
+ void *GetBlockBegin(void *ptr) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ SpinMutexLock l(&mutex_);
+ uptr nearest_chunk = 0;
+ // Cache-friendly linear search.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+      if (p < ch) continue;  // p is to the left of this chunk; skip it.
+ if (p - ch < p - nearest_chunk)
+ nearest_chunk = ch;
+ }
+ if (!nearest_chunk)
+ return 0;
+ Header *h = reinterpret_cast<Header *>(nearest_chunk);
+ CHECK_GE(nearest_chunk, h->map_beg);
+ CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+ CHECK_LE(nearest_chunk, p);
+ if (h->map_beg + h->map_size < p)
+ return 0;
+ return GetUser(h);
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M; by size logs: ",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
+ uptr c = stats.by_size_log[i];
+ if (!c) continue;
+ Printf("%zd:%zd; ", i, c);
+ }
+ Printf("\n");
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ mutex_.Lock();
+ }
+
+ void ForceUnlock() {
+ mutex_.Unlock();
+ }
+
+ private:
+ static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
+ struct Header {
+ uptr map_beg;
+ uptr map_size;
+ uptr size;
+ uptr chunk_idx;
+ };
+
+ Header *GetHeader(uptr p) {
+ CHECK_EQ(p % page_size_, 0);
+ return reinterpret_cast<Header*>(p - page_size_);
+ }
+ Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
+
+ void *GetUser(Header *h) {
+ CHECK_EQ((uptr)h % page_size_, 0);
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, page_size_) + page_size_;
+ }
+
+ uptr page_size_;
+ Header *chunks_[kMaxNumChunks];
+ uptr n_chunks_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
+ } stats;
+ SpinMutex mutex_;
+};
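+
+// Usage sketch (illustrative only; 'stats' stands for an AllocatorStats
+// object owned by the caller -- in practice this allocator is plugged in as
+// the SecondaryAllocator of a CombinedAllocator, see below):
+//   static LargeMmapAllocator<> a;
+//   a.Init();
+//   void *p = a.Allocate(&stats, 1 << 20, /*alignment*/ 8);
+//   CHECK(a.PointerIsMine(p));
+//   a.Deallocate(&stats, p);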
+
+// This class implements a complete memory allocator by using two
+// internal allocators:
+// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
+//  When allocating 2^x bytes it should return a 2^x-aligned chunk.
+// PrimaryAllocator is used via a local AllocatorCache.
+// SecondaryAllocator can allocate anything, but is not efficient.
+template <class PrimaryAllocator, class AllocatorCache,
+ class SecondaryAllocator> // NOLINT
+class CombinedAllocator {
+ public:
+ void Init() {
+ primary_.Init();
+ secondary_.Init();
+ stats_.Init();
+ }
+
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
+ bool cleared = false) {
+ // Returning 0 on malloc(0) may break a lot of code.
+ if (size == 0)
+ size = 1;
+ if (size + alignment < size)
+ return 0;
+ if (alignment > 8)
+ size = RoundUpTo(size, alignment);
+ void *res;
+ if (primary_.CanAllocate(size, alignment))
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
+ res = secondary_.Allocate(&stats_, size, alignment);
+ if (alignment > 8)
+ CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ if (cleared && res)
+ internal_memset(res, 0, size);
+ return res;
+ }
+
+ void Deallocate(AllocatorCache *cache, void *p) {
+ if (!p) return;
+ if (primary_.PointerIsMine(p))
+ cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
+ else
+ secondary_.Deallocate(&stats_, p);
+ }
+
+ void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
+ uptr alignment) {
+ if (!p)
+ return Allocate(cache, new_size, alignment);
+ if (!new_size) {
+ Deallocate(cache, p);
+ return 0;
+ }
+ CHECK(PointerIsMine(p));
+ uptr old_size = GetActuallyAllocatedSize(p);
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = Allocate(cache, new_size, alignment);
+ if (new_p)
+ internal_memcpy(new_p, p, memcpy_size);
+ Deallocate(cache, p);
+ return new_p;
+ }
+
+ bool PointerIsMine(void *p) {
+ if (primary_.PointerIsMine(p))
+ return true;
+ return secondary_.PointerIsMine(p);
+ }
+
+ bool FromPrimary(void *p) {
+ return primary_.PointerIsMine(p);
+ }
+
+ void *GetMetaData(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetMetaData(p);
+ return secondary_.GetMetaData(p);
+ }
+
+ void *GetBlockBegin(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBegin(p);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetActuallyAllocatedSize(p);
+ return secondary_.GetActuallyAllocatedSize(p);
+ }
+
+ uptr TotalMemoryUsed() {
+ return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
+ }
+
+ void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+
+ void InitCache(AllocatorCache *cache) {
+ cache->Init(&stats_);
+ }
+
+ void DestroyCache(AllocatorCache *cache) {
+ cache->Destroy(&primary_, &stats_);
+ }
+
+ void SwallowCache(AllocatorCache *cache) {
+ cache->Drain(&primary_);
+ }
+
+ void GetStats(AllocatorStatCounters s) const {
+ stats_.Get(s);
+ }
+
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ primary_.ForceLock();
+ secondary_.ForceLock();
+ }
+
+ void ForceUnlock() {
+ secondary_.ForceUnlock();
+ primary_.ForceUnlock();
+ }
+
+ private:
+ PrimaryAllocator primary_;
+ SecondaryAllocator secondary_;
+ AllocatorGlobalStats stats_;
+};
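+
+// Composition sketch (illustrative only; kAllocatorSpace, kAllocatorSize and
+// 'size' are placeholders -- each tool picks its own shadow-compatible
+// address range and size class map):
+//   typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16,
+//                                DefaultSizeClassMap> Primary;
+//   typedef SizeClassAllocatorLocalCache<Primary> Cache;
+//   typedef LargeMmapAllocator<> Secondary;
+//   typedef CombinedAllocator<Primary, Cache, Secondary> Allocator;
+//
+//   static Allocator allocator;
+//   static THREADLOCAL Cache cache;  // one cache per thread
+//   allocator.Init();
+//   allocator.InitCache(&cache);
+//   void *p = allocator.Allocate(&cache, size, /*alignment*/ 8);
+//   allocator.Deallocate(&cache, p);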
+
+// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
+bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic.h
new file mode 100644
index 000000000..f2bf23588
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic.h
@@ -0,0 +1,63 @@
+//===-- sanitizer_atomic.h --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_H
+#define SANITIZER_ATOMIC_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+enum memory_order {
+ memory_order_relaxed = 1 << 0,
+ memory_order_consume = 1 << 1,
+ memory_order_acquire = 1 << 2,
+ memory_order_release = 1 << 3,
+ memory_order_acq_rel = 1 << 4,
+ memory_order_seq_cst = 1 << 5
+};
+
+struct atomic_uint8_t {
+ typedef u8 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint16_t {
+ typedef u16 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint32_t {
+ typedef u32 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint64_t {
+ typedef u64 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uintptr_t {
+ typedef uptr Type;
+ volatile Type val_dont_use;
+};
+
+} // namespace __sanitizer
+
+#if defined(__GNUC__)
+# include "sanitizer_atomic_clang.h"
+#elif defined(_MSC_VER)
+# include "sanitizer_atomic_msvc.h"
+#else
+# error "Unsupported compiler"
+#endif
+
+#endif // SANITIZER_ATOMIC_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
new file mode 100644
index 000000000..bb4611d51
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
@@ -0,0 +1,122 @@
+//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_H
+#define SANITIZER_ATOMIC_CLANG_H
+
+namespace __sanitizer {
+
+INLINE void atomic_signal_fence(memory_order) {
+ __asm__ __volatile__("" ::: "memory");
+}
+
+INLINE void atomic_thread_fence(memory_order) {
+ __sync_synchronize();
+}
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+#if defined(__i386__) || defined(__x86_64__)
+ for (int i = 0; i < cnt; i++)
+ __asm__ __volatile__("pause");
+#endif
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+ // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ v = a->val_dont_use;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ a->val_dont_use = v;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ if (mo == memory_order_seq_cst)
+ atomic_thread_fence(memory_order_seq_cst);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_fetch_add(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __sync_fetch_and_add(&a->val_dont_use, v);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_fetch_sub(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __sync_fetch_and_add(&a->val_dont_use, -v);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_exchange(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ DCHECK(!((uptr)a % sizeof(*a)));
+ if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
+ __sync_synchronize();
+ v = __sync_lock_test_and_set(&a->val_dont_use, v);
+ if (mo == memory_order_seq_cst)
+ __sync_synchronize();
+ return v;
+}
+
+template<typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ typedef typename T::Type Type;
+ Type cmpv = *cmp;
+ Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+template<typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ return atomic_compare_exchange_strong(a, cmp, xchg, mo);
+}
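+
+// Usage sketch (illustrative): a compare-and-swap increment loop built from
+// the primitives above; 'counter' stands for a zero-initialized global.
+//   atomic_uint32_t counter;
+//   u32 cmp = atomic_load(&counter, memory_order_relaxed);
+//   while (!atomic_compare_exchange_weak(&counter, &cmp, cmp + 1,
+//                                        memory_order_relaxed)) {
+//     // On failure cmp is updated to the current value; retry.
+//   }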
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ATOMIC_CLANG_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
new file mode 100644
index 000000000..919e24f3b
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
@@ -0,0 +1,158 @@
+//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_MSVC_H
+#define SANITIZER_ATOMIC_MSVC_H
+
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+extern "C" void _mm_mfence();
+#pragma intrinsic(_mm_mfence)
+extern "C" void _mm_pause();
+#pragma intrinsic(_mm_pause)
+extern "C" long _InterlockedExchangeAdd( // NOLINT
+ long volatile * Addend, long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchangeAdd)
+
+#ifdef _WIN64
+extern "C" void *_InterlockedCompareExchangePointer(
+ void *volatile *Destination,
+ void *Exchange, void *Comparand);
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+#else
+// There's no _InterlockedCompareExchangePointer intrinsic on x86,
+// so call _InterlockedCompareExchange instead.
+extern "C"
+long __cdecl _InterlockedCompareExchange( // NOLINT
+ long volatile *Destination, // NOLINT
+ long Exchange, long Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange)
+
+inline static void *_InterlockedCompareExchangePointer(
+ void *volatile *Destination,
+ void *Exchange, void *Comparand) {
+ return reinterpret_cast<void*>(
+ _InterlockedCompareExchange(
+ reinterpret_cast<long volatile*>(Destination), // NOLINT
+ reinterpret_cast<long>(Exchange), // NOLINT
+ reinterpret_cast<long>(Comparand))); // NOLINT
+}
+#endif
+
+namespace __sanitizer {
+
+INLINE void atomic_signal_fence(memory_order) {
+ _ReadWriteBarrier();
+}
+
+INLINE void atomic_thread_fence(memory_order) {
+ _mm_mfence();
+}
+
+INLINE void proc_yield(int cnt) {
+ for (int i = 0; i < cnt; i++)
+ _mm_pause();
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+ // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ v = a->val_dont_use;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ a->val_dont_use = v;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ if (mo == memory_order_seq_cst)
+ atomic_thread_fence(memory_order_seq_cst);
+}
+
+INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, (long)v); // NOLINT
+}
+
+INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
+ u8 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ __asm {
+ mov eax, a
+ mov cl, v
+ xchg [eax], cl // NOLINT
+ mov v, cl
+ }
+ return v;
+}
+
+INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
+ u16 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ __asm {
+ mov eax, a
+ mov cx, v
+ xchg [eax], cx // NOLINT
+ mov v, cx
+ }
+ return v;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
+ uptr *cmp,
+ uptr xchg,
+ memory_order mo) {
+ uptr cmpv = *cmp;
+ uptr prev = (uptr)_InterlockedCompareExchangePointer(
+ (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+template<typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ return atomic_compare_exchange_strong(a, cmp, xchg, mo);
+}
+
+} // namespace __sanitizer
+
+#endif  // SANITIZER_ATOMIC_MSVC_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common.cc
new file mode 100644
index 000000000..f8d2d0e3f
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common.cc
@@ -0,0 +1,239 @@
+//===-- sanitizer_common.cc -----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+const char *SanitizerToolName = "SanitizerTool";
+
+uptr GetPageSizeCached() {
+ static uptr PageSize;
+ if (!PageSize)
+ PageSize = GetPageSize();
+ return PageSize;
+}
+
+static bool log_to_file = false; // Set to true by __sanitizer_set_report_path
+
+// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
+// isn't equal to the current PID, try to obtain the file descriptor by
+// opening the file "report_path_prefix.<PID>".
+static fd_t report_fd = kStderrFd;
+static char report_path_prefix[4096]; // Set via __sanitizer_set_report_path.
+// PID of the process that opened |report_fd|. If a fork() occurs, the PID of
+// the child process will be different from |report_fd_pid|.
+static int report_fd_pid = 0;
+
+static void (*DieCallback)(void);
+void SetDieCallback(void (*callback)(void)) {
+ DieCallback = callback;
+}
+
+void NORETURN Die() {
+ if (DieCallback) {
+ DieCallback();
+ }
+ internal__exit(1);
+}
+
+static CheckFailedCallbackType CheckFailedCallback;
+void SetCheckFailedCallback(CheckFailedCallbackType callback) {
+ CheckFailedCallback = callback;
+}
+
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ if (CheckFailedCallback) {
+ CheckFailedCallback(file, line, cond, v1, v2);
+ }
+ Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
+ v1, v2);
+ Die();
+}
+
+static void MaybeOpenReportFile() {
+ if (!log_to_file || (report_fd_pid == GetPid())) return;
+ InternalScopedBuffer<char> report_path_full(4096);
+ internal_snprintf(report_path_full.data(), report_path_full.size(),
+ "%s.%d", report_path_prefix, GetPid());
+ fd_t fd = OpenFile(report_path_full.data(), true);
+ if (fd == kInvalidFd) {
+ report_fd = kStderrFd;
+ log_to_file = false;
+ Report("ERROR: Can't open file: %s\n", report_path_full.data());
+ Die();
+ }
+ if (report_fd != kInvalidFd) {
+ // We're in the child. Close the parent's log.
+ internal_close(report_fd);
+ }
+ report_fd = fd;
+ report_fd_pid = GetPid();
+}
+
+bool PrintsToTty() {
+ MaybeOpenReportFile();
+ return internal_isatty(report_fd);
+}
+
+void RawWrite(const char *buffer) {
+ static const char *kRawWriteError = "RawWrite can't output requested buffer!";
+ uptr length = (uptr)internal_strlen(buffer);
+ MaybeOpenReportFile();
+ if (length != internal_write(report_fd, buffer, length)) {
+ internal_write(report_fd, kRawWriteError, internal_strlen(kRawWriteError));
+ Die();
+ }
+}
+
+uptr ReadFileToBuffer(const char *file_name, char **buff,
+ uptr *buff_size, uptr max_len) {
+ uptr PageSize = GetPageSizeCached();
+ uptr kMinFileLen = PageSize;
+ uptr read_len = 0;
+ *buff = 0;
+ *buff_size = 0;
+ // The files we usually open are not seekable, so try different buffer sizes.
+ for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
+ fd_t fd = OpenFile(file_name, /*write*/ false);
+ if (fd == kInvalidFd) return 0;
+ UnmapOrDie(*buff, *buff_size);
+ *buff = (char*)MmapOrDie(size, __FUNCTION__);
+ *buff_size = size;
+ // Read up to one page at a time.
+ read_len = 0;
+ bool reached_eof = false;
+ while (read_len + PageSize <= size) {
+ uptr just_read = internal_read(fd, *buff + read_len, PageSize);
+ if (just_read == 0) {
+ reached_eof = true;
+ break;
+ }
+ read_len += just_read;
+ }
+ internal_close(fd);
+ if (reached_eof) // We've read the whole file.
+ break;
+ }
+ return read_len;
+}
+
+// We don't want to use std::sort to avoid including <algorithm>, as
+// we may end up with two implementations of std::sort - one in the
+// instrumented code, and the other in the runtime.
+// qsort() from stdlib won't work as it calls malloc(), which results
+// in a deadlock in the ASan allocator.
+// We re-implement in-place sorting w/o recursion as straightforward heapsort.
+void SortArray(uptr *array, uptr size) {
+ if (size < 2)
+ return;
+ // Stage 1: insert elements to the heap.
+ for (uptr i = 1; i < size; i++) {
+ uptr j, p;
+ for (j = i; j > 0; j = p) {
+ p = (j - 1) / 2;
+ if (array[j] > array[p])
+ Swap(array[j], array[p]);
+ else
+ break;
+ }
+ }
+ // Stage 2: swap largest element with the last one,
+ // and sink the new top.
+ for (uptr i = size - 1; i > 0; i--) {
+ Swap(array[0], array[i]);
+ uptr j, max_ind;
+ for (j = 0; j < i; j = max_ind) {
+ uptr left = 2 * j + 1;
+ uptr right = 2 * j + 2;
+ max_ind = j;
+ if (left < i && array[left] > array[max_ind])
+ max_ind = left;
+ if (right < i && array[right] > array[max_ind])
+ max_ind = right;
+ if (max_ind != j)
+ Swap(array[j], array[max_ind]);
+ else
+ break;
+ }
+ }
+}
+
+// We want to map a chunk of address space aligned to 'alignment'.
+// We do it by mapping a bit more and then unmapping the redundant pieces.
+// We probably can do it with fewer syscalls in some OS-dependent way.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+// uptr PageSize = GetPageSizeCached();
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = size + alignment;
+ uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+ uptr map_end = map_res + map_size;
+ uptr res = map_res;
+ if (res & (alignment - 1)) // Not aligned.
+ res = (map_res + alignment) & ~(alignment - 1);
+ uptr end = res + size;
+ if (res != map_res)
+ UnmapOrDie((void*)map_res, res - map_res);
+ if (end != map_end)
+ UnmapOrDie((void*)end, map_end - end);
+ return (void*)res;
+}
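+
+// Example (illustrative): for size == alignment == 64K this maps 128K, keeps
+// the first 64K-aligned 64K chunk and unmaps the head and tail pieces.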
+
+void ReportErrorSummary(const char *error_type, const char *file,
+ int line, const char *function) {
+ const int kMaxSize = 1024; // We don't want a summary too long.
+ InternalScopedBuffer<char> buff(kMaxSize);
+ internal_snprintf(buff.data(), kMaxSize, "%s: %s %s:%d %s",
+ SanitizerToolName, error_type,
+ file ? file : "??", line, function ? function : "??");
+ __sanitizer_report_error_summary(buff.data());
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer; // NOLINT
+
+extern "C" {
+void __sanitizer_set_report_path(const char *path) {
+ if (!path) return;
+ uptr len = internal_strlen(path);
+ if (len > sizeof(report_path_prefix) - 100) {
+ Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
+ path[0], path[1], path[2], path[3],
+ path[4], path[5], path[6], path[7]);
+ Die();
+ }
+ internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
+ report_path_prefix[len] = '\0';
+ report_fd = kInvalidFd;
+ log_to_file = true;
+}
+
+void __sanitizer_set_report_fd(int fd) {
+ if (report_fd != kStdoutFd &&
+ report_fd != kStderrFd &&
+ report_fd != kInvalidFd)
+ internal_close(report_fd);
+ report_fd = fd;
+}
+
+void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
+ (void)reserved;
+ PrepareForSandboxing();
+}
+
+void __sanitizer_report_error_summary(const char *error_summary) {
+ Printf("SUMMARY: %s\n", error_summary);
+}
+} // extern "C"
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common.h
new file mode 100644
index 000000000..302dc7427
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -0,0 +1,262 @@
+//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// It declares common functions and classes that are used in both runtimes.
+// Implementations of some functions are provided in sanitizer_common, while
+// others must be defined by the run-time library itself.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_COMMON_H
+#define SANITIZER_COMMON_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+struct StackTrace;
+
+// Constants.
+const uptr kWordSize = SANITIZER_WORDSIZE / 8;
+const uptr kWordSizeInBits = 8 * kWordSize;
+
+#if defined(__powerpc__) || defined(__powerpc64__)
+const uptr kCacheLineSize = 128;
+#else
+const uptr kCacheLineSize = 64;
+#endif
+
+extern const char *SanitizerToolName; // Can be changed by the tool.
+
+uptr GetPageSize();
+uptr GetPageSizeCached();
+uptr GetMmapGranularity();
+// Threads
+int GetPid();
+uptr GetTid();
+uptr GetThreadSelf();
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom);
+
+// Memory management
+void *MmapOrDie(uptr size, const char *mem_type);
+void UnmapOrDie(void *addr, uptr size);
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
+void *MmapFixedOrDie(uptr fixed_addr, uptr size);
+void *Mprotect(uptr fixed_addr, uptr size);
+// Map aligned chunk of address space; size and alignment are powers of two.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
+// Used to check if we can map shadow memory to a fixed location.
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
+void FlushUnneededShadowMemory(uptr addr, uptr size);
+
+// Internal allocator
+void *InternalAlloc(uptr size);
+void InternalFree(void *p);
+
+// InternalScopedBuffer can be used instead of large stack arrays to
+// keep frame size low.
+// FIXME: use InternalAlloc instead of MmapOrDie once
+// InternalAlloc is made libc-free.
+template<typename T>
+class InternalScopedBuffer {
+ public:
+ explicit InternalScopedBuffer(uptr cnt) {
+ cnt_ = cnt;
+ ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
+ }
+ ~InternalScopedBuffer() {
+ UnmapOrDie(ptr_, cnt_ * sizeof(T));
+ }
+ T &operator[](uptr i) { return ptr_[i]; }
+ T *data() { return ptr_; }
+ uptr size() { return cnt_ * sizeof(T); }
+
+ private:
+ T *ptr_;
+ uptr cnt_;
+ // Disallow evil constructors.
+ InternalScopedBuffer(const InternalScopedBuffer&);
+ void operator=(const InternalScopedBuffer&);
+};
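+
+// Usage sketch (illustrative; 'prefix' and 'pid' are placeholders): replaces
+// e.g. a large "char buf[4096]" stack array.
+//   InternalScopedBuffer<char> buf(4096);
+//   internal_snprintf(buf.data(), buf.size(), "%s.%d", prefix, pid);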
+
+// Simple low-level (mmap-based) allocator for internal use. Doesn't have
+// constructor, so all instances of LowLevelAllocator should be
+// linker initialized.
+class LowLevelAllocator {
+ public:
+ // Requires an external lock.
+ void *Allocate(uptr size);
+ private:
+ char *allocated_end_;
+ char *allocated_current_;
+};
+typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
+// Allows the tool to register callbacks for LowLevelAllocator.
+// Passing NULL removes the callback.
+void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
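+
+// Usage sketch (illustrative; 'value' and 'len' are placeholders, and the
+// caller provides its own locking):
+//   static LowLevelAllocator string_allocator;  // linker-initialized
+//   char *copy = (char*)string_allocator.Allocate(len + 1);
+//   internal_memcpy(copy, value, len);
+//   copy[len] = '\0';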
+
+// IO
+void RawWrite(const char *buffer);
+bool PrintsToTty();
+void Printf(const char *format, ...);
+void Report(const char *format, ...);
+void SetPrintfAndReportCallback(void (*callback)(const char *));
+
+fd_t OpenFile(const char *filename, bool write);
+// Opens the file 'file_name' and reads up to 'max_len' bytes.
+// The resulting buffer is mmaped and stored in '*buff'.
+// The size of the mmaped region is stored in '*buff_size'.
+// Returns the number of bytes read, or 0 if the file can not be opened.
+uptr ReadFileToBuffer(const char *file_name, char **buff,
+ uptr *buff_size, uptr max_len);
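+// Usage sketch (illustrative): the buffer stays mmaped after the call, so the
+// caller is expected to release it with UnmapOrDie.
+//   char *buff;
+//   uptr buff_size;
+//   uptr len = ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, 1 << 26);
+//   // ... use buff[0 .. len) ...
+//   UnmapOrDie(buff, buff_size);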
+// Maps given file to virtual memory, and returns pointer to it
+// (or NULL if the mapping fails). Stores the size of the mmaped region
+// in '*buff_size'.
+void *MapFileToMemory(const char *file_name, uptr *buff_size);
+
+// OS
+void DisableCoreDumper();
+void DumpProcessMap();
+bool FileExists(const char *filename);
+const char *GetEnv(const char *name);
+const char *GetPwd();
+u32 GetUid();
+void ReExec();
+bool StackSizeIsUnlimited();
+void SetStackSizeLimitInBytes(uptr limit);
+void PrepareForSandboxing();
+
+// Other
+void SleepForSeconds(int seconds);
+void SleepForMillis(int millis);
+int Atexit(void (*function)(void));
+void SortArray(uptr *array, uptr size);
+
+// Exit
+void NORETURN Abort();
+void NORETURN Die();
+void NORETURN SANITIZER_INTERFACE_ATTRIBUTE
+CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
+
+// Set the name of the current thread to 'name', return true on success.
+// The name may be truncated to a system-dependent limit.
+bool SanitizerSetThreadName(const char *name);
+// Get the name of the current thread (no more than max_len bytes),
+// return true on success. name should have space for at least max_len+1 bytes.
+bool SanitizerGetThreadName(char *name, int max_len);
+
+// Specific tools may override behavior of "Die" and "CheckFailed" functions
+// to do tool-specific job.
+void SetDieCallback(void (*callback)(void));
+typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
+ u64, u64);
+void SetCheckFailedCallback(CheckFailedCallbackType callback);
+
+// Construct a one-line string like
+// SanitizerToolName: error_type file:line function
+// and call __sanitizer_report_error_summary on it.
+void ReportErrorSummary(const char *error_type, const char *file,
+ int line, const char *function);
+
+// Math
+#if defined(_WIN32) && !defined(__clang__)
+extern "C" {
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask); // NOLINT
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); // NOLINT
+#if defined(_WIN64)
+unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask); // NOLINT
+unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); // NOLINT
+#endif
+}
+#endif
+
+INLINE uptr MostSignificantSetBitIndex(uptr x) {
+ CHECK(x != 0);
+ unsigned long up; // NOLINT
+#if !defined(_WIN32) || defined(__clang__)
+ up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
+#elif defined(_WIN64)
+ _BitScanReverse64(&up, x);
+#else
+ _BitScanReverse(&up, x);
+#endif
+ return up;
+}
+
+INLINE bool IsPowerOfTwo(uptr x) {
+ return (x & (x - 1)) == 0;
+}
+
+INLINE uptr RoundUpToPowerOfTwo(uptr size) {
+ CHECK(size);
+ if (IsPowerOfTwo(size)) return size;
+
+ uptr up = MostSignificantSetBitIndex(size);
+ CHECK(size < (1ULL << (up + 1)));
+ CHECK(size > (1ULL << up));
+ return 1UL << (up + 1);
+}
+
+INLINE uptr RoundUpTo(uptr size, uptr boundary) {
+ CHECK(IsPowerOfTwo(boundary));
+ return (size + boundary - 1) & ~(boundary - 1);
+}
+
+INLINE uptr RoundDownTo(uptr x, uptr boundary) {
+ return x & ~(boundary - 1);
+}
+
+INLINE bool IsAligned(uptr a, uptr alignment) {
+ return (a & (alignment - 1)) == 0;
+}
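+
+// Examples (illustrative): RoundUpTo(13, 8) == 16, RoundDownTo(13, 8) == 8,
+// IsAligned(16, 8) == true, RoundUpToPowerOfTwo(33) == 64.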
+
+INLINE uptr Log2(uptr x) {
+ CHECK(IsPowerOfTwo(x));
+#if !defined(_WIN32) || defined(__clang__)
+ return __builtin_ctzl(x);
+#elif defined(_WIN64)
+ unsigned long ret; // NOLINT
+ _BitScanForward64(&ret, x);
+ return ret;
+#else
+ unsigned long ret; // NOLINT
+ _BitScanForward(&ret, x);
+ return ret;
+#endif
+}
+
+// Don't use std::min, std::max or std::swap, to minimize dependency
+// on libstdc++.
+template<class T> T Min(T a, T b) { return a < b ? a : b; }
+template<class T> T Max(T a, T b) { return a > b ? a : b; }
+template<class T> void Swap(T& a, T& b) {
+ T tmp = a;
+ a = b;
+ b = tmp;
+}
+
+// Char handling
+INLINE bool IsSpace(int c) {
+ return (c == ' ') || (c == '\n') || (c == '\t') ||
+ (c == '\f') || (c == '\r') || (c == '\v');
+}
+INLINE bool IsDigit(int c) {
+ return (c >= '0') && (c <= '9');
+}
+INLINE int ToLower(int c) {
+ return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
+}
+
+#if SANITIZER_WORDSIZE == 64
+# define FIRST_32_SECOND_64(a, b) (b)
+#else
+# define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_COMMON_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
new file mode 100644
index 000000000..af27603eb
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -0,0 +1,337 @@
+//===-- sanitizer_common_interceptors.inc -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+// COMMON_INTERCEPTOR_FD_ACQUIRE
+// COMMON_INTERCEPTOR_FD_RELEASE
+// COMMON_INTERCEPTOR_SET_THREAD_NAME
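+//
+// A minimal tool-side sketch (illustrative only; real tools route these
+// macros into their own annotation and poisoning logic, and CheckRead /
+// CheckWrite are placeholder names):
+//   #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) (void)(ctx = 0)
+//   #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) CheckRead(ptr, size)
+//   #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) CheckWrite(ptr, size)
+//   #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) (void)(fd)
+//   #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) (void)(fd)
+//   #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) (void)(name)
+//   #include "sanitizer_common_interceptors.inc"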
+//===----------------------------------------------------------------------===//
+#include "interception/interception.h"
+#include "sanitizer_platform_interceptors.h"
+
+#include <stdarg.h>
+
+#ifdef _WIN32
+#define va_copy(dst, src) ((dst) = (src))
+#endif // _WIN32
+
+#if SANITIZER_INTERCEPT_READ
+INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
+ SSIZE_T res = REAL(read)(fd, ptr, count);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_READ INTERCEPT_FUNCTION(read)
+#else
+#define INIT_READ
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD
+INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
+ SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREAD INTERCEPT_FUNCTION(pread)
+#else
+#define INIT_PREAD
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD64
+INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
+ SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
+#else
+#define INIT_PREAD64
+#endif
+
+#if SANITIZER_INTERCEPT_WRITE
+INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(write)(fd, ptr, count);
+ if (res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+#define INIT_WRITE INTERCEPT_FUNCTION(write)
+#else
+#define INIT_WRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE
+INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+#define INIT_PWRITE INTERCEPT_FUNCTION(pwrite)
+#else
+#define INIT_PWRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE64
+INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count,
+ OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+#define INIT_PWRITE64 INTERCEPT_FUNCTION(pwrite64)
+#else
+#define INIT_PWRITE64
+#endif
+
+#if SANITIZER_INTERCEPT_PRCTL
+INTERCEPTOR(int, prctl, int option,
+ unsigned long arg2, unsigned long arg3, // NOLINT
+ unsigned long arg4, unsigned long arg5) { // NOLINT
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
+ static const int PR_SET_NAME = 15;
+  int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
+ if (option == PR_SET_NAME) {
+ char buff[16];
+ internal_strncpy(buff, (char *)arg2, 15);
+ buff[15] = 0;
+ COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
+ }
+ return res;
+}
+#define INIT_PRCTL INTERCEPT_FUNCTION(prctl)
+#else
+#define INIT_PRCTL
+#endif // SANITIZER_INTERCEPT_PRCTL
+
+#if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+INTERCEPTOR(void *, localtime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, localtime, timep);
+ void *res = REAL(localtime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz);
+ }
+ return res;
+}
+INTERCEPTOR(void *, localtime_r, unsigned long *timep, void *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, localtime_r, timep, result);
+ void *res = REAL(localtime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz);
+ }
+ return res;
+}
+INTERCEPTOR(void *, gmtime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gmtime, timep);
+ void *res = REAL(gmtime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz);
+ }
+ return res;
+}
+INTERCEPTOR(void *, gmtime_r, unsigned long *timep, void *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gmtime_r, timep, result);
+ void *res = REAL(gmtime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz);
+ }
+ return res;
+}
+INTERCEPTOR(char *, ctime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep);
+ char *res = REAL(ctime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result);
+ char *res = REAL(ctime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, asctime, void *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm);
+ char *res = REAL(asctime)(tm);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, struct_tm_sz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, asctime_r, void *tm, char *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result);
+ char *res = REAL(asctime_r)(tm, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, struct_tm_sz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_LOCALTIME_AND_FRIENDS \
+ INTERCEPT_FUNCTION(localtime); \
+ INTERCEPT_FUNCTION(localtime_r); \
+ INTERCEPT_FUNCTION(gmtime); \
+ INTERCEPT_FUNCTION(gmtime_r); \
+ INTERCEPT_FUNCTION(ctime); \
+ INTERCEPT_FUNCTION(ctime_r); \
+ INTERCEPT_FUNCTION(asctime); \
+ INTERCEPT_FUNCTION(asctime_r);
+#else
+#define INIT_LOCALTIME_AND_FRIENDS
+#endif // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+
+#if SANITIZER_INTERCEPT_SCANF
+
+#include "sanitizer_common_interceptors_scanf.inc"
+
+#define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \
+ va_list aq; \
+ va_copy(aq, ap); \
+ int res = REAL(vname)(__VA_ARGS__); \
+ if (res > 0) \
+ scanf_common(ctx, res, allowGnuMalloc, format, aq); \
+ va_end(aq); \
+ return res; \
+ }
+
+INTERCEPTOR(int, vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vscanf, true, format, ap)
+
+INTERCEPTOR(int, vsscanf, const char *str, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vsscanf, true, str, format, ap)
+
+INTERCEPTOR(int, vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vfscanf, true, stream, format, ap)
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+INTERCEPTOR(int, __isoc99_vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vscanf, false, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsscanf, const char *str, const char *format,
+ va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)
+
+INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
+#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
+
+#define SCANF_INTERCEPTOR_IMPL(name, vname, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, name, __VA_ARGS__); \
+ va_list ap; \
+ va_start(ap, format); \
+ int res = vname(__VA_ARGS__, ap); \
+ va_end(ap); \
+ return res; \
+ }
+
+INTERCEPTOR(int, scanf, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(scanf, vscanf, format)
+
+INTERCEPTOR(int, fscanf, void *stream, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
+
+INTERCEPTOR(int, sscanf, const char *str, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+INTERCEPTOR(int, __isoc99_scanf, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
+
+INTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
+
+INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
+SCANF_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+#endif
+
+#define INIT_SCANF \
+ INTERCEPT_FUNCTION(scanf); \
+ INTERCEPT_FUNCTION(sscanf); \
+ INTERCEPT_FUNCTION(fscanf); \
+ INTERCEPT_FUNCTION(vscanf); \
+ INTERCEPT_FUNCTION(vsscanf); \
+ INTERCEPT_FUNCTION(vfscanf); \
+ INTERCEPT_FUNCTION(__isoc99_scanf); \
+ INTERCEPT_FUNCTION(__isoc99_sscanf); \
+ INTERCEPT_FUNCTION(__isoc99_fscanf); \
+ INTERCEPT_FUNCTION(__isoc99_vscanf); \
+ INTERCEPT_FUNCTION(__isoc99_vsscanf); \
+ INTERCEPT_FUNCTION(__isoc99_vfscanf);
+
+#else
+#define INIT_SCANF
+#endif
+
+#define SANITIZER_COMMON_INTERCEPTORS_INIT \
+ INIT_READ; \
+ INIT_PREAD; \
+ INIT_PREAD64; \
+ INIT_PRCTL; \
+ INIT_WRITE; \
+ INIT_PWRITE; \
+ INIT_PWRITE64; \
+ INIT_LOCALTIME_AND_FRIENDS; \
+ INIT_SCANF;
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
new file mode 100644
index 000000000..5b761382d
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
@@ -0,0 +1,307 @@
+//===-- sanitizer_common_interceptors_scanf.inc -----------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Scanf implementation for use in *Sanitizer interceptors.
+// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
+// with a few common GNU extensions.
+//
+//===----------------------------------------------------------------------===//
+#include <stdarg.h>
+
+struct ScanfDirective {
+  int argIdx; // argument index, or -1 if not specified ("%n$")
+ int fieldWidth;
+ bool suppressed; // suppress assignment ("*")
+ bool allocate; // allocate space ("m")
+ char lengthModifier[2];
+ char convSpecifier;
+ bool maybeGnuMalloc;
+};
+
+static const char *parse_number(const char *p, int *out) {
+ *out = internal_atoll(p);
+ while (*p >= '0' && *p <= '9')
+ ++p;
+ return p;
+}
+
+static bool char_is_one_of(char c, const char *s) {
+ return !!internal_strchr(s, c);
+}
+
+// Parses the scanf format string. If a valid directive is encountered, it is
+// returned in *dir. This function returns a pointer to the first
+// unprocessed character, or 0 in case of error.
+// If the end of the format string is reached, a pointer to the closing \0
+// is returned.
+static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
+ ScanfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return 0;
+ }
+ // %n$
+ if (*p >= '0' && *p <= '9') {
+ int number;
+ const char *q = parse_number(p, &number);
+ if (*q == '$') {
+ dir->argIdx = number;
+ p = q + 1;
+ }
+ // Otherwise, do not change p. This will be re-parsed later as the field
+ // width.
+ }
+ // *
+ if (*p == '*') {
+ dir->suppressed = true;
+ ++p;
+ }
+ // Field width.
+ if (*p >= '0' && *p <= '9') {
+ p = parse_number(p, &dir->fieldWidth);
+ if (dir->fieldWidth <= 0)
+ return 0;
+ }
+ // m
+ if (*p == 'm') {
+ dir->allocate = true;
+ ++p;
+ }
+ // Length modifier.
+ if (char_is_one_of(*p, "jztLq")) {
+ dir->lengthModifier[0] = *p;
+ ++p;
+ } else if (*p == 'h') {
+ dir->lengthModifier[0] = 'h';
+ ++p;
+ if (*p == 'h') {
+ dir->lengthModifier[1] = 'h';
+ ++p;
+ }
+ } else if (*p == 'l') {
+ dir->lengthModifier[0] = 'l';
+ ++p;
+ if (*p == 'l') {
+ dir->lengthModifier[1] = 'l';
+ ++p;
+ }
+ }
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ // Consume %[...] expression.
+ if (dir->convSpecifier == '[') {
+ if (*p == '^')
+ ++p;
+ if (*p == ']')
+ ++p;
+ while (*p && *p != ']')
+ ++p;
+ if (*p == 0)
+ return 0; // unexpected end of string
+ // Consume the closing ']'.
+ ++p;
+ }
+    // This is unfortunately ambiguous between the old GNU extension
+    // %as, %aS and %a[...] and the newer POSIX %a followed by
+    // the letters s, S or [.
+ if (allowGnuMalloc && dir->convSpecifier == 'a' &&
+ !dir->lengthModifier[0]) {
+ if (*p == 's' || *p == 'S') {
+ dir->maybeGnuMalloc = true;
+ ++p;
+ } else if (*p == '[') {
+        // Watch for %a[h-j%d]: if % appears inside the
+        // [...] range, we need to give up, because we don't know
+        // whether scanf will parse it as POSIX %a followed by [h-j %d ] or
+        // as a GNU allocation of a string matching the set h-j, % and d.
+ const char *q = p + 1;
+ if (*q == '^')
+ ++q;
+ if (*q == ']')
+ ++q;
+ while (*q && *q != ']' && *q != '%')
+ ++q;
+ if (*q == 0 || *q == '%')
+ return 0;
+ p = q + 1; // Consume the closing ']'.
+ dir->maybeGnuMalloc = true;
+ }
+ }
+ break;
+ }
+ return p;
+}
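+
+// Example (illustrative): for p == "%3hhd rest" this fills in
+// fieldWidth == 3, lengthModifier == "hh", convSpecifier == 'd', and returns
+// a pointer to " rest".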
+
+// Returns true if the character is an integer conversion specifier.
+static bool scanf_is_integer_conv(char c) {
+ return char_is_one_of(c, "diouxXn");
+}
+
+// Returns true if the character is a floating point conversion specifier.
+static bool scanf_is_float_conv(char c) {
+ return char_is_one_of(c, "aAeEfFgG");
+}
+
+// Returns string output character size for string-like conversions,
+// or 0 if the conversion is invalid.
+static int scanf_get_char_size(ScanfDirective *dir) {
+ if (char_is_one_of(dir->convSpecifier, "CS")) {
+ // wchar_t
+ return 0;
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cs[")) {
+ if (dir->lengthModifier[0] == 'l')
+ // wchar_t
+ return 0;
+ else if (dir->lengthModifier[0] == 0)
+ return sizeof(char);
+ else
+ return 0;
+ }
+
+ return 0;
+}
+
+enum ScanfStoreSize {
+ // Store size not known in advance; can be calculated as strlen() of the
+ // destination buffer.
+ SSS_STRLEN = -1,
+ // Invalid conversion specifier.
+ SSS_INVALID = 0
+};
+
+// Returns the store size of a scanf directive (if >0), or a value of
+// ScanfStoreSize.
+static int scanf_get_store_size(ScanfDirective *dir) {
+ if (dir->allocate) {
+ if (!char_is_one_of(dir->convSpecifier, "cCsS["))
+ return SSS_INVALID;
+ return sizeof(char *);
+ }
+
+ if (dir->maybeGnuMalloc) {
+ if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
+ return SSS_INVALID;
+ // This is ambiguous, so check the smaller size of char * (if it is
+ // a GNU extension of %as, %aS or %a[...]) and float (if it is
+ // POSIX %a followed by s, S or [ letters).
+ return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
+ }
+
+ if (scanf_is_integer_conv(dir->convSpecifier)) {
+ switch (dir->lengthModifier[0]) {
+ case 'h':
+ return dir->lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
+ case 'l':
+ return dir->lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
+ case 'L':
+ return sizeof(long long);
+ case 'j':
+ return sizeof(INTMAX_T);
+ case 'z':
+ return sizeof(SIZE_T);
+ case 't':
+ return sizeof(PTRDIFF_T);
+ case 0:
+ return sizeof(int);
+ default:
+ return SSS_INVALID;
+ }
+ }
+
+ if (scanf_is_float_conv(dir->convSpecifier)) {
+ switch (dir->lengthModifier[0]) {
+ case 'L':
+ case 'q':
+ return sizeof(long double);
+ case 'l':
+ return dir->lengthModifier[1] == 'l' ? sizeof(long double)
+ : sizeof(double);
+ case 0:
+ return sizeof(float);
+ default:
+ return SSS_INVALID;
+ }
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "sS[")) {
+ unsigned charSize = scanf_get_char_size(dir);
+ if (charSize == 0)
+ return SSS_INVALID;
+ if (dir->fieldWidth == 0)
+ return SSS_STRLEN;
+ return (dir->fieldWidth + 1) * charSize;
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cC")) {
+ unsigned charSize = scanf_get_char_size(dir);
+ if (charSize == 0)
+ return SSS_INVALID;
+ if (dir->fieldWidth == 0)
+ return charSize;
+ return dir->fieldWidth * charSize;
+ }
+
+ if (dir->convSpecifier == 'p') {
+ if (dir->lengthModifier[1] != 0)
+ return SSS_INVALID;
+ return sizeof(void *);
+ }
+
+ return SSS_INVALID;
+}
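+
+// Examples (illustrative): "%d" stores sizeof(int) bytes, "%hhd" one byte,
+// "%Lf" sizeof(long double) bytes, "%16s" 17 bytes (field width plus the
+// terminating NUL), and "%s" has store size SSS_STRLEN.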
+
+// Common part of *scanf interceptors.
+// Process format string and va_list, and report all store ranges.
+// Stops when "consuming" n_inputs input items.
+static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
+ const char *format, va_list aq) {
+ CHECK_GT(n_inputs, 0);
+ const char *p = format;
+
+ while (*p && n_inputs) {
+ ScanfDirective dir;
+ p = scanf_parse_next(p, allowGnuMalloc, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.suppressed)
+ continue;
+ int size = scanf_get_store_size(&dir);
+ if (size == SSS_INVALID)
+ break;
+ void *argp = va_arg(aq, void *);
+ if (dir.convSpecifier != 'n')
+ --n_inputs;
+ if (size == SSS_STRLEN) {
+ size = internal_strlen((const char *)argp) + 1;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ }
+}
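+
+// Example (illustrative): for sscanf("12 ab", "%d %2s", &i, buf) with
+// res == 2, this reports a write range of sizeof(int) bytes at &i and one of
+// 3 bytes (field width plus the terminating NUL) at buf.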
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_flags.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_flags.cc
new file mode 100644
index 000000000..2152c7bdf
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_flags.cc
@@ -0,0 +1,96 @@
+//===-- sanitizer_flags.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_flags.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+static bool GetFlagValue(const char *env, const char *name,
+ const char **value, int *value_length) {
+ if (env == 0)
+ return false;
+ const char *pos = internal_strstr(env, name);
+ const char *end;
+ if (pos == 0)
+ return false;
+ pos += internal_strlen(name);
+ if (pos[0] != '=') {
+ end = pos;
+ } else {
+ pos += 1;
+ if (pos[0] == '"') {
+ pos += 1;
+ end = internal_strchr(pos, '"');
+ } else if (pos[0] == '\'') {
+ pos += 1;
+ end = internal_strchr(pos, '\'');
+ } else {
+ // Read until the next space or colon.
+ end = pos + internal_strcspn(pos, " :");
+ }
+ if (end == 0)
+ end = pos + internal_strlen(pos);
+ }
+ *value = pos;
+ *value_length = end - pos;
+ return true;
+}
+
+static bool StartsWith(const char *flag, int flag_length, const char *value) {
+ if (!flag || !value)
+ return false;
+ int value_length = internal_strlen(value);
+ return (flag_length >= value_length) &&
+ (0 == internal_strncmp(flag, value, value_length));
+}
+
+void ParseFlag(const char *env, bool *flag, const char *name) {
+ const char *value;
+ int value_length;
+ if (!GetFlagValue(env, name, &value, &value_length))
+ return;
+ if (StartsWith(value, value_length, "0") ||
+ StartsWith(value, value_length, "no") ||
+ StartsWith(value, value_length, "false"))
+ *flag = false;
+ if (StartsWith(value, value_length, "1") ||
+ StartsWith(value, value_length, "yes") ||
+ StartsWith(value, value_length, "true"))
+ *flag = true;
+}
+
+void ParseFlag(const char *env, int *flag, const char *name) {
+ const char *value;
+ int value_length;
+ if (!GetFlagValue(env, name, &value, &value_length))
+ return;
+ *flag = internal_atoll(value);
+}
+
+static LowLevelAllocator allocator_for_flags;
+
+void ParseFlag(const char *env, const char **flag, const char *name) {
+ const char *value;
+ int value_length;
+ if (!GetFlagValue(env, name, &value, &value_length))
+ return;
+ // Copy the flag value. Don't use locks here, as flags are parsed at
+ // tool startup.
+ char *value_copy = (char*)(allocator_for_flags.Allocate(value_length + 1));
+ internal_memcpy(value_copy, value, value_length);
+ value_copy[value_length] = '\0';
+ *flag = value_copy;
+}
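+
+// Example (illustrative; verbosity, log_path and fast are placeholder flags):
+// with env == "verbosity=1:log_path='/tmp/a' fast=no",
+//   ParseFlag(env, &verbosity, "verbosity");  // int flag    -> 1
+//   ParseFlag(env, &log_path, "log_path");    // string flag -> "/tmp/a"
+//   ParseFlag(env, &fast, "fast");            // bool flag   -> false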
+
+} // namespace __sanitizer
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_flags.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_flags.h
new file mode 100644
index 000000000..1b9bf5bff
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_flags.h
@@ -0,0 +1,25 @@
+//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAGS_H
+#define SANITIZER_FLAGS_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+void ParseFlag(const char *env, bool *flag, const char *name);
+void ParseFlag(const char *env, int *flag, const char *name);
+void ParseFlag(const char *env, const char **flag, const char *name);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FLAGS_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_internal_defs.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
new file mode 100644
index 000000000..577c9a9c1
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
@@ -0,0 +1,268 @@
+//===-- sanitizer_internal_defs.h -------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+// It contains macros used in run-time library code.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_DEFS_H
+#define SANITIZER_DEFS_H
+
+#if defined(_WIN32)
+// FIXME find out what we need on Windows. __declspec(dllexport) ?
+# define SANITIZER_INTERFACE_ATTRIBUTE
+# define SANITIZER_WEAK_ATTRIBUTE
+#elif defined(SANITIZER_GO)
+# define SANITIZER_INTERFACE_ATTRIBUTE
+# define SANITIZER_WEAK_ATTRIBUTE
+#else
+# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
+#endif
+
+#ifdef __linux__
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
+#else
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
+#endif
+
+// GCC does not understand __has_feature
+#if !defined(__has_feature)
+# define __has_feature(x) 0
+#endif
+
+// For portability reasons we do not include stddef.h, stdint.h or any other
+// system header, but we do need some basic types that are not defined
+// in a portable way by the language itself.
+namespace __sanitizer {
+
+#if defined(_WIN64)
+// 64-bit Windows uses LLP64 data model.
+typedef unsigned long long uptr; // NOLINT
+typedef signed long long sptr; // NOLINT
+#else
+typedef unsigned long uptr; // NOLINT
+typedef signed long sptr; // NOLINT
+#endif // defined(_WIN64)
+#if defined(__x86_64__)
+// Since x32 uses the ILP32 data model in 64-bit hardware mode, we must use a
+// 64-bit pointer to unwind the stack frame.
+typedef unsigned long long uhwptr; // NOLINT
+#else
+typedef uptr uhwptr; // NOLINT
+#endif
+typedef unsigned char u8;
+typedef unsigned short u16; // NOLINT
+typedef unsigned int u32;
+typedef unsigned long long u64; // NOLINT
+typedef signed char s8;
+typedef signed short s16; // NOLINT
+typedef signed int s32;
+typedef signed long long s64; // NOLINT
+typedef int fd_t;
+
+} // namespace __sanitizer
+
+extern "C" {
+ // Tell the tools to write their reports to "path.<pid>" instead of stderr.
+ void __sanitizer_set_report_path(const char *path)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+
+  // Tell the tools to write their reports to the given file descriptor
+  // instead of stderr.
+ void __sanitizer_set_report_fd(int fd)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+
+ // Notify the tools that the sandbox is going to be turned on. The reserved
+ // parameter will be used in the future to hold a structure with functions
+ // that the tools may call to bypass the sandbox.
+ void __sanitizer_sandbox_on_notify(void *reserved)
+ SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
+
+ // This function is called by the tool when it has just finished reporting
+ // an error. 'error_summary' is a one-line string that summarizes
+ // the error message. This function can be overridden by the client.
+ void __sanitizer_report_error_summary(const char *error_summary)
+ SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
+} // extern "C"
+
+
+using namespace __sanitizer; // NOLINT
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers to avoid portability issues.
+
+// Common defs.
+#define INLINE static inline
+#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+#define WEAK SANITIZER_WEAK_ATTRIBUTE
+
+// Platform-specific defs.
+#if defined(_MSC_VER)
+# define ALWAYS_INLINE __declspec(forceinline)
+// FIXME(timurrrr): do we need this on Windows?
+# define ALIAS(x)
+# define ALIGNED(x) __declspec(align(x))
+# define FORMAT(f, a)
+# define NOINLINE __declspec(noinline)
+# define NORETURN __declspec(noreturn)
+# define THREADLOCAL __declspec(thread)
+# define NOTHROW
+# define LIKELY(x) (x)
+# define UNLIKELY(x) (x)
+# define UNUSED
+# define USED
+# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
+#else // _MSC_VER
+# define ALWAYS_INLINE __attribute__((always_inline))
+# define ALIAS(x) __attribute__((alias(x)))
+# define ALIGNED(x) __attribute__((aligned(x)))
+# define FORMAT(f, a) __attribute__((format(printf, f, a)))
+# define NOINLINE __attribute__((noinline))
+# define NORETURN __attribute__((noreturn))
+# define THREADLOCAL __thread
+# define NOTHROW throw()
+# define LIKELY(x) __builtin_expect(!!(x), 1)
+# define UNLIKELY(x) __builtin_expect(!!(x), 0)
+# define UNUSED __attribute__((unused))
+# define USED __attribute__((used))
+# if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(x) generates prefetcht0 on x86, so use prefetchnta directly.
+# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
+# else
+# define PREFETCH(x) __builtin_prefetch(x)
+# endif
+#endif // _MSC_VER
+
+#if defined(_WIN32)
+typedef unsigned long DWORD; // NOLINT
+typedef DWORD thread_return_t;
+# define THREAD_CALLING_CONV __stdcall
+#else // _WIN32
+typedef void* thread_return_t;
+# define THREAD_CALLING_CONV
+#endif // _WIN32
+typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);
+
+#if __LP64__ || defined(_WIN64)
+# define SANITIZER_WORDSIZE 64
+#else
+# define SANITIZER_WORDSIZE 32
+#endif
+
+// NOTE: Functions below must be defined in each run-time.
+namespace __sanitizer {
+void NORETURN Die();
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2);
+} // namespace __sanitizer
+
+// Check macro
+#define RAW_CHECK_MSG(expr, msg) do { \
+ if (!(expr)) { \
+ RawWrite(msg); \
+ Die(); \
+ } \
+} while (0)
+
+#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr)
+
+#define CHECK_IMPL(c1, op, c2) \
+ do { \
+ __sanitizer::u64 v1 = (u64)(c1); \
+ __sanitizer::u64 v2 = (u64)(c2); \
+ if (!(v1 op v2)) \
+ __sanitizer::CheckFailed(__FILE__, __LINE__, \
+ "(" #c1 ") " #op " (" #c2 ")", v1, v2); \
+ } while (false) \
+/**/
+
+#define CHECK(a) CHECK_IMPL((a), !=, 0)
+#define CHECK_EQ(a, b) CHECK_IMPL((a), ==, (b))
+#define CHECK_NE(a, b) CHECK_IMPL((a), !=, (b))
+#define CHECK_LT(a, b) CHECK_IMPL((a), <, (b))
+#define CHECK_LE(a, b) CHECK_IMPL((a), <=, (b))
+#define CHECK_GT(a, b) CHECK_IMPL((a), >, (b))
+#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
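+
+// Usage sketch (illustrative): the CHECK_* macros cast both operands to u64
+// and call CheckFailed() with __FILE__/__LINE__ when the comparison fails.
+//   CHECK_LT(idx, kMaxSize);   // dies via CheckFailed()/Die() if idx >= kMaxSize
+//   CHECK(ptr);                // equivalent to CHECK_NE(ptr, 0)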
+
+#if TSAN_DEBUG
+#define DCHECK(a) CHECK(a)
+#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
+#define DCHECK_NE(a, b) CHECK_NE(a, b)
+#define DCHECK_LT(a, b) CHECK_LT(a, b)
+#define DCHECK_LE(a, b) CHECK_LE(a, b)
+#define DCHECK_GT(a, b) CHECK_GT(a, b)
+#define DCHECK_GE(a, b) CHECK_GE(a, b)
+#else
+#define DCHECK(a)
+#define DCHECK_EQ(a, b)
+#define DCHECK_NE(a, b)
+#define DCHECK_LT(a, b)
+#define DCHECK_LE(a, b)
+#define DCHECK_GT(a, b)
+#define DCHECK_GE(a, b)
+#endif
+
+#define UNREACHABLE(msg) do { \
+ CHECK(0 && msg); \
+ Die(); \
+} while (0)
+
+#define UNIMPLEMENTED() UNREACHABLE("unimplemented")
+
+#define COMPILER_CHECK(pred) IMPL_COMPILER_ASSERT(pred, __LINE__)
+
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
+
+#define IMPL_PASTE(a, b) a##b
+#define IMPL_COMPILER_ASSERT(pred, line) \
+ typedef char IMPL_PASTE(assertion_failed_##_, line)[2*(int)(pred)-1]
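+
+// Example (illustrative): COMPILER_CHECK(sizeof(u64) == 8) expands to a
+// typedef of a char array whose size becomes -1 when the predicate is false,
+// turning the failed assertion into a compile-time error.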
+
+// Limits for integral types. We have to redefine them in case we don't
+// have stdint.h (as in Visual Studio 9).
+#undef __INT64_C
+#undef __UINT64_C
+#if SANITIZER_WORDSIZE == 64
+# define __INT64_C(c) c ## L
+# define __UINT64_C(c) c ## UL
+#else
+# define __INT64_C(c) c ## LL
+# define __UINT64_C(c) c ## ULL
+#endif // SANITIZER_WORDSIZE == 64
+#undef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#undef INT32_MAX
+#define INT32_MAX (2147483647)
+#undef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#undef INT64_MIN
+#define INT64_MIN (-__INT64_C(9223372036854775807)-1)
+#undef INT64_MAX
+#define INT64_MAX (__INT64_C(9223372036854775807))
+#undef UINT64_MAX
+#define UINT64_MAX (__UINT64_C(18446744073709551615))
+
+enum LinkerInitialized { LINKER_INITIALIZED = 0 };
+
+#if !defined(_MSC_VER) || defined(__clang__)
+# define GET_CALLER_PC() (uptr)__builtin_return_address(0)
+# define GET_CURRENT_FRAME() (uptr)__builtin_frame_address(0)
+#else
+extern "C" void* _ReturnAddress(void);
+# pragma intrinsic(_ReturnAddress)
+# define GET_CALLER_PC() (uptr)_ReturnAddress()
+// CaptureStackBackTrace doesn't need to know BP on Windows.
+// FIXME: This macro is still used when printing error reports though it's not
+// clear if the BP value is needed in the ASan reports on Windows.
+# define GET_CURRENT_FRAME() (uptr)0xDEADBEEF
+#endif
+
+#define HANDLE_EINTR(res, f) { \
+ do { \
+ res = (f); \
+ } while (res == -1 && errno == EINTR); \
+ }
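+
+// Usage sketch (taken from the Linux syscall wrappers below):
+//   sptr res;
+//   HANDLE_EINTR(res, (sptr)syscall(__NR_read, fd, buf, count));
+//   // res holds the final return value once the call stops failing with EINTR.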
+
+#endif // SANITIZER_DEFS_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_lfstack.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_lfstack.h
new file mode 100644
index 000000000..8033b9a7f
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_lfstack.h
@@ -0,0 +1,71 @@
+//===-- sanitizer_lfstack.h -=-----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Lock-free stack.
+// Uses 32/17 bits as an ABA-counter on 32/64-bit platforms.
+// The memory passed to Push() must never be munmap'ed.
+// The type T must contain a "T *next" field.
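+//
+// Usage sketch (illustrative; "Node" is a hypothetical type):
+//   struct Node { Node *next; uptr payload; };
+//   LFStack<Node> stack;      // zero-initialized state is a valid empty stack
+//   stack.Push(node);         // node's memory must stay mapped forever
+//   Node *top = stack.Pop();  // returns 0 when the stack is empty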
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LFSTACK_H
+#define SANITIZER_LFSTACK_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+template<typename T>
+struct LFStack {
+ void Clear() {
+ atomic_store(&head_, 0, memory_order_relaxed);
+ }
+
+ bool Empty() const {
+ return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0;
+ }
+
+ void Push(T *p) {
+ u64 cmp = atomic_load(&head_, memory_order_relaxed);
+ for (;;) {
+ u64 cnt = (cmp & kCounterMask) + kCounterInc;
+ u64 xch = (u64)(uptr)p | cnt;
+ p->next = (T*)(uptr)(cmp & kPtrMask);
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_release))
+ break;
+ }
+ }
+
+ T *Pop() {
+ u64 cmp = atomic_load(&head_, memory_order_acquire);
+ for (;;) {
+ T *cur = (T*)(uptr)(cmp & kPtrMask);
+ if (cur == 0)
+ return 0;
+ T *nxt = cur->next;
+ u64 cnt = (cmp & kCounterMask);
+ u64 xch = (u64)(uptr)nxt | cnt;
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_acquire))
+ return cur;
+ }
+ }
+
+ // private:
+ static const int kCounterBits = FIRST_32_SECOND_64(32, 17);
+ static const u64 kPtrMask = ((u64)-1) >> kCounterBits;
+ static const u64 kCounterMask = ~kPtrMask;
+ static const u64 kCounterInc = kPtrMask + 1;
+
+ atomic_uint64_t head_;
+};
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_LFSTACK_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_libc.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_libc.cc
new file mode 100644
index 000000000..c57128a54
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_libc.cc
@@ -0,0 +1,225 @@
+//===-- sanitizer_libc.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries. See sanitizer_libc.h for details.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+s64 internal_atoll(const char *nptr) {
+ return internal_simple_strtoll(nptr, (char**)0, 10);
+}
+
+void *internal_memchr(const void *s, int c, uptr n) {
+ const char* t = (char*)s;
+ for (uptr i = 0; i < n; ++i, ++t)
+ if (*t == c)
+ return (void*)t;
+ return 0;
+}
+
+int internal_memcmp(const void* s1, const void* s2, uptr n) {
+ const char* t1 = (char*)s1;
+ const char* t2 = (char*)s2;
+ for (uptr i = 0; i < n; ++i, ++t1, ++t2)
+ if (*t1 != *t2)
+ return *t1 < *t2 ? -1 : 1;
+ return 0;
+}
+
+void *internal_memcpy(void *dest, const void *src, uptr n) {
+ char *d = (char*)dest;
+ char *s = (char*)src;
+ for (uptr i = 0; i < n; ++i)
+ d[i] = s[i];
+ return dest;
+}
+
+void *internal_memmove(void *dest, const void *src, uptr n) {
+ char *d = (char*)dest;
+ char *s = (char*)src;
+ sptr i, signed_n = (sptr)n;
+ CHECK_GE(signed_n, 0);
+ if (d < s) {
+ for (i = 0; i < signed_n; ++i)
+ d[i] = s[i];
+ } else {
+ if (d > s && signed_n > 0)
+ for (i = signed_n - 1; i >= 0 ; --i) {
+ d[i] = s[i];
+ }
+ }
+ return dest;
+}
+
+void *internal_memset(void* s, int c, uptr n) {
+ // The next line prevents Clang from making a call to memset() instead of the
+ // loop below.
+ // FIXME: building the runtime with -ffreestanding is a better idea. However
+ // there currently are linktime problems due to PR12396.
+ char volatile *t = (char*)s;
+ for (uptr i = 0; i < n; ++i, ++t) {
+ *t = c;
+ }
+ return s;
+}
+
+uptr internal_strcspn(const char *s, const char *reject) {
+ uptr i;
+ for (i = 0; s[i]; i++) {
+ if (internal_strchr(reject, s[i]) != 0)
+ return i;
+ }
+ return i;
+}
+
+char* internal_strdup(const char *s) {
+ uptr len = internal_strlen(s);
+ char *s2 = (char*)InternalAlloc(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
+int internal_strcmp(const char *s1, const char *s2) {
+ while (true) {
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2) return (c1 < c2) ? -1 : 1;
+ if (c1 == 0) break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+int internal_strncmp(const char *s1, const char *s2, uptr n) {
+ for (uptr i = 0; i < n; i++) {
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2) return (c1 < c2) ? -1 : 1;
+ if (c1 == 0) break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+char* internal_strchr(const char *s, int c) {
+ while (true) {
+ if (*s == (char)c)
+ return (char*)s;
+ if (*s == 0)
+ return 0;
+ s++;
+ }
+}
+
+char *internal_strrchr(const char *s, int c) {
+ const char *res = 0;
+ for (uptr i = 0; s[i]; i++) {
+ if (s[i] == c) res = s + i;
+ }
+ return (char*)res;
+}
+
+uptr internal_strlen(const char *s) {
+ uptr i = 0;
+ while (s[i]) i++;
+ return i;
+}
+
+char *internal_strncat(char *dst, const char *src, uptr n) {
+ uptr len = internal_strlen(dst);
+ uptr i;
+ for (i = 0; i < n && src[i]; i++)
+ dst[len + i] = src[i];
+ dst[len + i] = 0;
+ return dst;
+}
+
+char *internal_strncpy(char *dst, const char *src, uptr n) {
+ uptr i;
+ for (i = 0; i < n && src[i]; i++)
+ dst[i] = src[i];
+ for (; i < n; i++)
+ dst[i] = '\0';
+ return dst;
+}
+
+uptr internal_strnlen(const char *s, uptr maxlen) {
+ uptr i = 0;
+ while (i < maxlen && s[i]) i++;
+ return i;
+}
+
+char *internal_strstr(const char *haystack, const char *needle) {
+ // This is O(N^2), but we are not using it in hot places.
+ uptr len1 = internal_strlen(haystack);
+ uptr len2 = internal_strlen(needle);
+ if (len1 < len2) return 0;
+ for (uptr pos = 0; pos <= len1 - len2; pos++) {
+ if (internal_memcmp(haystack + pos, needle, len2) == 0)
+ return (char*)haystack + pos;
+ }
+ return 0;
+}
+
+s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
+ CHECK_EQ(base, 10);
+ while (IsSpace(*nptr)) nptr++;
+ int sgn = 1;
+ u64 res = 0;
+ bool have_digits = false;
+ char *old_nptr = (char*)nptr;
+ if (*nptr == '+') {
+ sgn = 1;
+ nptr++;
+ } else if (*nptr == '-') {
+ sgn = -1;
+ nptr++;
+ }
+ while (IsDigit(*nptr)) {
+ res = (res <= UINT64_MAX / 10) ? res * 10 : UINT64_MAX;
+ int digit = ((*nptr) - '0');
+ res = (res <= UINT64_MAX - digit) ? res + digit : UINT64_MAX;
+ have_digits = true;
+ nptr++;
+ }
+ if (endptr != 0) {
+ *endptr = (have_digits) ? (char*)nptr : old_nptr;
+ }
+ if (sgn > 0) {
+ return (s64)(Min((u64)INT64_MAX, res));
+ } else {
+ return (res > INT64_MAX) ? INT64_MIN : ((s64)res * -1);
+ }
+}
+
+bool mem_is_zero(const char *beg, uptr size) {
+ CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
+ const char *end = beg + size;
+ uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));
+ uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));
+ uptr all = 0;
+ // Prologue.
+ for (const char *mem = beg; mem < (char*)aligned_beg && mem < end; mem++)
+ all |= *mem;
+ // Aligned loop.
+ for (; aligned_beg < aligned_end; aligned_beg++)
+ all |= *aligned_beg;
+ // Epilogue.
+ if ((char*)aligned_end >= beg)
+ for (const char *mem = (char*)aligned_end; mem < end; mem++)
+ all |= *mem;
+ return all == 0;
+}
+
+} // namespace __sanitizer
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_libc.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_libc.h
new file mode 100644
index 000000000..162394133
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_libc.h
@@ -0,0 +1,87 @@
+//===-- sanitizer_libc.h ----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// These tools cannot use some of the libc functions directly because those
+// functions are intercepted. Instead, we implement a tiny subset of libc here.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LIBC_H
+#define SANITIZER_LIBC_H
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers from sanitizer runtime.
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// internal_X() is a custom implementation of X() for use in RTL.
+
+// String functions
+s64 internal_atoll(const char *nptr);
+void *internal_memchr(const void *s, int c, uptr n);
+int internal_memcmp(const void* s1, const void* s2, uptr n);
+void *internal_memcpy(void *dest, const void *src, uptr n);
+void *internal_memmove(void *dest, const void *src, uptr n);
+// Should not be used in performance-critical places.
+void *internal_memset(void *s, int c, uptr n);
+char* internal_strchr(const char *s, int c);
+int internal_strcmp(const char *s1, const char *s2);
+uptr internal_strcspn(const char *s, const char *reject);
+char *internal_strdup(const char *s);
+uptr internal_strlen(const char *s);
+char *internal_strncat(char *dst, const char *src, uptr n);
+int internal_strncmp(const char *s1, const char *s2, uptr n);
+char *internal_strncpy(char *dst, const char *src, uptr n);
+uptr internal_strnlen(const char *s, uptr maxlen);
+char *internal_strrchr(const char *s, int c);
+// This is O(N^2), but we are not using it in hot places.
+char *internal_strstr(const char *haystack, const char *needle);
+// Works only for base=10 and doesn't set errno.
+s64 internal_simple_strtoll(const char *nptr, char **endptr, int base);
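+// Example (illustrative): internal_simple_strtoll("  -42abc", &end, 10)
+// returns -42 and leaves end pointing at "abc"; out-of-range values are
+// clamped to INT64_MIN/INT64_MAX.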
+int internal_snprintf(char *buffer, uptr length, const char *format, ...);
+
+// Return true if all bytes in [mem, mem+size) are zero.
+// Optimized for the case when the result is true.
+bool mem_is_zero(const char *mem, uptr size);
+
+
+// Memory
+void *internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, u64 offset);
+int internal_munmap(void *addr, uptr length);
+
+// I/O
+const fd_t kInvalidFd = -1;
+const fd_t kStdinFd = 0;
+const fd_t kStdoutFd = 1;
+const fd_t kStderrFd = 2;
+int internal_close(fd_t fd);
+int internal_isatty(fd_t fd);
+
+// Use __sanitizer::OpenFile() instead.
+fd_t internal_open(const char *filename, int flags);
+fd_t internal_open(const char *filename, int flags, u32 mode);
+
+uptr internal_read(fd_t fd, void *buf, uptr count);
+uptr internal_write(fd_t fd, const void *buf, uptr count);
+
+// OS
+uptr internal_filesize(fd_t fd); // -1 on error.
+int internal_stat(const char *path, void *buf);
+int internal_lstat(const char *path, void *buf);
+int internal_fstat(fd_t fd, void *buf);
+int internal_dup2(int oldfd, int newfd);
+uptr internal_readlink(const char *path, char *buf, uptr bufsize);
+void NORETURN internal__exit(int exitcode);
+
+// Threading
+int internal_sched_yield();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIBC_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_linux.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_linux.cc
new file mode 100644
index 000000000..06e5a0a64
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_linux.cc
@@ -0,0 +1,542 @@
+//===-- sanitizer_linux.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+#ifdef __linux__
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+#include <unwind.h>
+#include <errno.h>
+
+// <linux/futex.h> is broken on some Linux distributions.
+const int FUTEX_WAIT = 0;
+const int FUTEX_WAKE = 1;
+
+// Are we using 32-bit or 64-bit syscalls?
+// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
+// but it still needs to use 64-bit syscalls.
+#if defined(__x86_64__) || SANITIZER_WORDSIZE == 64
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
+#else
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
+#endif
+
+namespace __sanitizer {
+
+// --------------- sanitizer_libc.h
+void *internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, u64 offset) {
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
+#else
+ return (void *)syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
+#endif
+}
+
+int internal_munmap(void *addr, uptr length) {
+ return syscall(__NR_munmap, addr, length);
+}
+
+int internal_close(fd_t fd) {
+ return syscall(__NR_close, fd);
+}
+
+fd_t internal_open(const char *filename, int flags) {
+ return syscall(__NR_open, filename, flags);
+}
+
+fd_t internal_open(const char *filename, int flags, u32 mode) {
+ return syscall(__NR_open, filename, flags, mode);
+}
+
+fd_t OpenFile(const char *filename, bool write) {
+ return internal_open(filename,
+ write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ sptr res;
+ HANDLE_EINTR(res, (sptr)syscall(__NR_read, fd, buf, count));
+ return res;
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ sptr res;
+ HANDLE_EINTR(res, (sptr)syscall(__NR_write, fd, buf, count));
+ return res;
+}
+
+int internal_stat(const char *path, void *buf) {
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return syscall(__NR_stat, path, buf);
+#else
+ return syscall(__NR_stat64, path, buf);
+#endif
+}
+
+int internal_lstat(const char *path, void *buf) {
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return syscall(__NR_lstat, path, buf);
+#else
+ return syscall(__NR_lstat64, path, buf);
+#endif
+}
+
+int internal_fstat(fd_t fd, void *buf) {
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return syscall(__NR_fstat, fd, buf);
+#else
+ return syscall(__NR_fstat64, fd, buf);
+#endif
+}
+
+uptr internal_filesize(fd_t fd) {
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ struct stat st;
+#else
+ struct stat64 st;
+#endif
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+int internal_dup2(int oldfd, int newfd) {
+ return syscall(__NR_dup2, oldfd, newfd);
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+ return (uptr)syscall(__NR_readlink, path, buf, bufsize);
+}
+
+int internal_sched_yield() {
+ return syscall(__NR_sched_yield);
+}
+
+void internal__exit(int exitcode) {
+ syscall(__NR_exit_group, exitcode);
+ Die(); // Unreachable.
+}
+
+// ----------------- sanitizer_common.h
+bool FileExists(const char *filename) {
+#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ struct stat st;
+ if (syscall(__NR_stat, filename, &st))
+ return false;
+#else
+ struct stat64 st;
+ if (syscall(__NR_stat64, filename, &st))
+ return false;
+#endif
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
+
+uptr GetTid() {
+ return syscall(__NR_gettid);
+}
+
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ static const uptr kMaxThreadStackSize = 256 * (1 << 20); // 256M
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ if (at_initialization) {
+ // This is the main thread. Libpthread may not be initialized yet.
+ struct rlimit rl;
+ CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
+
+ // Find the mapping that contains a stack variable.
+ MemoryMappingLayout proc_maps;
+ uptr start, end, offset;
+ uptr prev_end = 0;
+ while (proc_maps.Next(&start, &end, &offset, 0, 0)) {
+ if ((uptr)&rl < end)
+ break;
+ prev_end = end;
+ }
+ CHECK((uptr)&rl >= start && (uptr)&rl < end);
+
+ // Get stacksize from rlimit, but clip it so that it does not overlap
+ // with other mappings.
+ uptr stacksize = rl.rlim_cur;
+ if (stacksize > end - prev_end)
+ stacksize = end - prev_end;
+ // When running with unlimited stack size, we still want to set some limit.
+ // The unlimited stack size is caused by 'ulimit -s unlimited'.
+ // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
+ if (stacksize > kMaxThreadStackSize)
+ stacksize = kMaxThreadStackSize;
+ *stack_top = end;
+ *stack_bottom = end - stacksize;
+ return;
+ }
+ pthread_attr_t attr;
+ CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
+ uptr stacksize = 0;
+ void *stackaddr = 0;
+ pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
+ pthread_attr_destroy(&attr);
+
+ *stack_top = (uptr)stackaddr + stacksize;
+ *stack_bottom = (uptr)stackaddr;
+ CHECK(stacksize < kMaxThreadStackSize); // Sanity check.
+}
+
+// Like getenv, but reads env directly from /proc and does not use libc.
+// This function should be called first inside __asan_init.
+const char *GetEnv(const char *name) {
+ static char *environ;
+ static uptr len;
+ static bool inited;
+ if (!inited) {
+ inited = true;
+ uptr environ_size;
+ len = ReadFileToBuffer("/proc/self/environ",
+ &environ, &environ_size, 1 << 26);
+ }
+ if (!environ || len == 0) return 0;
+ uptr namelen = internal_strlen(name);
+ const char *p = environ;
+ while (*p != '\0') { // will happen at the \0\0 that terminates the buffer
+ // proc file has the format NAME=value\0NAME=value\0NAME=value\0...
+ const char* endp =
+ (char*)internal_memchr(p, '\0', len - (p - environ));
+ if (endp == 0) // this entry isn't NUL terminated
+ return 0;
+ else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match.
+ return p + namelen + 1; // point after =
+ p = endp + 1;
+ }
+ return 0; // Not found.
+}
+
+#ifdef __GLIBC__
+
+extern "C" {
+ extern void *__libc_stack_end;
+}
+
+static void GetArgsAndEnv(char ***argv, char ***envp) {
+ uptr *stack_end = (uptr *)__libc_stack_end;
+ int argc = *stack_end;
+ *argv = (char**)(stack_end + 1);
+ *envp = (char**)(stack_end + argc + 2);
+}
+
+#else // __GLIBC__
+
+static void ReadNullSepFileToArray(const char *path, char ***arr,
+ int arr_size) {
+ char *buff;
+ uptr buff_size = 0;
+ *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
+ ReadFileToBuffer(path, &buff, &buff_size, 1024 * 1024);
+ (*arr)[0] = buff;
+ int count, i;
+ for (count = 1, i = 1; ; i++) {
+ if (buff[i] == 0) {
+ if (buff[i+1] == 0) break;
+ (*arr)[count] = &buff[i+1];
+ CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible.
+ count++;
+ }
+ }
+ (*arr)[count] = 0;
+}
+
+static void GetArgsAndEnv(char ***argv, char ***envp) {
+ static const int kMaxArgv = 2000, kMaxEnvp = 2000;
+ ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
+ ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
+}
+
+#endif // __GLIBC__
+
+void ReExec() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ execve("/proc/self/exe", argv, envp);
+ Printf("execve failed, errno %d\n", errno);
+ Die();
+}
+
+void PrepareForSandboxing() {
+ // Some kinds of sandboxes may forbid filesystem access, so we won't be able
+  // to read the file mappings from /proc/self/maps. Luckily, the process
+  // won't be able to load additional libraries either, so it's fine to use
+  // the cached mappings.
+ MemoryMappingLayout::CacheMemoryMappings();
+}
+
+// ----------------- sanitizer_procmaps.h
+// Linker initialized.
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
+StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
+
+MemoryMappingLayout::MemoryMappingLayout() {
+ proc_self_maps_.len =
+ ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
+ &proc_self_maps_.mmaped_size, 1 << 26);
+ if (proc_self_maps_.mmaped_size == 0) {
+ LoadFromCache();
+ CHECK_GT(proc_self_maps_.len, 0);
+ }
+ // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
+ Reset();
+ // FIXME: in the future we may want to cache the mappings on demand only.
+ CacheMemoryMappings();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+ // Only unmap the buffer if it is different from the cached one. Otherwise
+ // it will be unmapped when the cache is refreshed.
+ if (proc_self_maps_.data != cached_proc_self_maps_.data) {
+ UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+ }
+}
+
+void MemoryMappingLayout::Reset() {
+ current_ = proc_self_maps_.data;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ SpinMutexLock l(&cache_lock_);
+ // Don't invalidate the cache if the mappings are unavailable.
+ ProcSelfMapsBuff old_proc_self_maps;
+ old_proc_self_maps = cached_proc_self_maps_;
+ cached_proc_self_maps_.len =
+ ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
+ &cached_proc_self_maps_.mmaped_size, 1 << 26);
+ if (cached_proc_self_maps_.mmaped_size == 0) {
+ cached_proc_self_maps_ = old_proc_self_maps;
+ } else {
+ if (old_proc_self_maps.mmaped_size) {
+ UnmapOrDie(old_proc_self_maps.data,
+ old_proc_self_maps.mmaped_size);
+ }
+ }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ SpinMutexLock l(&cache_lock_);
+ if (cached_proc_self_maps_.data) {
+ proc_self_maps_ = cached_proc_self_maps_;
+ }
+}
+
+// Parse a hex value in str and update str.
+static uptr ParseHex(char **str) {
+ uptr x = 0;
+ char *s;
+ for (s = *str; ; s++) {
+ char c = *s;
+ uptr v = 0;
+ if (c >= '0' && c <= '9')
+ v = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ v = c - 'a' + 10;
+ else if (c >= 'A' && c <= 'F')
+ v = c - 'A' + 10;
+ else
+ break;
+ x = x * 16 + v;
+ }
+ *str = s;
+ return x;
+}
+
+static bool IsOneOf(char c, char c1, char c2) {
+  return c == c1 || c == c2;
+}
+
+static bool IsDecimal(char c) {
+ return c >= '0' && c <= '9';
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size) {
+ char *last = proc_self_maps_.data + proc_self_maps_.len;
+ if (current_ >= last) return false;
+ uptr dummy;
+ if (!start) start = &dummy;
+ if (!end) end = &dummy;
+ if (!offset) offset = &dummy;
+ char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
+ if (next_line == 0)
+ next_line = last;
+ // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
+ *start = ParseHex(&current_);
+ CHECK_EQ(*current_++, '-');
+ *end = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+  CHECK(IsOneOf(*current_++, '-', 'r'));
+  CHECK(IsOneOf(*current_++, '-', 'w'));
+  CHECK(IsOneOf(*current_++, '-', 'x'));
+  CHECK(IsOneOf(*current_++, 's', 'p'));
+ CHECK_EQ(*current_++, ' ');
+ *offset = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ':');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ while (IsDecimal(*current_))
+ current_++;
+ CHECK_EQ(*current_++, ' ');
+ // Skip spaces.
+ while (current_ < next_line && *current_ == ' ')
+ current_++;
+ // Fill in the filename.
+ uptr i = 0;
+ while (current_ < next_line) {
+ if (filename && i < filename_size - 1)
+ filename[i++] = *current_;
+ current_++;
+ }
+ if (filename && i < filename_size)
+ filename[i] = 0;
+ current_ = next_line + 1;
+ return true;
+}
+
+// Gets the object name and the offset by walking MemoryMappingLayout.
+bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[],
+ uptr filename_size) {
+ return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
+}
+
+bool SanitizerSetThreadName(const char *name) {
+#ifdef PR_SET_NAME
+ return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
+#else
+ return false;
+#endif
+}
+
+bool SanitizerGetThreadName(char *name, int max_len) {
+#ifdef PR_GET_NAME
+ char buff[17];
+ if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
+ return false;
+ internal_strncpy(name, buff, max_len);
+ name[max_len] = 0;
+ return true;
+#else
+ return false;
+#endif
+}
+
+#ifndef SANITIZER_GO
+//------------------------- SlowUnwindStack -----------------------------------
+#ifdef __arm__
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#ifdef __arm__
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return _Unwind_GetIP(ctx);
+#endif
+}
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ StackTrace *b = (StackTrace*)param;
+ CHECK(b->size < b->max_size);
+ uptr pc = Unwind_GetIP(ctx);
+ b->trace[b->size++] = pc;
+ if (b->size == b->max_size) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
+}
+
+static bool MatchPc(uptr cur_pc, uptr trace_pc) {
+ return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
+}
+
+void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+ this->size = 0;
+ this->max_size = max_depth;
+ if (max_depth > 1) {
+ _Unwind_Backtrace(Unwind_Trace, this);
+ // We need to pop a few frames so that pc is on top.
+ // trace[0] belongs to the current function so we always pop it.
+ int to_pop = 1;
+ /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
+ else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
+ else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
+ else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
+ else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
+ this->PopStackFrames(to_pop);
+ }
+ this->trace[0] = pc;
+}
+
+#endif // #ifndef SANITIZER_GO
+
+enum MutexState {
+ MtxUnlocked = 0,
+ MtxLocked = 1,
+ MtxSleeping = 2
+};
+
+BlockingMutex::BlockingMutex(LinkerInitialized) {
+ CHECK_EQ(owner_, 0);
+}
+
+void BlockingMutex::Lock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
+ syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
+ CHECK_NE(v, MtxUnlocked);
+ if (v == MtxSleeping)
+ syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
+}
+
+} // namespace __sanitizer
+
+#endif // __linux__
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_list.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_list.h
new file mode 100644
index 000000000..9692e01b8
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_list.h
@@ -0,0 +1,122 @@
+//===-- sanitizer_list.h ----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of a list class to be used by
+// ThreadSanitizer and other run-times.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LIST_H
+#define SANITIZER_LIST_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// Intrusive singly-linked list with size(), push_back(), push_front(),
+// pop_front(), append_front() and append_back().
+// This class should be a POD (so that it can be put into TLS)
+// and an object with all zero fields should represent a valid empty list.
+// This class does not have a CTOR, so clear() should be called on all
+// non-zero-initialized objects before using.
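+//
+// Usage sketch (illustrative; "Job" is a hypothetical item type):
+//   struct Job { Job *next; uptr id; };
+//   static IntrusiveList<Job> queue;   // zero-initialized == valid empty list
+//   queue.push_back(job);
+//   Job *j = queue.front();
+//   queue.pop_front();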
+template<class Item>
+struct IntrusiveList {
+ void clear() {
+ first_ = last_ = 0;
+ size_ = 0;
+ }
+
+ bool empty() const { return size_ == 0; }
+ uptr size() const { return size_; }
+
+ void push_back(Item *x) {
+ if (empty()) {
+ x->next = 0;
+ first_ = last_ = x;
+ size_ = 1;
+ } else {
+ x->next = 0;
+ last_->next = x;
+ last_ = x;
+ size_++;
+ }
+ }
+
+ void push_front(Item *x) {
+ if (empty()) {
+ x->next = 0;
+ first_ = last_ = x;
+ size_ = 1;
+ } else {
+ x->next = first_;
+ first_ = x;
+ size_++;
+ }
+ }
+
+ void pop_front() {
+ CHECK(!empty());
+ first_ = first_->next;
+ if (first_ == 0)
+ last_ = 0;
+ size_--;
+ }
+
+ Item *front() { return first_; }
+ Item *back() { return last_; }
+
+ void append_front(IntrusiveList<Item> *l) {
+ CHECK_NE(this, l);
+ if (l->empty())
+ return;
+ if (empty()) {
+ *this = *l;
+ } else if (!l->empty()) {
+ l->last_->next = first_;
+ first_ = l->first_;
+ size_ += l->size();
+ }
+ l->clear();
+ }
+
+ void append_back(IntrusiveList<Item> *l) {
+ CHECK_NE(this, l);
+ if (l->empty())
+ return;
+ if (empty()) {
+ *this = *l;
+ } else {
+ last_->next = l->first_;
+ last_ = l->last_;
+ size_ += l->size();
+ }
+ l->clear();
+ }
+
+ void CheckConsistency() {
+ if (size_ == 0) {
+ CHECK_EQ(first_, 0);
+ CHECK_EQ(last_, 0);
+ } else {
+ uptr count = 0;
+ for (Item *i = first_; ; i = i->next) {
+ count++;
+ if (i == last_) break;
+ }
+ CHECK_EQ(size(), count);
+ CHECK_EQ(last_->next, 0);
+ }
+ }
+
+// private, don't use directly.
+ uptr size_;
+ Item *first_;
+ Item *last_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIST_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_mac.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_mac.cc
new file mode 100644
index 000000000..d7885bb35
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_mac.cc
@@ -0,0 +1,320 @@
+//===-- sanitizer_mac.cc --------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements mac-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#ifdef __APPLE__
+// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
+// the clients will most certainly use 64-bit ones as well.
+#ifndef _DARWIN_USE_64_BIT_INODE
+#define _DARWIN_USE_64_BIT_INODE 1
+#endif
+#include <stdio.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_procmaps.h"
+
+#include <crt_externs.h> // for _NSGetEnviron
+#include <fcntl.h>
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <libkern/OSAtomic.h>
+
+namespace __sanitizer {
+
+// ---------------------- sanitizer_libc.h
+void *internal_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, u64 offset) {
+ return mmap(addr, length, prot, flags, fd, offset);
+}
+
+int internal_munmap(void *addr, uptr length) {
+ return munmap(addr, length);
+}
+
+int internal_close(fd_t fd) {
+ return close(fd);
+}
+
+fd_t internal_open(const char *filename, int flags) {
+ return open(filename, flags);
+}
+
+fd_t internal_open(const char *filename, int flags, u32 mode) {
+ return open(filename, flags, mode);
+}
+
+fd_t OpenFile(const char *filename, bool write) {
+ return internal_open(filename,
+ write ? O_WRONLY | O_CREAT : O_RDONLY, 0660);
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ return read(fd, buf, count);
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ return write(fd, buf, count);
+}
+
+int internal_stat(const char *path, void *buf) {
+ return stat(path, (struct stat *)buf);
+}
+
+int internal_lstat(const char *path, void *buf) {
+ return lstat(path, (struct stat *)buf);
+}
+
+int internal_fstat(fd_t fd, void *buf) {
+ return fstat(fd, (struct stat *)buf);
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+int internal_dup2(int oldfd, int newfd) {
+ return dup2(oldfd, newfd);
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+ return readlink(path, buf, bufsize);
+}
+
+int internal_sched_yield() {
+ return sched_yield();
+}
+
+void internal__exit(int exitcode) {
+ _exit(exitcode);
+}
+
+// ----------------- sanitizer_common.h
+bool FileExists(const char *filename) {
+ struct stat st;
+ if (stat(filename, &st))
+ return false;
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
+
+uptr GetTid() {
+ return reinterpret_cast<uptr>(pthread_self());
+}
+
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ uptr stacksize = pthread_get_stacksize_np(pthread_self());
+ void *stackaddr = pthread_get_stackaddr_np(pthread_self());
+ *stack_top = (uptr)stackaddr;
+ *stack_bottom = *stack_top - stacksize;
+}
+
+const char *GetEnv(const char *name) {
+ char ***env_ptr = _NSGetEnviron();
+ CHECK(env_ptr);
+ char **environ = *env_ptr;
+ CHECK(environ);
+ uptr name_len = internal_strlen(name);
+ while (*environ != 0) {
+ uptr len = internal_strlen(*environ);
+ if (len > name_len) {
+ const char *p = *environ;
+ if (!internal_memcmp(p, name, name_len) &&
+ p[name_len] == '=') { // Match.
+ return *environ + name_len + 1; // String starting after =.
+ }
+ }
+ environ++;
+ }
+ return 0;
+}
+
+void ReExec() {
+ UNIMPLEMENTED();
+}
+
+void PrepareForSandboxing() {
+ // Nothing here for now.
+}
+
+// ----------------- sanitizer_procmaps.h
+
+MemoryMappingLayout::MemoryMappingLayout() {
+ Reset();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+}
+
+// More information about Mach-O headers can be found in mach-o/loader.h
+// Each Mach-O image has a header (mach_header or mach_header_64) starting with
+// a magic number, and a list of linker load commands directly following the
+// header.
+// A load command is at least two 32-bit words: the command type and the
+// command size in bytes. We're interested only in segment load commands
+// (LC_SEGMENT and LC_SEGMENT_64), which indicate that a part of the file is
+// mapped into the task's address space.
+// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
+// segment_command_64 correspond to the memory address, memory size and the
+// file offset of the current memory segment.
+// Because these fields are taken from the images as is, one needs to add
+// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
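+//
+// Usage sketch (illustrative; mirrors how the Linux version walks the maps):
+//   MemoryMappingLayout proc_maps;
+//   uptr start, end, offset;
+//   char name[256];
+//   while (proc_maps.Next(&start, &end, &offset, name, sizeof(name))) {
+//     // one mapped segment per iteration
+//   }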
+
+void MemoryMappingLayout::Reset() {
+ // Count down from the top.
+ // TODO(glider): as per man 3 dyld, iterating over the headers with
+ // _dyld_image_count is thread-unsafe. We need to register callbacks for
+ // adding and removing images which will invalidate the MemoryMappingLayout
+ // state.
+ current_image_ = _dyld_image_count();
+ current_load_cmd_count_ = -1;
+ current_load_cmd_addr_ = 0;
+ current_magic_ = 0;
+ current_filetype_ = 0;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ // No-op on Mac for now.
+}
+
+// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
+// Google Perftools, http://code.google.com/p/google-perftools.
+
+// NextSegmentLoad scans the current image for the next segment load command
+// and returns the start and end addresses and file offset of the corresponding
+// segment.
+// Note that the segment addresses are not necessarily sorted.
+template<u32 kLCSegment, typename SegmentCommand>
+bool MemoryMappingLayout::NextSegmentLoad(
+ uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size) {
+ const char* lc = current_load_cmd_addr_;
+ current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
+ if (((const load_command *)lc)->cmd == kLCSegment) {
+ const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
+ const SegmentCommand* sc = (const SegmentCommand *)lc;
+ if (start) *start = sc->vmaddr + dlloff;
+ if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
+ if (offset) {
+ if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
+ *offset = sc->vmaddr;
+ } else {
+ *offset = sc->fileoff;
+ }
+ }
+ if (filename) {
+ internal_strncpy(filename, _dyld_get_image_name(current_image_),
+ filename_size);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size) {
+ for (; current_image_ >= 0; current_image_--) {
+ const mach_header* hdr = _dyld_get_image_header(current_image_);
+ if (!hdr) continue;
+ if (current_load_cmd_count_ < 0) {
+      // Set up for this image.
+ current_load_cmd_count_ = hdr->ncmds;
+ current_magic_ = hdr->magic;
+ current_filetype_ = hdr->filetype;
+ switch (current_magic_) {
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
+ break;
+ }
+ default: {
+ continue;
+ }
+ }
+ }
+
+ for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
+ switch (current_magic_) {
+ // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
+ start, end, offset, filename, filename_size))
+ return true;
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
+ start, end, offset, filename, filename_size))
+ return true;
+ break;
+ }
+ }
+ }
+ // If we get here, no more load_cmd's in this image talk about
+ // segments. Go on to the next image.
+ }
+ return false;
+}
+
+bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[],
+ uptr filename_size) {
+ return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
+}
+
+BlockingMutex::BlockingMutex(LinkerInitialized) {
+ // We assume that OS_SPINLOCK_INIT is zero
+}
+
+void BlockingMutex::Lock() {
+ CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
+ CHECK(OS_SPINLOCK_INIT == 0);
+ CHECK(owner_ != (uptr)pthread_self());
+ OSSpinLockLock((OSSpinLock*)&opaque_storage_);
+ CHECK(!owner_);
+ owner_ = (uptr)pthread_self();
+}
+
+void BlockingMutex::Unlock() {
+ CHECK(owner_ == (uptr)pthread_self());
+ owner_ = 0;
+ OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
+}
+
+} // namespace __sanitizer
+
+#endif // __APPLE__
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_mutex.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_mutex.h
new file mode 100644
index 000000000..27009118e
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_mutex.h
@@ -0,0 +1,121 @@
+//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_MUTEX_H
+#define SANITIZER_MUTEX_H
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+class StaticSpinMutex {
+ public:
+ void Init() {
+ atomic_store(&state_, 0, memory_order_relaxed);
+ }
+
+ void Lock() {
+ if (TryLock())
+ return;
+ LockSlow();
+ }
+
+ bool TryLock() {
+ return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
+ }
+
+ void Unlock() {
+ atomic_store(&state_, 0, memory_order_release);
+ }
+
+ private:
+ atomic_uint8_t state_;
+
+ void NOINLINE LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ if (atomic_load(&state_, memory_order_relaxed) == 0
+ && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
+ return;
+ }
+ }
+};
+
+class SpinMutex : public StaticSpinMutex {
+ public:
+ SpinMutex() {
+ Init();
+ }
+
+ private:
+ SpinMutex(const SpinMutex&);
+ void operator=(const SpinMutex&);
+};
+
+class BlockingMutex {
+ public:
+ explicit BlockingMutex(LinkerInitialized);
+ void Lock();
+ void Unlock();
+ private:
+ uptr opaque_storage_[10];
+ uptr owner_; // for debugging
+};
+
+template<typename MutexType>
+class GenericScopedLock {
+ public:
+ explicit GenericScopedLock(MutexType *mu)
+ : mu_(mu) {
+ mu_->Lock();
+ }
+
+ ~GenericScopedLock() {
+ mu_->Unlock();
+ }
+
+ private:
+ MutexType *mu_;
+
+ GenericScopedLock(const GenericScopedLock&);
+ void operator=(const GenericScopedLock&);
+};
+
+template<typename MutexType>
+class GenericScopedReadLock {
+ public:
+ explicit GenericScopedReadLock(MutexType *mu)
+ : mu_(mu) {
+ mu_->ReadLock();
+ }
+
+ ~GenericScopedReadLock() {
+ mu_->ReadUnlock();
+ }
+
+ private:
+ MutexType *mu_;
+
+ GenericScopedReadLock(const GenericScopedReadLock&);
+ void operator=(const GenericScopedReadLock&);
+};
+
+typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
+typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
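+
+// Usage sketch (illustrative): scoped locks acquire in the constructor and
+// release in the destructor.
+//   static SpinMutex mu;
+//   {
+//     SpinMutexLock l(&mu);   // unlocked automatically at the end of the scope
+//     // ...critical section...
+//   }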
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MUTEX_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_placement_new.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_placement_new.h
new file mode 100644
index 000000000..e32d65702
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_placement_new.h
@@ -0,0 +1,31 @@
+//===-- sanitizer_placement_new.h -------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//
+// The file provides 'placement new'.
+// Do not include it into header files, only into source files.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PLACEMENT_NEW_H
+#define SANITIZER_PLACEMENT_NEW_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+#if (SANITIZER_WORDSIZE == 64) || defined(__APPLE__)
+typedef uptr operator_new_ptr_type;
+#else
+typedef u32 operator_new_ptr_type;
+#endif
+} // namespace __sanitizer
+
+inline void *operator new(__sanitizer::operator_new_ptr_type sz, void *p) {
+ return p;
+}
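+
+// Usage sketch (illustrative; "Foo" and "storage" are hypothetical):
+//   static char storage[sizeof(Foo)] ALIGNED(8);
+//   Foo *f = new(storage) Foo();   // constructs Foo in preallocated memory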
+
+#endif // SANITIZER_PLACEMENT_NEW_H
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
new file mode 100644
index 000000000..9b40c0cc5
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
@@ -0,0 +1,46 @@
+//===-- sanitizer_platform_interceptors.h -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines macros that tell whether sanitizer tools can/should
+// intercept given library functions on a given platform.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_internal_defs.h"
+
+#if !defined(_WIN32)
+# define SI_NOT_WINDOWS 1
+# include "sanitizer_platform_limits_posix.h"
+#else
+# define SI_NOT_WINDOWS 0
+#endif
+
+#if defined(__linux__) && !defined(ANDROID)
+# define SI_LINUX_NOT_ANDROID 1
+#else
+# define SI_LINUX_NOT_ANDROID 0
+#endif
+
+#if defined(__linux__)
+# define SI_LINUX 1
+#else
+# define SI_LINUX 0
+#endif
+
+# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
+
+# define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
+# define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
+# define SANITIZER_INTERCEPT_PRCTL SI_LINUX_NOT_ANDROID
+
+# define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
+
+# define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
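+
+// Usage sketch (illustrative; INTERCEPTOR is provided by the tools'
+// interception layer, not by this header):
+//   #if SANITIZER_INTERCEPT_PRCTL
+//   INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
+//               unsigned long arg4, unsigned long arg5) { ... }
+//   #endif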
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
new file mode 100644
index 000000000..c4be1aa42
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
@@ -0,0 +1,68 @@
+//===-- sanitizer_platform_limits_posix.cc --------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific POSIX data structures.
+//===----------------------------------------------------------------------===//
+
+#if defined(__linux__) || defined(__APPLE__)
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
+
+#include <dirent.h>
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <time.h>
+
+#if defined(__linux__)
+#include <sys/vfs.h>
+#include <sys/epoll.h>
+#endif // __linux__
+
+namespace __sanitizer {
+ unsigned struct_utsname_sz = sizeof(struct utsname);
+ unsigned struct_stat_sz = sizeof(struct stat);
+ unsigned struct_stat64_sz = sizeof(struct stat64);
+ unsigned struct_rusage_sz = sizeof(struct rusage);
+ unsigned struct_tm_sz = sizeof(struct tm);
+
+#if defined(__linux__)
+ unsigned struct_rlimit_sz = sizeof(struct rlimit);
+ unsigned struct_dirent_sz = sizeof(struct dirent);
+ unsigned struct_statfs_sz = sizeof(struct statfs);
+ unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
+#endif // __linux__
+
+#if defined(__linux__) && !defined(__ANDROID__)
+ unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
+ unsigned struct_statfs64_sz = sizeof(struct statfs64);
+#endif // __linux__ && !__ANDROID__
+
+ void* __sanitizer_get_msghdr_iov_iov_base(void* msg, int idx) {
+ return ((struct msghdr *)msg)->msg_iov[idx].iov_base;
+ }
+
+ uptr __sanitizer_get_msghdr_iov_iov_len(void* msg, int idx) {
+ return ((struct msghdr *)msg)->msg_iov[idx].iov_len;
+ }
+
+ uptr __sanitizer_get_msghdr_iovlen(void* msg) {
+ return ((struct msghdr *)msg)->msg_iovlen;
+ }
+
+ uptr __sanitizer_get_socklen_t(void* socklen_ptr) {
+ return *(socklen_t*)socklen_ptr;
+ }
+} // namespace __sanitizer
+
+#endif // __linux__ || __APPLE__
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
new file mode 100644
index 000000000..dd53da94b
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -0,0 +1,41 @@
+//===-- sanitizer_platform_limits_posix.h ---------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific POSIX data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
+#define SANITIZER_PLATFORM_LIMITS_POSIX_H
+
+namespace __sanitizer {
+ extern unsigned struct_utsname_sz;
+ extern unsigned struct_stat_sz;
+ extern unsigned struct_stat64_sz;
+ extern unsigned struct_rusage_sz;
+ extern unsigned struct_tm_sz;
+
+#if defined(__linux__)
+ extern unsigned struct_rlimit_sz;
+ extern unsigned struct_dirent_sz;
+ extern unsigned struct_statfs_sz;
+ extern unsigned struct_epoll_event_sz;
+#endif // __linux__
+
+#if defined(__linux__) && !defined(__ANDROID__)
+ extern unsigned struct_rlimit64_sz;
+ extern unsigned struct_statfs64_sz;
+#endif // __linux__ && !__ANDROID__
+
+ void* __sanitizer_get_msghdr_iov_iov_base(void* msg, int idx);
+ uptr __sanitizer_get_msghdr_iov_iov_len(void* msg, int idx);
+ uptr __sanitizer_get_msghdr_iovlen(void* msg);
+ uptr __sanitizer_get_socklen_t(void* socklen_ptr);
+} // namespace __sanitizer
+
+#endif
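
The msghdr accessors above exist so that tool code can walk a struct msghdr's iovec array without pulling <sys/socket.h> into sanitizer sources. A minimal sketch, assuming sanitizer_platform_limits_posix.h and the internal defs are included; check_range is an illustrative callback, not a library function:

    static void ForEachMsghdrBuffer(void *msg,
                                    void (*check_range)(void *p, uptr len)) {
      int n = (int)__sanitizer_get_msghdr_iovlen(msg);
      for (int i = 0; i < n; i++) {
        void *base = __sanitizer_get_msghdr_iov_iov_base(msg, i);
        uptr len = __sanitizer_get_msghdr_iov_iov_len(msg, i);
        check_range(base, len);  // e.g. mark these bytes as written
      }
    }
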
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_posix.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_posix.cc
new file mode 100644
index 000000000..1c6ff0a2e
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_posix.cc
@@ -0,0 +1,228 @@
+//===-- sanitizer_posix.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements POSIX-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+#if defined(__linux__) || defined(__APPLE__)
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_procmaps.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+namespace __sanitizer {
+
+// ------------- sanitizer_common.h
+uptr GetPageSize() {
+ return sysconf(_SC_PAGESIZE);
+}
+
+uptr GetMmapGranularity() {
+ return GetPageSize();
+}
+
+int GetPid() {
+ return getpid();
+}
+
+u32 GetUid() {
+ return getuid();
+}
+
+uptr GetThreadSelf() {
+ return (uptr)pthread_self();
+}
+
+void *MmapOrDie(uptr size, const char *mem_type) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ void *res = internal_mmap(0, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (res == (void*)-1) {
+ static int recursion_count;
+ if (recursion_count) {
+ // The Report() and CHECK calls below may call mmap recursively and fail.
+ // If we went into recursion, just die.
+ RawWrite("ERROR: Failed to mmap\n");
+ Die();
+ }
+ recursion_count++;
+ Report("ERROR: %s failed to allocate 0x%zx (%zd) bytes of %s: %s\n",
+ SanitizerToolName, size, size, mem_type, strerror(errno));
+ DumpProcessMap();
+ CHECK("unable to mmap" && 0);
+ }
+ return res;
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (!addr || !size) return;
+ int res = internal_munmap(addr, size);
+ if (res != 0) {
+ Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, size, size, addr);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
+ uptr PageSize = GetPageSizeCached();
+ void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+ RoundUpTo(size, PageSize),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
+ -1, 0);
+ if (p == (void*)-1)
+ Report("ERROR: "
+ "%s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ SanitizerToolName, size, size, fixed_addr, errno);
+ return p;
+}
+
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+ uptr PageSize = GetPageSizeCached();
+ void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+ RoundUpTo(size, PageSize),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ -1, 0);
+ if (p == (void*)-1) {
+ Report("ERROR:"
+ " %s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ SanitizerToolName, size, size, fixed_addr, errno);
+ CHECK("unable to mmap" && 0);
+ }
+ return p;
+}
+
+void *Mprotect(uptr fixed_addr, uptr size) {
+ return internal_mmap((void*)fixed_addr, size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
+ -1, 0);
+}
+
+void FlushUnneededShadowMemory(uptr addr, uptr size) {
+ madvise((void*)addr, size, MADV_DONTNEED);
+}
+
+void *MapFileToMemory(const char *file_name, uptr *buff_size) {
+ fd_t fd = OpenFile(file_name, false);
+ CHECK_NE(fd, kInvalidFd);
+ uptr fsize = internal_filesize(fd);
+ CHECK_NE(fsize, (uptr)-1);
+ CHECK_GT(fsize, 0);
+ *buff_size = RoundUpTo(fsize, GetPageSizeCached());
+ void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ return (map == MAP_FAILED) ? 0 : map;
+}
+
+
+static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
+ uptr start2, uptr end2) {
+ CHECK(start1 <= end1);
+ CHECK(start2 <= end2);
+ return (end1 < start2) || (end2 < start1);
+}
+
+// FIXME: this is thread-unsafe, but should not cause problems most of the time.
+// When the shadow is mapped only a single thread usually exists (plus maybe
+// several worker threads on Mac, which aren't expected to map big chunks of
+// memory).
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
+ MemoryMappingLayout procmaps;
+ uptr start, end;
+ while (procmaps.Next(&start, &end,
+ /*offset*/0, /*filename*/0, /*filename_size*/0)) {
+ if (!IntervalsAreSeparate(start, end, range_start, range_end))
+ return false;
+ }
+ return true;
+}
+
+void DumpProcessMap() {
+ MemoryMappingLayout proc_maps;
+ uptr start, end;
+ const sptr kBufSize = 4095;
+ char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
+ Report("Process memory map follows:\n");
+ while (proc_maps.Next(&start, &end, /* file_offset */0,
+ filename, kBufSize)) {
+ Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
+ }
+ Report("End of process memory map.\n");
+ UnmapOrDie(filename, kBufSize);
+}
+
+const char *GetPwd() {
+ return GetEnv("PWD");
+}
+
+void DisableCoreDumper() {
+ struct rlimit nocore;
+ nocore.rlim_cur = 0;
+ nocore.rlim_max = 0;
+ setrlimit(RLIMIT_CORE, &nocore);
+}
+
+bool StackSizeIsUnlimited() {
+ struct rlimit rlim;
+ CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim));
+ return (rlim.rlim_cur == (uptr)-1);
+}
+
+void SetStackSizeLimitInBytes(uptr limit) {
+ struct rlimit rlim;
+ rlim.rlim_cur = limit;
+ rlim.rlim_max = limit;
+ if (setrlimit(RLIMIT_STACK, &rlim)) {
+ Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
+ Die();
+ }
+ CHECK(!StackSizeIsUnlimited());
+}
+
+void SleepForSeconds(int seconds) {
+ sleep(seconds);
+}
+
+void SleepForMillis(int millis) {
+ usleep(millis * 1000);
+}
+
+void Abort() {
+ abort();
+}
+
+int Atexit(void (*function)(void)) {
+#ifndef SANITIZER_GO
+ return atexit(function);
+#else
+ return 0;
+#endif
+}
+
+int internal_isatty(fd_t fd) {
+ return isatty(fd);
+}
+
+} // namespace __sanitizer
+
+#endif // __linux__ || __APPLE__
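
A minimal sketch of how a tool typically combines these primitives when claiming its shadow region at startup; the address constants and the function name are purely illustrative:

    #include "sanitizer_common.h"

    namespace __sanitizer {

    void ReserveShadowExample() {
      const uptr kShadowBeg = 0x00007fff8000ULL;  // illustrative range only
      const uptr kShadowEnd = 0x10007fff7fffULL;
      if (!MemoryRangeIsAvailable(kShadowBeg, kShadowEnd)) {
        Report("ERROR: shadow range is already occupied\n");
        DumpProcessMap();
        Die();
      }
      // NORESERVE mapping: pages are only backed once the tool touches them.
      if (MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg + 1) ==
          (void*)-1)
        Die();
    }

    }  // namespace __sanitizer
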
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_printf.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_printf.cc
new file mode 100644
index 000000000..7771e1d34
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_printf.cc
@@ -0,0 +1,217 @@
+//===-- sanitizer_printf.cc -----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+//
+// Internal printf function, used inside run-time libraries.
+// We can't use libc printf because we intercept some of the functions used
+// inside it.
+//===----------------------------------------------------------------------===//
+
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+#include <stdio.h>
+#include <stdarg.h>
+
+namespace __sanitizer {
+
+static int AppendChar(char **buff, const char *buff_end, char c) {
+ if (*buff < buff_end) {
+ **buff = c;
+ (*buff)++;
+ }
+ return 1;
+}
+
+// Appends number in a given base to buffer. If its length is less than
+// "minimal_num_length", it is padded with leading zeroes.
+static int AppendUnsigned(char **buff, const char *buff_end, u64 num,
+ u8 base, u8 minimal_num_length) {
+ uptr const kMaxLen = 30;
+ RAW_CHECK(base == 10 || base == 16);
+ RAW_CHECK(minimal_num_length < kMaxLen);
+ uptr num_buffer[kMaxLen];
+ uptr pos = 0;
+ do {
+ RAW_CHECK_MSG(pos < kMaxLen, "appendNumber buffer overflow");
+ num_buffer[pos++] = num % base;
+ num /= base;
+ } while (num > 0);
+ if (pos < minimal_num_length) {
+ // Make sure compiler doesn't insert call to memset here.
+ internal_memset(&num_buffer[pos], 0,
+ sizeof(num_buffer[0]) * (minimal_num_length - pos));
+ pos = minimal_num_length;
+ }
+ int result = 0;
+ while (pos-- > 0) {
+ uptr digit = num_buffer[pos];
+ result += AppendChar(buff, buff_end, (digit < 10) ? '0' + digit
+ : 'a' + digit - 10);
+ }
+ return result;
+}
+
+static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num,
+ u8 minimal_num_length) {
+ int result = 0;
+ if (num < 0) {
+ result += AppendChar(buff, buff_end, '-');
+ num = -num;
+ if (minimal_num_length)
+ --minimal_num_length;
+ }
+ result += AppendUnsigned(buff, buff_end, (u64)num, 10, minimal_num_length);
+ return result;
+}
+
+static int AppendString(char **buff, const char *buff_end, const char *s) {
+ if (s == 0)
+ s = "<null>";
+ int result = 0;
+ for (; *s; s++) {
+ result += AppendChar(buff, buff_end, *s);
+ }
+ return result;
+}
+
+static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
+ int result = 0;
+ result += AppendString(buff, buff_end, "0x");
+ result += AppendUnsigned(buff, buff_end, ptr_value, 16,
+ (SANITIZER_WORDSIZE == 64) ? 12 : 8);
+ return result;
+}
+
+int VSNPrintf(char *buff, int buff_length,
+ const char *format, va_list args) {
+ static const char *kPrintfFormatsHelp =
+ "Supported Printf formats: %(0[0-9]*)?(z|ll)?{d,u,x}; %p; %s; %c\n";
+ RAW_CHECK(format);
+ RAW_CHECK(buff_length > 0);
+ const char *buff_end = &buff[buff_length - 1];
+ const char *cur = format;
+ int result = 0;
+ for (; *cur; cur++) {
+ if (*cur != '%') {
+ result += AppendChar(&buff, buff_end, *cur);
+ continue;
+ }
+ cur++;
+ bool have_width = (*cur == '0');
+ int width = 0;
+ if (have_width) {
+ while (*cur >= '0' && *cur <= '9') {
+ have_width = true;
+ width = width * 10 + *cur++ - '0';
+ }
+ }
+ bool have_z = (*cur == 'z');
+ cur += have_z;
+ bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
+ cur += have_ll * 2;
+ s64 dval;
+ u64 uval;
+ bool have_flags = have_width | have_z | have_ll;
+ switch (*cur) {
+ case 'd': {
+ dval = have_ll ? va_arg(args, s64)
+ : have_z ? va_arg(args, sptr)
+ : va_arg(args, int);
+ result += AppendSignedDecimal(&buff, buff_end, dval, width);
+ break;
+ }
+ case 'u':
+ case 'x': {
+ uval = have_ll ? va_arg(args, u64)
+ : have_z ? va_arg(args, uptr)
+ : va_arg(args, unsigned);
+ result += AppendUnsigned(&buff, buff_end, uval,
+ (*cur == 'u') ? 10 : 16, width);
+ break;
+ }
+ case 'p': {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
+ break;
+ }
+ case 's': {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendString(&buff, buff_end, va_arg(args, char*));
+ break;
+ }
+ case 'c': {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendChar(&buff, buff_end, va_arg(args, int));
+ break;
+ }
+ case '%' : {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendChar(&buff, buff_end, '%');
+ break;
+ }
+ default: {
+ RAW_CHECK_MSG(false, kPrintfFormatsHelp);
+ }
+ }
+ }
+ RAW_CHECK(buff <= buff_end);
+ AppendChar(&buff, buff_end + 1, '\0');
+ return result;
+}
+
+static void (*PrintfAndReportCallback)(const char *);
+void SetPrintfAndReportCallback(void (*callback)(const char *)) {
+ PrintfAndReportCallback = callback;
+}
+
+void Printf(const char *format, ...) {
+ const int kLen = 16 * 1024;
+ InternalScopedBuffer<char> buffer(kLen);
+ va_list args;
+ va_start(args, format);
+ int needed_length = VSNPrintf(buffer.data(), kLen, format, args);
+ va_end(args);
+ RAW_CHECK_MSG(needed_length < kLen, "Buffer in Printf is too short!\n");
+ RawWrite(buffer.data());
+ if (PrintfAndReportCallback)
+ PrintfAndReportCallback(buffer.data());
+}
+
+// Writes at most "length" symbols to "buffer" (including trailing '\0').
+// Returns the number of symbols that should have been written to buffer
+// (not including trailing '\0'). Thus, the string is truncated
+// iff return value is not less than "length".
+int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
+ va_list args;
+ va_start(args, format);
+ int needed_length = VSNPrintf(buffer, length, format, args);
+ va_end(args);
+ return needed_length;
+}
+
+// Like Printf, but prints the current PID before the output string.
+void Report(const char *format, ...) {
+ const int kLen = 16 * 1024;
+ InternalScopedBuffer<char> buffer(kLen);
+ int needed_length = internal_snprintf(buffer.data(),
+ kLen, "==%d== ", GetPid());
+ RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
+ va_list args;
+ va_start(args, format);
+ needed_length += VSNPrintf(buffer.data() + needed_length,
+ kLen - needed_length, format, args);
+ va_end(args);
+ RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
+ RawWrite(buffer.data());
+ if (PrintfAndReportCallback)
+ PrintfAndReportCallback(buffer.data());
+}
+
+} // namespace __sanitizer
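
A short usage sketch of the truncation contract documented above internal_snprintf: the return value is the length the output wanted, so a result that is not less than the buffer size signals truncation. FormatHexValue is a hypothetical caller, and the declaration is assumed to be visible via sanitizer_common.h:

    #include "sanitizer_common.h"

    namespace __sanitizer {

    void FormatHexValue(uptr value) {
      char buf[32];
      int needed = internal_snprintf(buf, sizeof(buf), "value = 0x%zx\n", value);
      if (needed >= (int)sizeof(buf)) {
        // Truncated; a caller that needs the full text retries with a larger
        // buffer, which is what Printf/Report do with InternalScopedBuffer.
      }
      RawWrite(buf);
    }

    }  // namespace __sanitizer
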
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_procmaps.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_procmaps.h
new file mode 100644
index 000000000..400fd7a8b
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_procmaps.h
@@ -0,0 +1,110 @@
+//===-- sanitizer_procmaps.h ------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+//
+// Information about the process mappings.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PROCMAPS_H
+#define SANITIZER_PROCMAPS_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+#ifdef _WIN32
+class MemoryMappingLayout {
+ public:
+ MemoryMappingLayout() {}
+ bool GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[], uptr filename_size) {
+ UNIMPLEMENTED();
+ }
+};
+
+#else // _WIN32
+#if defined(__linux__)
+struct ProcSelfMapsBuff {
+ char *data;
+ uptr mmaped_size;
+ uptr len;
+};
+#endif // defined(__linux__)
+
+class MemoryMappingLayout {
+ public:
+ MemoryMappingLayout();
+ bool Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size);
+ void Reset();
+ // Gets the object file name and the offset in that object for a given
+ // address 'addr'. Returns true on success.
+ bool GetObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[], uptr filename_size);
+ // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
+ // to obtain the memory mappings. It should fall back to pre-cached data
+ // instead of aborting.
+ static void CacheMemoryMappings();
+ ~MemoryMappingLayout();
+
+ private:
+ void LoadFromCache();
+ // Default implementation of GetObjectNameAndOffset.
+ // Quite slow, because it iterates through the whole process map for each
+ // lookup.
+ bool IterateForObjectNameAndOffset(uptr addr, uptr *offset,
+ char filename[], uptr filename_size) {
+ Reset();
+ uptr start, end, file_offset;
+ for (int i = 0; Next(&start, &end, &file_offset, filename, filename_size);
+ i++) {
+ if (addr >= start && addr < end) {
+ // Don't subtract 'start' for the first entry:
+ // * If a binary is compiled w/o -pie, then the first entry in
+ // process maps is likely the binary itself (all dynamic libs
+ // are mapped higher in address space). For such a binary,
+ // instruction offset in binary coincides with the actual
+ // instruction address in virtual memory (as code section
+ // is mapped to a fixed memory range).
+ // * If a binary is compiled with -pie, all the modules are
+ // mapped high at address space (in particular, higher than
+ // shadow memory of the tool), so the module can't be the
+ // first entry.
+ *offset = (addr - (i ? start : 0)) + file_offset;
+ return true;
+ }
+ }
+ if (filename_size)
+ filename[0] = '\0';
+ return false;
+ }
+
+# if defined __linux__
+ ProcSelfMapsBuff proc_self_maps_;
+ char *current_;
+
+ // Static mappings cache.
+ static ProcSelfMapsBuff cached_proc_self_maps_;
+ static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_.
+# elif defined __APPLE__
+ template<u32 kLCSegment, typename SegmentCommand>
+ bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size);
+ int current_image_;
+ u32 current_magic_;
+ u32 current_filetype_;
+ int current_load_cmd_count_;
+ char *current_load_cmd_addr_;
+# endif
+};
+
+#endif // _WIN32
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_PROCMAPS_H
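
A sketch of the typical lookup done while printing reports: resolve a pc to module+offset through the interface above. The buffer size and output format are illustrative:

    #include "sanitizer_procmaps.h"
    #include "sanitizer_common.h"

    namespace __sanitizer {

    void PrintModulePlusOffset(uptr pc) {
      MemoryMappingLayout proc_maps;
      char module[512];
      uptr offset;
      if (proc_maps.GetObjectNameAndOffset(pc, &offset, module, sizeof(module)))
        Printf("%p is %s+0x%zx\n", (void*)pc, module, offset);
      else
        Printf("%p is not in any known mapping\n", (void*)pc);
    }

    }  // namespace __sanitizer
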
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_quarantine.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_quarantine.h
new file mode 100644
index 000000000..f3ad91a9c
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_quarantine.h
@@ -0,0 +1,170 @@
+//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Memory quarantine for AddressSanitizer and potentially other tools.
+// Quarantine caches some specified amount of memory in per-thread caches,
+// then evicts to global FIFO queue. When the queue reaches specified threshold,
+// oldest memory is recycled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_QUARANTINE_H
+#define SANITIZER_QUARANTINE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_list.h"
+
+namespace __sanitizer {
+
+template<typename Node> class QuarantineCache;
+
+struct QuarantineBatch {
+ static const uptr kSize = 1024;
+ QuarantineBatch *next;
+ uptr size;
+ uptr count;
+ void *batch[kSize];
+};
+
+// The callback interface is:
+// void Callback::Recycle(Node *ptr);
+// void *cb.Allocate(uptr size);
+// void cb.Deallocate(void *ptr);
+template<typename Callback, typename Node>
+class Quarantine {
+ public:
+ typedef QuarantineCache<Callback> Cache;
+
+ explicit Quarantine(LinkerInitialized)
+ : cache_(LINKER_INITIALIZED) {
+ }
+
+ void Init(uptr size, uptr cache_size) {
+ max_size_ = size;
+ min_size_ = size / 10 * 9; // 90% of max size.
+ max_cache_size_ = cache_size;
+ }
+
+ void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
+ c->Enqueue(cb, ptr, size);
+ if (c->Size() > max_cache_size_)
+ Drain(c, cb);
+ }
+
+ void NOINLINE Drain(Cache *c, Callback cb) {
+ {
+ SpinMutexLock l(&cache_mutex_);
+ cache_.Transfer(c);
+ }
+ if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
+ Recycle(cb);
+ }
+
+ private:
+ // Read-only data.
+ char pad0_[kCacheLineSize];
+ uptr max_size_;
+ uptr min_size_;
+ uptr max_cache_size_;
+ char pad1_[kCacheLineSize];
+ SpinMutex cache_mutex_;
+ SpinMutex recycle_mutex_;
+ Cache cache_;
+ char pad2_[kCacheLineSize];
+
+ void NOINLINE Recycle(Callback cb) {
+ Cache tmp;
+ {
+ SpinMutexLock l(&cache_mutex_);
+ while (cache_.Size() > min_size_) {
+ QuarantineBatch *b = cache_.DequeueBatch();
+ tmp.EnqueueBatch(b);
+ }
+ }
+ recycle_mutex_.Unlock();
+ DoRecycle(&tmp, cb);
+ }
+
+ void NOINLINE DoRecycle(Cache *c, Callback cb) {
+ while (QuarantineBatch *b = c->DequeueBatch()) {
+ const uptr kPrefetch = 16;
+ for (uptr i = 0; i < kPrefetch; i++)
+ PREFETCH(b->batch[i]);
+ for (uptr i = 0; i < b->count; i++) {
+ PREFETCH(b->batch[i + kPrefetch]);
+ cb.Recycle((Node*)b->batch[i]);
+ }
+ cb.Deallocate(b);
+ }
+ }
+};
+
+// Per-thread cache of memory blocks.
+template<typename Callback>
+class QuarantineCache {
+ public:
+ explicit QuarantineCache(LinkerInitialized) {
+ }
+
+ QuarantineCache()
+ : size_() {
+ list_.clear();
+ }
+
+ uptr Size() const {
+ return atomic_load(&size_, memory_order_relaxed);
+ }
+
+ void Enqueue(Callback cb, void *ptr, uptr size) {
+ if (list_.empty() || list_.back()->count == QuarantineBatch::kSize)
+ AllocBatch(cb);
+ QuarantineBatch *b = list_.back();
+ b->batch[b->count++] = ptr;
+ b->size += size;
+ SizeAdd(size);
+ }
+
+ void Transfer(QuarantineCache *c) {
+ list_.append_back(&c->list_);
+ SizeAdd(c->Size());
+ atomic_store(&c->size_, 0, memory_order_relaxed);
+ }
+
+ void EnqueueBatch(QuarantineBatch *b) {
+ list_.push_back(b);
+ SizeAdd(b->size);
+ }
+
+ QuarantineBatch *DequeueBatch() {
+ if (list_.empty())
+ return 0;
+ QuarantineBatch *b = list_.front();
+ list_.pop_front();
+ SizeAdd(-b->size);
+ return b;
+ }
+
+ private:
+ IntrusiveList<QuarantineBatch> list_;
+ atomic_uintptr_t size_;
+
+ void SizeAdd(uptr add) {
+ atomic_store(&size_, Size() + add, memory_order_relaxed);
+ }
+
+ NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
+ QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
+ b->count = 0;
+ b->size = 0;
+ list_.push_back(b);
+ return b;
+ }
+};
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_QUARANTINE_H
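
A minimal sketch of a Callback that satisfies the interface documented at the top of the Quarantine template. ExampleChunk, the callback type, and the use of the library's InternalAlloc/InternalFree (assumed reachable via sanitizer_common.h) for batch storage are illustrative; a real tool plugs in its own allocator:

    #include "sanitizer_quarantine.h"
    #include "sanitizer_common.h"

    namespace __sanitizer {

    struct ExampleChunk { uptr size; };

    struct ExampleQuarantineCallback {
      // Called once a chunk's time in quarantine is over.
      void Recycle(ExampleChunk *c) { /* return c to the backing allocator */ }
      // Used by the quarantine itself for QuarantineBatch storage.
      void *Allocate(uptr size) { return InternalAlloc(size); }
      void Deallocate(void *p) { InternalFree(p); }
    };

    typedef Quarantine<ExampleQuarantineCallback, ExampleChunk> ExampleQuarantine;

    static ExampleQuarantine quarantine(LINKER_INITIALIZED);
    static ExampleQuarantine::Cache cache(LINKER_INITIALIZED);

    // During tool init:  quarantine.Init(max_bytes, per_thread_cache_bytes);
    void PutInQuarantine(ExampleChunk *c) {
      quarantine.Put(&cache, ExampleQuarantineCallback(), c, c->size);
    }

    }  // namespace __sanitizer
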
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_report_decorator.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
new file mode 100644
index 000000000..17f0b2edd
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
@@ -0,0 +1,35 @@
+//===-- sanitizer_report_decorator.h ----------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Tags to decorate the sanitizer reports.
+// Currently supported tags:
+// * None.
+// * ANSI color sequences.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_REPORT_DECORATOR_H
+#define SANITIZER_REPORT_DECORATOR_H
+
+namespace __sanitizer {
+class AnsiColorDecorator {
+ public:
+ explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { }
+ const char *Black() { return ansi_ ? "\033[1m\033[30m" : ""; }
+ const char *Red() { return ansi_ ? "\033[1m\033[31m" : ""; }
+ const char *Green() { return ansi_ ? "\033[1m\033[32m" : ""; }
+ const char *Yellow() { return ansi_ ? "\033[1m\033[33m" : ""; }
+ const char *Blue() { return ansi_ ? "\033[1m\033[34m" : ""; }
+ const char *Magenta() { return ansi_ ? "\033[1m\033[35m" : ""; }
+ const char *Cyan() { return ansi_ ? "\033[1m\033[36m" : ""; }
+ const char *White() { return ansi_ ? "\033[1m\033[37m" : ""; }
+ const char *Default() { return ansi_ ? "\033[1m\033[0m" : ""; }
+ private:
+ bool ansi_;
+};
+} // namespace __sanitizer
+#endif // SANITIZER_REPORT_DECORATOR_H
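
Usage sketch: the decorator degrades to empty strings when colors are disabled, so it can be interpolated into Printf arguments unconditionally. The flag driving use_ansi_colors and the function name are illustrative:

    #include "sanitizer_report_decorator.h"
    #include "sanitizer_common.h"

    namespace __sanitizer {

    void PrintColoredError(const char *what) {
      AnsiColorDecorator d(/*use_ansi_colors=*/true);  // typically gated on a flag
      Printf("%sERROR%s: %s\n", d.Red(), d.Default(), what);
    }

    }  // namespace __sanitizer
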
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
new file mode 100644
index 000000000..2e22155fa
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
@@ -0,0 +1,202 @@
+//===-- sanitizer_stackdepot.cc -------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_stackdepot.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+const int kTabSize = 1024 * 1024; // Hash table size.
+const int kPartBits = 8;
+const int kPartShift = sizeof(u32) * 8 - kPartBits - 1;
+const int kPartCount = 1 << kPartBits; // Number of subparts in the table.
+const int kPartSize = kTabSize / kPartCount;
+const int kMaxId = 1 << kPartShift;
+
+struct StackDesc {
+ StackDesc *link;
+ u32 id;
+ u32 hash;
+ uptr size;
+ uptr stack[1]; // [size]
+};
+
+static struct {
+ StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
+ atomic_uintptr_t region_pos; // Region allocator for StackDesc's.
+ atomic_uintptr_t region_end;
+ atomic_uintptr_t tab[kTabSize]; // Hash table of StackDesc's.
+ atomic_uint32_t seq[kPartCount]; // Unique id generators.
+} depot;
+
+static StackDepotStats stats;
+
+StackDepotStats *StackDepotGetStats() {
+ return &stats;
+}
+
+static u32 hash(const uptr *stack, uptr size) {
+ // murmur2
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed ^ (size * sizeof(uptr));
+ for (uptr i = 0; i < size; i++) {
+ u32 k = stack[i];
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+ }
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+}
+
+static StackDesc *tryallocDesc(uptr memsz) {
+  // Optimistic lock-free allocation, essentially try to bump the region ptr.
+ for (;;) {
+ uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
+ uptr end = atomic_load(&depot.region_end, memory_order_acquire);
+ if (cmp == 0 || cmp + memsz > end)
+ return 0;
+ if (atomic_compare_exchange_weak(
+ &depot.region_pos, &cmp, cmp + memsz,
+ memory_order_acquire))
+ return (StackDesc*)cmp;
+ }
+}
+
+static StackDesc *allocDesc(uptr size) {
+  // First, try to allocate optimistically.
+ uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
+ StackDesc *s = tryallocDesc(memsz);
+ if (s)
+ return s;
+ // If failed, lock, retry and alloc new superblock.
+ SpinMutexLock l(&depot.mtx);
+ for (;;) {
+ s = tryallocDesc(memsz);
+ if (s)
+ return s;
+ atomic_store(&depot.region_pos, 0, memory_order_relaxed);
+ uptr allocsz = 64 * 1024;
+ if (allocsz < memsz)
+ allocsz = memsz;
+ uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ stats.mapped += allocsz;
+ atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
+ atomic_store(&depot.region_pos, mem, memory_order_release);
+ }
+}
+
+static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
+ // Searches linked list s for the stack, returns its id.
+ for (; s; s = s->link) {
+ if (s->hash == hash && s->size == size) {
+ uptr i = 0;
+ for (; i < size; i++) {
+ if (stack[i] != s->stack[i])
+ break;
+ }
+ if (i == size)
+ return s->id;
+ }
+ }
+ return 0;
+}
+
+static StackDesc *lock(atomic_uintptr_t *p) {
+ // Uses the pointer lsb as mutex.
+ for (int i = 0;; i++) {
+ uptr cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & 1) == 0
+ && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
+ memory_order_acquire))
+ return (StackDesc*)cmp;
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ }
+}
+
+static void unlock(atomic_uintptr_t *p, StackDesc *s) {
+ DCHECK_EQ((uptr)s & 1, 0);
+ atomic_store(p, (uptr)s, memory_order_release);
+}
+
+u32 StackDepotPut(const uptr *stack, uptr size) {
+ if (stack == 0 || size == 0)
+ return 0;
+ uptr h = hash(stack, size);
+ atomic_uintptr_t *p = &depot.tab[h % kTabSize];
+ uptr v = atomic_load(p, memory_order_consume);
+ StackDesc *s = (StackDesc*)(v & ~1);
+ // First, try to find the existing stack.
+ u32 id = find(s, stack, size, h);
+ if (id)
+ return id;
+ // If failed, lock, retry and insert new.
+ StackDesc *s2 = lock(p);
+ if (s2 != s) {
+ id = find(s2, stack, size, h);
+ if (id) {
+ unlock(p, s2);
+ return id;
+ }
+ }
+ uptr part = (h % kTabSize) / kPartSize;
+ id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
+ CHECK_LT(id, kMaxId);
+ id |= part << kPartShift;
+ CHECK_NE(id, 0);
+ CHECK_EQ(id & (1u << 31), 0);
+ s = allocDesc(size);
+ s->id = id;
+ s->hash = h;
+ s->size = size;
+ internal_memcpy(s->stack, stack, size * sizeof(uptr));
+ s->link = s2;
+ unlock(p, s);
+ return id;
+}
+
+const uptr *StackDepotGet(u32 id, uptr *size) {
+ if (id == 0)
+ return 0;
+ CHECK_EQ(id & (1u << 31), 0);
+ // High kPartBits contain part id, so we need to scan at most kPartSize lists.
+ uptr part = id >> kPartShift;
+ for (int i = 0; i != kPartSize; i++) {
+ uptr idx = part * kPartSize + i;
+ CHECK_LT(idx, kTabSize);
+ atomic_uintptr_t *p = &depot.tab[idx];
+ uptr v = atomic_load(p, memory_order_consume);
+ StackDesc *s = (StackDesc*)(v & ~1);
+ for (; s; s = s->link) {
+ if (s->id == id) {
+ *size = s->size;
+ return s->stack;
+ }
+ }
+ }
+ *size = 0;
+ return 0;
+}
+
+} // namespace __sanitizer
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stackdepot.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
new file mode 100644
index 000000000..bf73cf14a
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
@@ -0,0 +1,34 @@
+//===-- sanitizer_stackdepot.h ----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKDEPOT_H
+#define SANITIZER_STACKDEPOT_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// StackDepot efficiently stores large numbers of stack traces.
+
+// Maps a stack trace to a unique id.
+u32 StackDepotPut(const uptr *stack, uptr size);
+// Retrieves a stored stack trace by the id.
+const uptr *StackDepotGet(u32 id, uptr *size);
+
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr mapped;
+};
+
+StackDepotStats *StackDepotGetStats();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKDEPOT_H
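
Round-trip sketch for the depot: store a trace once, keep only the 32-bit id (e.g. in a chunk header), and re-materialize the frames when printing a report. The function names are illustrative:

    #include "sanitizer_stackdepot.h"
    #include "sanitizer_common.h"

    namespace __sanitizer {

    u32 RememberStack(const uptr *pcs, uptr n) {
      return StackDepotPut(pcs, n);  // returns 0 for an empty trace
    }

    void PrintRememberedStack(u32 id) {
      uptr size = 0;
      const uptr *pcs = StackDepotGet(id, &size);  // size stays 0 if id is unknown
      for (uptr i = 0; i < size; i++)
        Printf("  #%zu 0x%zx\n", i, pcs[i]);
    }

    }  // namespace __sanitizer
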
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
new file mode 100644
index 000000000..e14ea4472
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
@@ -0,0 +1,262 @@
+//===-- sanitizer_stacktrace.cc -------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+const char *StripPathPrefix(const char *filepath,
+ const char *strip_file_prefix) {
+ if (filepath == 0) return 0;
+ if (filepath == internal_strstr(filepath, strip_file_prefix))
+ return filepath + internal_strlen(strip_file_prefix);
+ return filepath;
+}
+
+// ----------------------- StackTrace ----------------------------- {{{1
+uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
+#ifdef __arm__
+ // Cancel Thumb bit.
+ pc = pc & (~1);
+#endif
+#if defined(__powerpc__) || defined(__powerpc64__)
+ // PCs are always 4 byte aligned.
+ return pc - 4;
+#elif defined(__sparc__)
+ return pc - 8;
+#else
+ return pc - 1;
+#endif
+}
+
+static void PrintStackFramePrefix(uptr frame_num, uptr pc) {
+ Printf(" #%zu 0x%zx", frame_num, pc);
+}
+
+static void PrintSourceLocation(const char *file, int line, int column,
+ const char *strip_file_prefix) {
+ CHECK(file);
+ Printf(" %s", StripPathPrefix(file, strip_file_prefix));
+ if (line > 0) {
+ Printf(":%d", line);
+ if (column > 0)
+ Printf(":%d", column);
+ }
+}
+
+static void PrintModuleAndOffset(const char *module, uptr offset,
+ const char *strip_file_prefix) {
+ Printf(" (%s+0x%zx)", StripPathPrefix(module, strip_file_prefix), offset);
+}
+
+void StackTrace::PrintStack(const uptr *addr, uptr size,
+ bool symbolize, const char *strip_file_prefix,
+ SymbolizeCallback symbolize_callback ) {
+ MemoryMappingLayout proc_maps;
+ InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
+ InternalScopedBuffer<AddressInfo> addr_frames(64);
+ uptr frame_num = 0;
+ for (uptr i = 0; i < size && addr[i]; i++) {
+ // PCs in stack traces are actually the return addresses, that is,
+ // addresses of the next instructions after the call.
+ uptr pc = GetPreviousInstructionPc(addr[i]);
+ uptr addr_frames_num = 0; // The number of stack frames for current
+ // instruction address.
+ if (symbolize_callback) {
+ if (symbolize_callback((void*)pc, buff.data(), buff.size())) {
+ addr_frames_num = 1;
+ PrintStackFramePrefix(frame_num, pc);
+        // We can't know anything about the string returned by the external
+        // symbolizer, but if it starts with a filename, try to strip the path
+        // prefix from it.
+ Printf(" %s\n", StripPathPrefix(buff.data(), strip_file_prefix));
+ frame_num++;
+ }
+ }
+ if (symbolize && addr_frames_num == 0) {
+ // Use our own (online) symbolizer, if necessary.
+ addr_frames_num = SymbolizeCode(pc, addr_frames.data(),
+ addr_frames.size());
+ for (uptr j = 0; j < addr_frames_num; j++) {
+ AddressInfo &info = addr_frames[j];
+ PrintStackFramePrefix(frame_num, pc);
+ if (info.function) {
+ Printf(" in %s", info.function);
+ }
+ if (info.file) {
+ PrintSourceLocation(info.file, info.line, info.column,
+ strip_file_prefix);
+ } else if (info.module) {
+ PrintModuleAndOffset(info.module, info.module_offset,
+ strip_file_prefix);
+ }
+ Printf("\n");
+ info.Clear();
+ frame_num++;
+ }
+ }
+ if (addr_frames_num == 0) {
+ // If online symbolization failed, try to output at least module and
+ // offset for instruction.
+ PrintStackFramePrefix(frame_num, pc);
+ uptr offset;
+ if (proc_maps.GetObjectNameAndOffset(pc, &offset,
+ buff.data(), buff.size())) {
+ PrintModuleAndOffset(buff.data(), offset, strip_file_prefix);
+ }
+ Printf("\n");
+ frame_num++;
+ }
+ }
+}
+
+uptr StackTrace::GetCurrentPc() {
+ return GET_CALLER_PC();
+}
+
+void StackTrace::FastUnwindStack(uptr pc, uptr bp,
+ uptr stack_top, uptr stack_bottom) {
+ CHECK(size == 0 && trace[0] == pc);
+ size = 1;
+ uhwptr *frame = (uhwptr *)bp;
+ uhwptr *prev_frame = frame - 1;
+ // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
+ while (frame > prev_frame &&
+ frame < (uhwptr *)stack_top - 2 &&
+ frame > (uhwptr *)stack_bottom &&
+ size < max_size) {
+ uhwptr pc1 = frame[1];
+ if (pc1 != pc) {
+ trace[size++] = (uptr) pc1;
+ }
+ prev_frame = frame;
+ frame = (uhwptr *)frame[0];
+ }
+}
+
+void StackTrace::PopStackFrames(uptr count) {
+ CHECK(size >= count);
+ size -= count;
+ for (uptr i = 0; i < size; i++) {
+ trace[i] = trace[i + count];
+ }
+}
+
+// On 32-bits we don't compress stack traces.
+// On 64-bits we compress stack traces: if a given pc differs slightly from
+// the previous one, we record a 31-bit offset instead of the full pc.
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr StackTrace::CompressStack(StackTrace *stack, u32 *compressed, uptr size) {
+#if SANITIZER_WORDSIZE == 32
+ // Don't compress, just copy.
+ uptr res = 0;
+ for (uptr i = 0; i < stack->size && i < size; i++) {
+ compressed[i] = stack->trace[i];
+ res++;
+ }
+ if (stack->size < size)
+ compressed[stack->size] = 0;
+#else // 64 bits, compress.
+ uptr prev_pc = 0;
+ const uptr kMaxOffset = (1ULL << 30) - 1;
+ uptr c_index = 0;
+ uptr res = 0;
+ for (uptr i = 0, n = stack->size; i < n; i++) {
+ uptr pc = stack->trace[i];
+ if (!pc) break;
+ if ((s64)pc < 0) break;
+ // Printf("C pc[%zu] %zx\n", i, pc);
+ if (prev_pc - pc < kMaxOffset || pc - prev_pc < kMaxOffset) {
+ uptr offset = (s64)(pc - prev_pc);
+ offset |= (1U << 31);
+ if (c_index >= size) break;
+ // Printf("C co[%zu] offset %zx\n", i, offset);
+ compressed[c_index++] = offset;
+ } else {
+ uptr hi = pc >> 32;
+ uptr lo = (pc << 32) >> 32;
+ CHECK_EQ((hi & (1 << 31)), 0);
+ if (c_index + 1 >= size) break;
+ // Printf("C co[%zu] hi/lo: %zx %zx\n", c_index, hi, lo);
+ compressed[c_index++] = hi;
+ compressed[c_index++] = lo;
+ }
+ res++;
+ prev_pc = pc;
+ }
+ if (c_index < size)
+ compressed[c_index] = 0;
+ if (c_index + 1 < size)
+ compressed[c_index + 1] = 0;
+#endif // SANITIZER_WORDSIZE
+
+ // debug-only code
+#if 0
+ StackTrace check_stack;
+ UncompressStack(&check_stack, compressed, size);
+ if (res < check_stack.size) {
+ Printf("res %zu check_stack.size %zu; c_size %zu\n", res,
+ check_stack.size, size);
+ }
+ // |res| may be greater than check_stack.size, because
+ // UncompressStack(CompressStack(stack)) eliminates the 0x0 frames.
+ CHECK(res >= check_stack.size);
+ CHECK_EQ(0, REAL(memcmp)(check_stack.trace, stack->trace,
+ check_stack.size * sizeof(uptr)));
+#endif
+
+ return res;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void StackTrace::UncompressStack(StackTrace *stack,
+ u32 *compressed, uptr size) {
+#if SANITIZER_WORDSIZE == 32
+ // Don't uncompress, just copy.
+ stack->size = 0;
+ for (uptr i = 0; i < size && i < kStackTraceMax; i++) {
+ if (!compressed[i]) break;
+ stack->size++;
+ stack->trace[i] = compressed[i];
+ }
+#else // 64 bits, uncompress
+ uptr prev_pc = 0;
+ stack->size = 0;
+ for (uptr i = 0; i < size && stack->size < kStackTraceMax; i++) {
+ u32 x = compressed[i];
+ uptr pc = 0;
+ if (x & (1U << 31)) {
+ // Printf("U co[%zu] offset: %x\n", i, x);
+ // this is an offset
+ s32 offset = x;
+      offset = (offset << 1) >> 1; // clear bit 31 and sign-extend.
+ pc = prev_pc + offset;
+ CHECK(pc);
+ } else {
+ // CHECK(i + 1 < size);
+ if (i + 1 >= size) break;
+ uptr hi = x;
+ uptr lo = compressed[i+1];
+ // Printf("U co[%zu] hi/lo: %zx %zx\n", i, hi, lo);
+ i++;
+ pc = (hi << 32) | lo;
+ if (!pc) break;
+ }
+ // Printf("U pc[%zu] %zx\n", stack->size, pc);
+ stack->trace[stack->size++] = pc;
+ prev_pc = pc;
+ }
+#endif // SANITIZER_WORDSIZE
+}
+
+} // namespace __sanitizer
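
A sketch of the compress/uncompress round trip implemented above; on 64-bit targets a frame close to its predecessor costs one u32 instead of two. The CHECK mirrors the invariant noted in the debug-only block, and the function name is illustrative:

    #include "sanitizer_stacktrace.h"

    namespace __sanitizer {

    void CompressRoundTripExample(StackTrace *st) {
      u32 packed[kStackTraceMax];
      uptr stored = StackTrace::CompressStack(st, packed, kStackTraceMax);
      StackTrace unpacked;
      StackTrace::UncompressStack(&unpacked, packed, kStackTraceMax);
      // |stored| may exceed unpacked.size because zero frames are dropped.
      CHECK(stored >= unpacked.size);
    }

    }  // namespace __sanitizer
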
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
new file mode 100644
index 000000000..fd0c4671a
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
@@ -0,0 +1,81 @@
+//===-- sanitizer_stacktrace.h ----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKTRACE_H
+#define SANITIZER_STACKTRACE_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+static const uptr kStackTraceMax = 256;
+
+struct StackTrace {
+ typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
+ int out_size);
+ uptr size;
+ uptr max_size;
+ uptr trace[kStackTraceMax];
+ static void PrintStack(const uptr *addr, uptr size,
+ bool symbolize, const char *strip_file_prefix,
+ SymbolizeCallback symbolize_callback);
+ void CopyTo(uptr *dst, uptr dst_size) {
+ for (uptr i = 0; i < size && i < dst_size; i++)
+ dst[i] = trace[i];
+ for (uptr i = size; i < dst_size; i++)
+ dst[i] = 0;
+ }
+
+ void CopyFrom(uptr *src, uptr src_size) {
+ size = src_size;
+ if (size > kStackTraceMax) size = kStackTraceMax;
+ for (uptr i = 0; i < size; i++) {
+ trace[i] = src[i];
+ }
+ }
+
+ void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom);
+ void SlowUnwindStack(uptr pc, uptr max_depth);
+
+ void PopStackFrames(uptr count);
+
+ static uptr GetCurrentPc();
+ static uptr GetPreviousInstructionPc(uptr pc);
+
+ static uptr CompressStack(StackTrace *stack,
+ u32 *compressed, uptr size);
+ static void UncompressStack(StackTrace *stack,
+ u32 *compressed, uptr size);
+};
+
+
+const char *StripPathPrefix(const char *filepath,
+ const char *strip_file_prefix);
+
+} // namespace __sanitizer
+
+// Use this macro if you want to print stack trace with the caller
+// of the current function in the top frame.
+#define GET_CALLER_PC_BP_SP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = GET_CALLER_PC(); \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
+// Use this macro if you want to print stack trace with the current
+// function in the top frame.
+#define GET_CURRENT_PC_BP_SP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = StackTrace::GetCurrentPc(); \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
+
+#endif // SANITIZER_STACKTRACE_H
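
A minimal sketch of capturing the caller's stack with the fast (frame-pointer) unwinder. FastUnwindStack requires trace[0] == pc and size == 0 on entry, which the sketch sets up explicitly; GetThreadStackTopAndBottom stands in for however the tool learns the current thread's stack bounds and is assumed to be declared in sanitizer_common.h:

    #include "sanitizer_stacktrace.h"
    #include "sanitizer_common.h"

    namespace __sanitizer {

    void CaptureCallerStack(StackTrace *st) {
      GET_CALLER_PC_BP_SP;
      (void)sp;  // not needed by the fast unwinder
      uptr stack_top, stack_bottom;
      GetThreadStackTopAndBottom(/*at_initialization=*/false,
                                 &stack_top, &stack_bottom);
      st->size = 0;
      st->max_size = kStackTraceMax;
      st->trace[0] = pc;
      st->FastUnwindStack(pc, bp, stack_top, stack_bottom);
    }

    }  // namespace __sanitizer
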
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
new file mode 100644
index 000000000..2c9cb2b0a
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
@@ -0,0 +1,408 @@
+//===-- sanitizer_symbolizer.cc -------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries. See sanitizer_symbolizer.h for details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+void AddressInfo::Clear() {
+ InternalFree(module);
+ InternalFree(function);
+ InternalFree(file);
+ internal_memset(this, 0, sizeof(AddressInfo));
+}
+
+LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
+ full_name_ = internal_strdup(module_name);
+ base_address_ = base_address;
+ n_ranges_ = 0;
+}
+
+void LoadedModule::addAddressRange(uptr beg, uptr end) {
+ CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
+ ranges_[n_ranges_].beg = beg;
+ ranges_[n_ranges_].end = end;
+ n_ranges_++;
+}
+
+bool LoadedModule::containsAddress(uptr address) const {
+ for (uptr i = 0; i < n_ranges_; i++) {
+ if (ranges_[i].beg <= address && address < ranges_[i].end)
+ return true;
+ }
+ return false;
+}
+
+// Extracts the prefix of "str" that consists of any characters not
+// present in "delims" string, and copies this prefix to "result", allocating
+// space for it.
+// Returns a pointer to "str" after skipping extracted prefix and first
+// delimiter char.
+static const char *ExtractToken(const char *str, const char *delims,
+ char **result) {
+ uptr prefix_len = internal_strcspn(str, delims);
+ *result = (char*)InternalAlloc(prefix_len + 1);
+ internal_memcpy(*result, str, prefix_len);
+ (*result)[prefix_len] = '\0';
+ const char *prefix_end = str + prefix_len;
+ if (*prefix_end != '\0') prefix_end++;
+ return prefix_end;
+}
+
+// Same as ExtractToken, but converts extracted token to integer.
+static const char *ExtractInt(const char *str, const char *delims,
+ int *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (int)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
+}
+
+static const char *ExtractUptr(const char *str, const char *delims,
+ uptr *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (uptr)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
+}
+
+// ExternalSymbolizer encapsulates communication between the tool and
+// external symbolizer program, running in a different subprocess,
+// For now we assume the following protocol:
+// For each request of the form
+// <module_name> <module_offset>
+// passed to STDIN, external symbolizer prints to STDOUT response:
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// ...
+// <empty line>
+class ExternalSymbolizer {
+ public:
+ ExternalSymbolizer(const char *path, int input_fd, int output_fd)
+ : path_(path),
+ input_fd_(input_fd),
+ output_fd_(output_fd),
+ times_restarted_(0) {
+ CHECK(path_);
+ CHECK_NE(input_fd_, kInvalidFd);
+ CHECK_NE(output_fd_, kInvalidFd);
+ }
+
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ CHECK(module_name);
+ internal_snprintf(buffer_, kBufferSize, "%s%s 0x%zx\n",
+ is_data ? "DATA " : "", module_name, module_offset);
+ if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
+ return 0;
+ if (!readFromSymbolizer(buffer_, kBufferSize))
+ return 0;
+ return buffer_;
+ }
+
+ bool Restart() {
+ if (times_restarted_ >= kMaxTimesRestarted) return false;
+ times_restarted_++;
+ internal_close(input_fd_);
+ internal_close(output_fd_);
+ return StartSymbolizerSubprocess(path_, &input_fd_, &output_fd_);
+ }
+
+ private:
+ bool readFromSymbolizer(char *buffer, uptr max_length) {
+ if (max_length == 0)
+ return true;
+ uptr read_len = 0;
+ while (true) {
+ uptr just_read = internal_read(input_fd_, buffer + read_len,
+ max_length - read_len);
+ // We can't read 0 bytes, as we don't expect external symbolizer to close
+ // its stdout.
+ if (just_read == 0 || just_read == (uptr)-1) {
+ Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
+ return false;
+ }
+ read_len += just_read;
+ // Empty line marks the end of symbolizer output.
+ if (read_len >= 2 && buffer[read_len - 1] == '\n' &&
+ buffer[read_len - 2] == '\n') {
+ break;
+ }
+ }
+ return true;
+ }
+
+ bool writeToSymbolizer(const char *buffer, uptr length) {
+ if (length == 0)
+ return true;
+ uptr write_len = internal_write(output_fd_, buffer, length);
+ if (write_len == 0 || write_len == (uptr)-1) {
+ Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_);
+ return false;
+ }
+ return true;
+ }
+
+ const char *path_;
+ int input_fd_;
+ int output_fd_;
+
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+
+ static const uptr kMaxTimesRestarted = 5;
+ uptr times_restarted_;
+};
+
+static LowLevelAllocator symbolizer_allocator; // Linker initialized.
+
+#if SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+bool __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+} // extern "C"
+
+class InternalSymbolizer {
+ public:
+ typedef bool (*SanitizerSymbolizeFn)(const char*, u64, char*, int);
+ static InternalSymbolizer *get() {
+ if (__sanitizer_symbolize_code != 0 &&
+ __sanitizer_symbolize_data != 0) {
+ void *mem = symbolizer_allocator.Allocate(sizeof(InternalSymbolizer));
+ return new(mem) InternalSymbolizer();
+ }
+ return 0;
+ }
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ SanitizerSymbolizeFn symbolize_fn = is_data ? __sanitizer_symbolize_data
+ : __sanitizer_symbolize_code;
+ if (symbolize_fn(module_name, module_offset, buffer_, kBufferSize))
+ return buffer_;
+ return 0;
+ }
+
+ private:
+ InternalSymbolizer() { }
+
+ static const int kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+};
+#else // SANITIZER_SUPPORTS_WEAK_HOOKS
+
+class InternalSymbolizer {
+ public:
+ static InternalSymbolizer *get() { return 0; }
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ return 0;
+ }
+};
+
+#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+
+class Symbolizer {
+ public:
+ uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames) {
+ if (max_frames == 0)
+ return 0;
+ LoadedModule *module = FindModuleForAddress(addr);
+ if (module == 0)
+ return 0;
+ const char *module_name = module->full_name();
+ uptr module_offset = addr - module->base_address();
+ const char *str = SendCommand(false, module_name, module_offset);
+ if (str == 0) {
+ // External symbolizer was not initialized or failed. Fill only data
+ // about module name and offset.
+ AddressInfo *info = &frames[0];
+ info->Clear();
+ info->FillAddressAndModuleInfo(addr, module_name, module_offset);
+ return 1;
+ }
+ uptr frame_id = 0;
+ for (frame_id = 0; frame_id < max_frames; frame_id++) {
+ AddressInfo *info = &frames[frame_id];
+ char *function_name = 0;
+ str = ExtractToken(str, "\n", &function_name);
+ CHECK(function_name);
+ if (function_name[0] == '\0') {
+ // There are no more frames.
+ break;
+ }
+ info->Clear();
+ info->FillAddressAndModuleInfo(addr, module_name, module_offset);
+ info->function = function_name;
+ // Parse <file>:<line>:<column> buffer.
+ char *file_line_info = 0;
+ str = ExtractToken(str, "\n", &file_line_info);
+ CHECK(file_line_info);
+ const char *line_info = ExtractToken(file_line_info, ":", &info->file);
+ line_info = ExtractInt(line_info, ":", &info->line);
+ line_info = ExtractInt(line_info, "", &info->column);
+ InternalFree(file_line_info);
+
+ // Functions and filenames can be "??", in which case we write 0
+ // to address info to mark that names are unknown.
+ if (0 == internal_strcmp(info->function, "??")) {
+ InternalFree(info->function);
+ info->function = 0;
+ }
+ if (0 == internal_strcmp(info->file, "??")) {
+ InternalFree(info->file);
+ info->file = 0;
+ }
+ }
+ if (frame_id == 0) {
+ // Make sure we return at least one frame.
+ AddressInfo *info = &frames[0];
+ info->Clear();
+ info->FillAddressAndModuleInfo(addr, module_name, module_offset);
+ frame_id = 1;
+ }
+ return frame_id;
+ }
+
+ bool SymbolizeData(uptr addr, DataInfo *info) {
+ LoadedModule *module = FindModuleForAddress(addr);
+ if (module == 0)
+ return false;
+ const char *module_name = module->full_name();
+ uptr module_offset = addr - module->base_address();
+ internal_memset(info, 0, sizeof(*info));
+ info->address = addr;
+ info->module = internal_strdup(module_name);
+ info->module_offset = module_offset;
+ const char *str = SendCommand(true, module_name, module_offset);
+ if (str == 0)
+ return true;
+ str = ExtractToken(str, "\n", &info->name);
+ str = ExtractUptr(str, " ", &info->start);
+ str = ExtractUptr(str, "\n", &info->size);
+ info->start += module->base_address();
+ return true;
+ }
+
+ bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
+ int input_fd, output_fd;
+ if (!StartSymbolizerSubprocess(path_to_symbolizer, &input_fd, &output_fd))
+ return false;
+ void *mem = symbolizer_allocator.Allocate(sizeof(ExternalSymbolizer));
+ external_symbolizer_ = new(mem) ExternalSymbolizer(path_to_symbolizer,
+ input_fd, output_fd);
+ return true;
+ }
+
+ bool IsSymbolizerAvailable() {
+ if (internal_symbolizer_ == 0)
+ internal_symbolizer_ = InternalSymbolizer::get();
+ return internal_symbolizer_ || external_symbolizer_;
+ }
+
+ private:
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ // First, try to use internal symbolizer.
+ if (internal_symbolizer_ == 0) {
+ internal_symbolizer_ = InternalSymbolizer::get();
+ }
+ if (internal_symbolizer_) {
+ return internal_symbolizer_->SendCommand(is_data, module_name,
+ module_offset);
+ }
+ // Otherwise, fall back to external symbolizer.
+ if (external_symbolizer_ == 0) {
+ ReportExternalSymbolizerError(
+ "WARNING: Trying to symbolize code, but external "
+ "symbolizer is not initialized!\n");
+ return 0;
+ }
+ for (;;) {
+ char *reply = external_symbolizer_->SendCommand(is_data, module_name,
+ module_offset);
+ if (reply)
+ return reply;
+ // Try to restart symbolizer subprocess. If we don't succeed, forget
+ // about it and don't try to use it later.
+ if (!external_symbolizer_->Restart()) {
+ ReportExternalSymbolizerError(
+ "WARNING: Failed to use and restart external symbolizer!\n");
+ external_symbolizer_ = 0;
+ return 0;
+ }
+ }
+ }
+
+ LoadedModule *FindModuleForAddress(uptr address) {
+ if (modules_ == 0) {
+ modules_ = (LoadedModule*)(symbolizer_allocator.Allocate(
+ kMaxNumberOfModuleContexts * sizeof(LoadedModule)));
+ CHECK(modules_);
+ n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts);
+ CHECK_GT(n_modules_, 0);
+ CHECK_LT(n_modules_, kMaxNumberOfModuleContexts);
+ }
+ for (uptr i = 0; i < n_modules_; i++) {
+ if (modules_[i].containsAddress(address)) {
+ return &modules_[i];
+ }
+ }
+ return 0;
+ }
+ void ReportExternalSymbolizerError(const char *msg) {
+ // Don't use atomics here for now, as SymbolizeCode can't be called
+ // from multiple threads anyway.
+ static bool reported;
+ if (!reported) {
+ Report(msg);
+ reported = true;
+ }
+ }
+
+ // 16K loaded modules should be enough for everyone.
+ static const uptr kMaxNumberOfModuleContexts = 1 << 14;
+ LoadedModule *modules_; // Array of module descriptions is leaked.
+ uptr n_modules_;
+
+ ExternalSymbolizer *external_symbolizer_; // Leaked.
+ InternalSymbolizer *internal_symbolizer_; // Leaked.
+};
+
+static Symbolizer symbolizer; // Linker initialized.
+
+uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) {
+ return symbolizer.SymbolizeCode(address, frames, max_frames);
+}
+
+bool SymbolizeData(uptr address, DataInfo *info) {
+ return symbolizer.SymbolizeData(address, info);
+}
+
+bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
+ return symbolizer.InitializeExternalSymbolizer(path_to_symbolizer);
+}
+
+bool IsSymbolizerAvailable() {
+ return symbolizer.IsSymbolizerAvailable();
+}
+
+} // namespace __sanitizer
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
new file mode 100644
index 000000000..751806e84
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
@@ -0,0 +1,112 @@
+//===-- sanitizer_symbolizer.h ----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Symbolizer is intended to be used by both
+// AddressSanitizer and ThreadSanitizer to symbolize a given
+// address. It is an analogue of the addr2line utility and allows mapping an
+// instruction address to a location in source code at run-time.
+//
+// Symbolizer is planned to use debug information (in DWARF format)
+// in a binary via interface defined in "llvm/DebugInfo/DIContext.h"
+//
+// Symbolizer code should be called from the run-time library of
+// dynamic tools, and generally should not call memory allocation
+// routines or other system library functions intercepted by those tools.
+// Instead, Symbolizer code should use their replacements, defined in
+// "compiler-rt/lib/sanitizer_common/sanitizer_libc.h".
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_H
+#define SANITIZER_SYMBOLIZER_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+// WARNING: Do not include system headers here. See details above.
+
+namespace __sanitizer {
+
+struct AddressInfo {
+ uptr address;
+ char *module;
+ uptr module_offset;
+ char *function;
+ char *file;
+ int line;
+ int column;
+
+ AddressInfo() {
+ internal_memset(this, 0, sizeof(AddressInfo));
+ }
+ // Deletes all strings and sets all fields to zero.
+ void Clear();
+
+ void FillAddressAndModuleInfo(uptr addr, const char *mod_name,
+ uptr mod_offset) {
+ address = addr;
+ module = internal_strdup(mod_name);
+ module_offset = mod_offset;
+ }
+};
+
+struct DataInfo {
+ uptr address;
+ char *module;
+ uptr module_offset;
+ char *name;
+ uptr start;
+ uptr size;
+};
+
+// Fills at most "max_frames" elements of "frames" with descriptions
+// for a given address (in all inlined functions). Returns the number
+// of descriptions actually filled.
+// This function should NOT be called from two threads simultaneously.
+uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
+bool SymbolizeData(uptr address, DataInfo *info);
+
+bool IsSymbolizerAvailable();
+
+// Attempts to demangle the provided C++ mangled name.
+const char *Demangle(const char *Name);
+
+// Starts the external symbolizer program in a subprocess. The sanitizer
+// communicates with the external symbolizer via pipes.
+bool InitializeExternalSymbolizer(const char *path_to_symbolizer);
+
+class LoadedModule {
+ public:
+ LoadedModule(const char *module_name, uptr base_address);
+ void addAddressRange(uptr beg, uptr end);
+ bool containsAddress(uptr address) const;
+
+ const char *full_name() const { return full_name_; }
+ uptr base_address() const { return base_address_; }
+
+ private:
+ struct AddressRange {
+ uptr beg;
+ uptr end;
+ };
+ char *full_name_;
+ uptr base_address_;
+ static const uptr kMaxNumberOfAddressRanges = 6;
+ AddressRange ranges_[kMaxNumberOfAddressRanges];
+ uptr n_ranges_;
+};
+
+// Creates an external symbolizer connected via pipes; the caller should write
+// requests to output_fd and read responses from input_fd.
+bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
+ int *input_fd, int *output_fd);
+
+// OS-dependent function that fills the array with descriptions of at most
+// "max_modules" currently loaded modules. Returns the number of
+// initialized modules.
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_H
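
For context, here is a hedged sketch of how a runtime might consume the interface declared above: call SymbolizeCode with a small frame buffer and print whatever fields were filled in. The kMaxFrames constant and the PrintSymbolizedPC helper are illustrative only, not part of the header.

// Illustrative caller of the symbolizer interface (assumes it is linked
// together with sanitizer_common; kMaxFrames is a made-up constant).
#include "sanitizer_common.h"
#include "sanitizer_symbolizer.h"

namespace __sanitizer {

void PrintSymbolizedPC(uptr pc) {
  static const uptr kMaxFrames = 16;  // enough for typical inlining depth
  AddressInfo frames[kMaxFrames];
  // Per the comment above, SymbolizeCode must not be called concurrently.
  uptr n = SymbolizeCode(pc, frames, kMaxFrames);
  for (uptr i = 0; i < n; i++) {
    const AddressInfo &f = frames[i];
    Printf("  #%d %p in %s %s:%d\n", (int)i, (void*)f.address,
           f.function ? f.function : "<unknown>",
           f.file ? f.file : "<unknown>", f.line);
    // In the real runtimes the strings are eventually released via Clear().
  }
}

}  // namespace __sanitizer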
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_itanium.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_itanium.cc
new file mode 100644
index 000000000..b356f9a09
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_itanium.cc
@@ -0,0 +1,40 @@
+//===-- sanitizer_symbolizer_itanium.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between the sanitizer run-time libraries.
+// Itanium C++ ABI-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#if defined(__APPLE__) || defined(__linux__)
+
+#include "sanitizer_symbolizer.h"
+
+#include <stdlib.h>
+
+// C++ demangling function, as required by the Itanium C++ ABI. This is weak,
+// because we do not require a C++ ABI library to be linked to a program
+// using sanitizers; if it's not present, we'll just use the mangled name.
+namespace __cxxabiv1 {
+ extern "C" char *__cxa_demangle(const char *mangled, char *buffer,
+ size_t *length, int *status)
+ SANITIZER_WEAK_ATTRIBUTE;
+}
+
+const char *__sanitizer::Demangle(const char *MangledName) {
+ // FIXME: __cxa_demangle aggressively insists on allocating memory.
+ // There's not much we can do about that, short of providing our
+ // own demangler (libc++abi's implementation could be adapted so that
+ // it does not allocate). For now, we just call it anyway, and we leak
+ // the returned value.
+ if (__cxxabiv1::__cxa_demangle)
+ if (const char *Demangled =
+ __cxxabiv1::__cxa_demangle(MangledName, 0, 0, 0))
+ return Demangled;
+
+ return MangledName;
+}
+
+#endif // __APPLE__ || __linux__
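
A small hedged example of what this fallback behaviour looks like from the caller's side: when a C++ ABI library is linked, Demangle returns a readable name; otherwise it simply echoes the mangled input. The sample mangled name and the DemangleDemo helper are illustrative only.

// Illustrative use of __sanitizer::Demangle(); relies on the Itanium
// implementation above being compiled in (Linux/Mac).
#include "sanitizer_common.h"
#include "sanitizer_symbolizer.h"

namespace __sanitizer {

void DemangleDemo() {
  const char *mangled = "_ZN3foo3barEv";  // mangled form of foo::bar()
  // With libstdc++/libc++abi present this prints "foo::bar()"; without a
  // C++ ABI library the weak __cxa_demangle is null and the mangled name
  // is printed unchanged.
  Printf("%s -> %s\n", mangled, Demangle(mangled));
}

}  // namespace __sanitizer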
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_linux.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_linux.cc
new file mode 100644
index 000000000..01f1e4588
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_linux.cc
@@ -0,0 +1,180 @@
+//===-- sanitizer_symbolizer_linux.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Linux-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#ifdef __linux__
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_symbolizer.h"
+
+#include <elf.h>
+#include <errno.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#if !defined(__ANDROID__) && !defined(ANDROID)
+#include <link.h>
+#endif
+
+namespace __sanitizer {
+
+static const int kSymbolizerStartupTimeMillis = 10;
+
+bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
+ int *input_fd, int *output_fd) {
+ if (!FileExists(path_to_symbolizer)) {
+ Report("WARNING: invalid path to external symbolizer!\n");
+ return false;
+ }
+
+ int *infd = NULL;
+ int *outfd = NULL;
+  // The client program may close its stdin and/or stdout and/or stderr,
+  // thus allowing pipe() to reuse file descriptors 0, 1 or 2.
+  // In this case the communication between the forked processes may be
+  // broken if either the parent or the child tries to close or duplicate
+  // these descriptors. The loop below produces two pairs of file
+  // descriptors, each with both ends greater than 2 (stderr).
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
+ for (int j = 0; j < i; j++) {
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
+ return false;
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
+ for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ break;
+ }
+ }
+ }
+ CHECK(infd);
+ CHECK(outfd);
+
+ int pid = fork();
+ if (pid == -1) {
+    // fork() failed.
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ Report("WARNING: failed to fork external symbolizer "
+ " (errno: %d)\n", errno);
+ return false;
+ } else if (pid == 0) {
+ // Child subprocess.
+ internal_close(STDOUT_FILENO);
+ internal_close(STDIN_FILENO);
+ internal_dup2(outfd[0], STDIN_FILENO);
+ internal_dup2(infd[1], STDOUT_FILENO);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ for (int fd = getdtablesize(); fd > 2; fd--)
+ internal_close(fd);
+ execl(path_to_symbolizer, path_to_symbolizer, (char*)0);
+ internal__exit(1);
+ }
+
+ // Continue execution in parent process.
+ internal_close(outfd[0]);
+ internal_close(infd[1]);
+ *input_fd = infd[0];
+ *output_fd = outfd[1];
+
+ // Check that symbolizer subprocess started successfully.
+ int pid_status;
+ SleepForMillis(kSymbolizerStartupTimeMillis);
+ int exited_pid = waitpid(pid, &pid_status, WNOHANG);
+ if (exited_pid != 0) {
+ // Either waitpid failed, or child has already exited.
+ Report("WARNING: external symbolizer didn't start up correctly!\n");
+ return false;
+ }
+
+ return true;
+}
+
+#if defined(__ANDROID__) || defined(ANDROID)
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+ UNIMPLEMENTED();
+}
+#else // ANDROID
+typedef ElfW(Phdr) Elf_Phdr;
+
+struct DlIteratePhdrData {
+ LoadedModule *modules;
+ uptr current_n;
+ uptr max_n;
+};
+
+static const uptr kMaxPathLength = 512;
+
+static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
+ DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
+ if (data->current_n == data->max_n)
+ return 0;
+ InternalScopedBuffer<char> module_name(kMaxPathLength);
+ module_name.data()[0] = '\0';
+ if (data->current_n == 0) {
+ // First module is the binary itself.
+ uptr module_name_len = internal_readlink(
+ "/proc/self/exe", module_name.data(), module_name.size());
+ CHECK_NE(module_name_len, (uptr)-1);
+ CHECK_LT(module_name_len, module_name.size());
+ module_name[module_name_len] = '\0';
+ } else if (info->dlpi_name) {
+ internal_strncpy(module_name.data(), info->dlpi_name, module_name.size());
+ }
+ if (module_name.data()[0] == '\0')
+ return 0;
+ void *mem = &data->modules[data->current_n];
+ LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(),
+ info->dlpi_addr);
+ data->current_n++;
+ for (int i = 0; i < info->dlpi_phnum; i++) {
+ const Elf_Phdr *phdr = &info->dlpi_phdr[i];
+ if (phdr->p_type == PT_LOAD) {
+ uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
+ uptr cur_end = cur_beg + phdr->p_memsz;
+ cur_module->addAddressRange(cur_beg, cur_end);
+ }
+ }
+ return 0;
+}
+
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+ CHECK(modules);
+ DlIteratePhdrData data = {modules, 0, max_modules};
+ dl_iterate_phdr(dl_iterate_phdr_cb, &data);
+ return data.current_n;
+}
+#endif // ANDROID
+
+} // namespace __sanitizer
+
+#endif // __linux__
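
The module enumeration above is essentially a thin wrapper around dl_iterate_phdr. As a point of comparison, a standalone sketch of the same pattern using only glibc and the standard library (no sanitizer types) might look like the following; it is a simplified illustration, not the runtime's actual code path.

// Standalone sketch of dl_iterate_phdr-based module enumeration (glibc,
// non-Android). Prints each loaded object and its PT_LOAD ranges.
#include <link.h>
#include <stdio.h>

static int print_module_cb(struct dl_phdr_info *info, size_t size, void *arg) {
  (void)size; (void)arg;
  const char *name = (info->dlpi_name && info->dlpi_name[0])
                         ? info->dlpi_name
                         : "(main executable)";
  printf("%s @ base 0x%lx\n", name, (unsigned long)info->dlpi_addr);
  for (int i = 0; i < info->dlpi_phnum; i++) {
    const ElfW(Phdr) *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type != PT_LOAD) continue;
    unsigned long beg = info->dlpi_addr + phdr->p_vaddr;
    unsigned long end = beg + phdr->p_memsz;
    printf("  PT_LOAD [0x%lx, 0x%lx)\n", beg, end);
  }
  return 0;  // keep iterating over the remaining objects
}

int main() {
  dl_iterate_phdr(print_module_cb, 0);
  return 0;
}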
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc
new file mode 100644
index 000000000..a1b931b73
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc
@@ -0,0 +1,29 @@
+//===-- sanitizer_symbolizer_mac.cc ---------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Mac-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#ifdef __APPLE__
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
+ int *input_fd, int *output_fd) {
+ UNIMPLEMENTED();
+}
+
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+ UNIMPLEMENTED();
+}
+
+} // namespace __sanitizer
+
+#endif // __APPLE__
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
new file mode 100644
index 000000000..ad0053234
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -0,0 +1,35 @@
+//===-- sanitizer_symbolizer_win.cc ---------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Windows-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#ifdef _WIN32
+#include <windows.h>
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
+ int *input_fd, int *output_fd) {
+ UNIMPLEMENTED();
+}
+
+uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
+ UNIMPLEMENTED();
+}
+
+const char *Demangle(const char *MangledName) {
+ return MangledName;
+}
+
+} // namespace __sanitizer
+
+#endif // _WIN32
diff --git a/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_win.cc b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_win.cc
new file mode 100644
index 000000000..695265594
--- /dev/null
+++ b/gcc-4.8.3/libsanitizer/sanitizer_common/sanitizer_win.cc
@@ -0,0 +1,294 @@
+//===-- sanitizer_win.cc --------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements windows-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#define NOGDI
+#include <stdlib.h>
+#include <io.h>
+#include <windows.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+// --------------------- sanitizer_common.h
+uptr GetPageSize() {
+ return 1U << 14; // FIXME: is this configurable?
+}
+
+uptr GetMmapGranularity() {
+ return 1U << 16; // FIXME: is this configurable?
+}
+
+bool FileExists(const char *filename) {
+ UNIMPLEMENTED();
+}
+
+int GetPid() {
+ return GetProcessId(GetCurrentProcess());
+}
+
+uptr GetThreadSelf() {
+ return GetCurrentThreadId();
+}
+
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
+ // FIXME: is it possible for the stack to not be a single allocation?
+  // Are these values what ASan expects to get (reserved, not committed;
+  // including the stack guard page)?
+ *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
+ *stack_bottom = (uptr)mbi.AllocationBase;
+}
+
+void *MmapOrDie(uptr size, const char *mem_type) {
+ void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (rv == 0) {
+ Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s\n",
+ size, size, mem_type);
+ CHECK("unable to mmap" && 0);
+ }
+ return rv;
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
+ Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ size, size, addr);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
+ // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
+ // but on Win64 it does.
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (p == 0)
+ Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at %p (%d)\n",
+ size, size, fixed_addr, GetLastError());
+ return p;
+}
+
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+ return MmapFixedNoReserve(fixed_addr, size);
+}
+
+void *Mprotect(uptr fixed_addr, uptr size) {
+ return VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
+}
+
+void FlushUnneededShadowMemory(uptr addr, uptr size) {
+  // This is almost useless on 32-bit platforms.
+  // FIXME: add a madvise() analogue when we move to 64 bits.
+}
+
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
+ // FIXME: shall we do anything here on Windows?
+ return true;
+}
+
+void *MapFileToMemory(const char *file_name, uptr *buff_size) {
+ UNIMPLEMENTED();
+}
+
+const char *GetEnv(const char *name) {
+ static char env_buffer[32767] = {};
+
+  // Note: this implementation stores the result in a static buffer, so it may
+  // only be called once.
+ static bool called_once = false;
+ if (called_once)
+ UNIMPLEMENTED();
+ called_once = true;
+
+ DWORD rv = GetEnvironmentVariableA(name, env_buffer, sizeof(env_buffer));
+ if (rv > 0 && rv < sizeof(env_buffer))
+ return env_buffer;
+ return 0;
+}
+
+const char *GetPwd() {
+ UNIMPLEMENTED();
+}
+
+u32 GetUid() {
+ UNIMPLEMENTED();
+}
+
+void DumpProcessMap() {
+ UNIMPLEMENTED();
+}
+
+void DisableCoreDumper() {
+ UNIMPLEMENTED();
+}
+
+void ReExec() {
+ UNIMPLEMENTED();
+}
+
+void PrepareForSandboxing() {
+ // Nothing here for now.
+}
+
+bool StackSizeIsUnlimited() {
+ UNIMPLEMENTED();
+}
+
+void SetStackSizeLimitInBytes(uptr limit) {
+ UNIMPLEMENTED();
+}
+
+void SleepForSeconds(int seconds) {
+ Sleep(seconds * 1000);
+}
+
+void SleepForMillis(int millis) {
+ Sleep(millis);
+}
+
+void Abort() {
+ abort();
+ _exit(-1); // abort is not NORETURN on Windows.
+}
+
+#ifndef SANITIZER_GO
+int Atexit(void (*function)(void)) {
+ return atexit(function);
+}
+#endif
+
+// ------------------ sanitizer_libc.h
+void *internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, u64 offset) {
+ UNIMPLEMENTED();
+}
+
+int internal_munmap(void *addr, uptr length) {
+ UNIMPLEMENTED();
+}
+
+int internal_close(fd_t fd) {
+ UNIMPLEMENTED();
+}
+
+int internal_isatty(fd_t fd) {
+ return _isatty(fd);
+}
+
+fd_t internal_open(const char *filename, int flags) {
+ UNIMPLEMENTED();
+}
+
+fd_t internal_open(const char *filename, int flags, u32 mode) {
+ UNIMPLEMENTED();
+}
+
+fd_t OpenFile(const char *filename, bool write) {
+ UNIMPLEMENTED();
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ UNIMPLEMENTED();
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ if (fd != kStderrFd)
+ UNIMPLEMENTED();
+ HANDLE err = GetStdHandle(STD_ERROR_HANDLE);
+ if (err == 0)
+ return 0; // FIXME: this might not work on some apps.
+ DWORD ret;
+ if (!WriteFile(err, buf, count, &ret, 0))
+ return 0;
+ return ret;
+}
+
+int internal_stat(const char *path, void *buf) {
+ UNIMPLEMENTED();
+}
+
+int internal_lstat(const char *path, void *buf) {
+ UNIMPLEMENTED();
+}
+
+int internal_fstat(fd_t fd, void *buf) {
+ UNIMPLEMENTED();
+}
+
+uptr internal_filesize(fd_t fd) {
+ UNIMPLEMENTED();
+}
+
+int internal_dup2(int oldfd, int newfd) {
+ UNIMPLEMENTED();
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+ UNIMPLEMENTED();
+}
+
+int internal_sched_yield() {
+ Sleep(0);
+ return 0;
+}
+
+void internal__exit(int exitcode) {
+ _exit(exitcode);
+}
+
+// ---------------------- BlockingMutex ---------------- {{{1
+const uptr LOCK_UNINITIALIZED = 0;
+const uptr LOCK_READY = (uptr)-1;
+
+BlockingMutex::BlockingMutex(LinkerInitialized li) {
+ // FIXME: see comments in BlockingMutex::Lock() for the details.
+ CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
+
+ CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
+ InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+ owner_ = LOCK_READY;
+}
+
+void BlockingMutex::Lock() {
+ if (owner_ == LOCK_UNINITIALIZED) {
+ // FIXME: hm, global BlockingMutex objects are not initialized?!?
+ // This might be a side effect of the clang+cl+link Frankenbuild...
+ new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));
+
+ // FIXME: If it turns out the linker doesn't invoke our
+ // constructors, we should probably manually Lock/Unlock all the global
+ // locks while we're starting in one thread to avoid double-init races.
+ }
+ EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+ CHECK_EQ(owner_, LOCK_READY);
+ owner_ = GetThreadSelf();
+}
+
+void BlockingMutex::Unlock() {
+ CHECK_EQ(owner_, GetThreadSelf());
+ owner_ = LOCK_READY;
+ LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+}
+
+} // namespace __sanitizer
+
+#endif // _WIN32
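
For reference, the BlockingMutex above is a thin wrapper over a Win32 CRITICAL_SECTION with an owner field for sanity checks. A plain RAII sketch of the same idea, without the linker-initialization workaround or owner tracking, could look like the following; the class names are made up and not part of the runtime.

// Hedged sketch: a minimal CRITICAL_SECTION-backed mutex plus a scoped lock.
// It deliberately skips the placement-new/linker-initialization tricks used
// in BlockingMutex above.
#include <windows.h>

class Win32Mutex {
 public:
  Win32Mutex() { InitializeCriticalSection(&cs_); }
  ~Win32Mutex() { DeleteCriticalSection(&cs_); }
  void Lock() { EnterCriticalSection(&cs_); }
  void Unlock() { LeaveCriticalSection(&cs_); }

 private:
  CRITICAL_SECTION cs_;
  Win32Mutex(const Win32Mutex&);           // non-copyable (pre-C++11 style)
  void operator=(const Win32Mutex&);
};

class ScopedLock {
 public:
  explicit ScopedLock(Win32Mutex &m) : m_(m) { m_.Lock(); }
  ~ScopedLock() { m_.Unlock(); }
 private:
  Win32Mutex &m_;
  ScopedLock(const ScopedLock&);
  void operator=(const ScopedLock&);
};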