path: root/gcc-4.9/libsanitizer/tsan
Diffstat (limited to 'gcc-4.9/libsanitizer/tsan')
-rw-r--r--  gcc-4.9/libsanitizer/tsan/Makefile.am | 90
-rw-r--r--  gcc-4.9/libsanitizer/tsan/Makefile.in | 678
-rw-r--r--  gcc-4.9/libsanitizer/tsan/libtool-version | 6
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_clock.cc | 109
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_clock.h | 78
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_defs.h | 165
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_fd.cc | 298
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_fd.h | 63
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_flags.cc | 127
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_flags.h | 94
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_ignoreset.cc | 45
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_ignoreset.h | 36
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interceptors.cc | 2295
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interface.cc | 96
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interface.h | 65
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interface_ann.cc | 462
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interface_ann.h | 31
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interface_atomic.cc | 677
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interface_atomic.h | 203
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interface_inl.h | 83
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interface_java.cc | 329
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_interface_java.h | 81
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_md5.cc | 243
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_mman.cc | 273
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_mman.h | 82
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_mutex.cc | 272
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_mutex.h | 80
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_mutexset.cc | 87
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_mutexset.h | 63
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_platform.h | 171
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_platform_linux.cc | 386
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_platform_mac.cc | 93
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_platform_windows.cc | 45
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_report.cc | 302
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_report.h | 119
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_rtl.cc | 771
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_rtl.h | 777
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_rtl_amd64.S | 302
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_rtl_mutex.cc | 346
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_rtl_report.cc | 722
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_rtl_thread.cc | 397
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_stat.cc | 485
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_stat.h | 483
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_suppressions.cc | 168
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_suppressions.h | 27
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_symbolize.cc | 156
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_symbolize.h | 31
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc | 191
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_sync.cc | 304
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_sync.h | 125
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_trace.h | 76
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_update_shadow_word_inl.h | 74
-rw-r--r--  gcc-4.9/libsanitizer/tsan/tsan_vector.h | 113
53 files changed, 13875 insertions(+), 0 deletions(-)
diff --git a/gcc-4.9/libsanitizer/tsan/Makefile.am b/gcc-4.9/libsanitizer/tsan/Makefile.am
new file mode 100644
index 000000000..39ed2529c
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/Makefile.am
@@ -0,0 +1,90 @@
+AM_CPPFLAGS = -I $(top_srcdir) -I $(top_srcdir)/include
+
+# May be used by toolexeclibdir.
+gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
+
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
+AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ACLOCAL_AMFLAGS = -I m4
+
+toolexeclib_LTLIBRARIES = libtsan.la
+
+tsan_files = \
+ tsan_clock.cc \
+ tsan_interface_atomic.cc \
+ tsan_mutex.cc \
+ tsan_report.cc \
+ tsan_rtl_thread.cc \
+ tsan_symbolize.cc \
+ tsan_flags.cc \
+ tsan_interface.cc \
+ tsan_platform_linux.cc \
+ tsan_rtl.cc \
+ tsan_stat.cc \
+ tsan_sync.cc \
+ tsan_ignoreset.cc \
+ tsan_interceptors.cc \
+ tsan_md5.cc \
+ tsan_platform_mac.cc \
+ tsan_rtl_mutex.cc \
+ tsan_suppressions.cc \
+ tsan_interface_ann.cc \
+ tsan_mman.cc \
+ tsan_rtl_report.cc \
+ tsan_fd.cc \
+ tsan_interface_java.cc \
+ tsan_mutexset.cc \
+ tsan_symbolize_addr2line_linux.cc \
+ tsan_rtl_amd64.S
+
+libtsan_la_SOURCES = $(tsan_files)
+libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la
+if LIBBACKTRACE_SUPPORTED
+libtsan_la_LIBADD += $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
+endif
+libtsan_la_LIBADD += $(LIBSTDCXX_RAW_CXX_LDFLAGS)
+libtsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libtsan)
+
+# Work around what appears to be a GNU make bug handling MAKEFLAGS
+# values defined in terms of make variables, as is the case for CC and
+# friends when we are called from the top level Makefile.
+AM_MAKEFLAGS = \
+ "AR_FLAGS=$(AR_FLAGS)" \
+ "CC_FOR_BUILD=$(CC_FOR_BUILD)" \
+ "CFLAGS=$(CFLAGS)" \
+ "CXXFLAGS=$(CXXFLAGS)" \
+ "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
+ "CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \
+ "INSTALL=$(INSTALL)" \
+ "INSTALL_DATA=$(INSTALL_DATA)" \
+ "INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
+ "INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \
+ "JC1FLAGS=$(JC1FLAGS)" \
+ "LDFLAGS=$(LDFLAGS)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \
+ "MAKE=$(MAKE)" \
+ "MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \
+ "PICFLAG=$(PICFLAG)" \
+ "PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \
+ "SHELL=$(SHELL)" \
+ "RUNTESTFLAGS=$(RUNTESTFLAGS)" \
+ "exec_prefix=$(exec_prefix)" \
+ "infodir=$(infodir)" \
+ "libdir=$(libdir)" \
+ "prefix=$(prefix)" \
+ "includedir=$(includedir)" \
+ "AR=$(AR)" \
+ "AS=$(AS)" \
+ "LD=$(LD)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "NM=$(NM)" \
+ "PICFLAG=$(PICFLAG)" \
+ "RANLIB=$(RANLIB)" \
+ "DESTDIR=$(DESTDIR)"
+
+MAKEOVERRIDES=
+
+## ################################################################
+
diff --git a/gcc-4.9/libsanitizer/tsan/Makefile.in b/gcc-4.9/libsanitizer/tsan/Makefile.in
new file mode 100644
index 000000000..01c27b9b7
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/Makefile.in
@@ -0,0 +1,678 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+@LIBBACKTRACE_SUPPORTED_TRUE@am__append_1 = $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
+subdir = tsan
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \
+ $(top_srcdir)/../config/depstand.m4 \
+ $(top_srcdir)/../config/lead-dot.m4 \
+ $(top_srcdir)/../config/libstdc++-raw-cxx.m4 \
+ $(top_srcdir)/../config/multi.m4 \
+ $(top_srcdir)/../config/override.m4 \
+ $(top_srcdir)/../config/stdint.m4 \
+ $(top_srcdir)/../ltoptions.m4 $(top_srcdir)/../ltsugar.m4 \
+ $(top_srcdir)/../ltversion.m4 $(top_srcdir)/../lt~obsolete.m4 \
+ $(top_srcdir)/acinclude.m4 $(top_srcdir)/../libtool.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__installdirs = "$(DESTDIR)$(toolexeclibdir)"
+LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
+am__DEPENDENCIES_1 =
+libtsan_la_DEPENDENCIES = \
+ $(top_builddir)/sanitizer_common/libsanitizer_common.la \
+ $(top_builddir)/interception/libinterception.la \
+ $(am__append_1) $(am__DEPENDENCIES_1)
+am__objects_1 = tsan_clock.lo tsan_interface_atomic.lo tsan_mutex.lo \
+ tsan_report.lo tsan_rtl_thread.lo tsan_symbolize.lo \
+ tsan_flags.lo tsan_interface.lo tsan_platform_linux.lo \
+ tsan_rtl.lo tsan_stat.lo tsan_sync.lo tsan_ignoreset.lo \
+ tsan_interceptors.lo tsan_md5.lo tsan_platform_mac.lo \
+ tsan_rtl_mutex.lo tsan_suppressions.lo tsan_interface_ann.lo \
+ tsan_mman.lo tsan_rtl_report.lo tsan_fd.lo \
+ tsan_interface_java.lo tsan_mutexset.lo \
+ tsan_symbolize_addr2line_linux.lo tsan_rtl_amd64.lo
+am_libtsan_la_OBJECTS = $(am__objects_1)
+libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
+libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
+ $(CXXFLAGS) $(libtsan_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/../depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
+LTCPPASCOMPILE = $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(libtsan_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+ACLOCAL = @ACLOCAL@
+ALLOC_FILE = @ALLOC_FILE@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BACKTRACE_SUPPORTED = @BACKTRACE_SUPPORTED@
+BACKTRACE_SUPPORTS_THREADS = @BACKTRACE_SUPPORTS_THREADS@
+BACKTRACE_USES_MALLOC = @BACKTRACE_USES_MALLOC@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FORMAT_FILE = @FORMAT_FILE@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSTDCXX_RAW_CXX_CXXFLAGS = @LIBSTDCXX_RAW_CXX_CXXFLAGS@
+LIBSTDCXX_RAW_CXX_LDFLAGS = @LIBSTDCXX_RAW_CXX_LDFLAGS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+VERSION = @VERSION@
+VIEW_FILE = @VIEW_FILE@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_shared = @enable_shared@
+enable_static = @enable_static@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+link_libasan = @link_libasan@
+link_liblsan = @link_liblsan@
+link_libtsan = @link_libtsan@
+link_libubsan = @link_libubsan@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+multi_basedir = @multi_basedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_noncanonical = @target_noncanonical@
+target_os = @target_os@
+target_vendor = @target_vendor@
+toolexecdir = @toolexecdir@
+toolexeclibdir = @toolexeclibdir@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AM_CPPFLAGS = -I $(top_srcdir) -I $(top_srcdir)/include
+
+# May be used by toolexeclibdir.
+gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
+AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
+ -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
+ -fomit-frame-pointer -funwind-tables -fvisibility=hidden \
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ACLOCAL_AMFLAGS = -I m4
+toolexeclib_LTLIBRARIES = libtsan.la
+tsan_files = \
+ tsan_clock.cc \
+ tsan_interface_atomic.cc \
+ tsan_mutex.cc \
+ tsan_report.cc \
+ tsan_rtl_thread.cc \
+ tsan_symbolize.cc \
+ tsan_flags.cc \
+ tsan_interface.cc \
+ tsan_platform_linux.cc \
+ tsan_rtl.cc \
+ tsan_stat.cc \
+ tsan_sync.cc \
+ tsan_ignoreset.cc \
+ tsan_interceptors.cc \
+ tsan_md5.cc \
+ tsan_platform_mac.cc \
+ tsan_rtl_mutex.cc \
+ tsan_suppressions.cc \
+ tsan_interface_ann.cc \
+ tsan_mman.cc \
+ tsan_rtl_report.cc \
+ tsan_fd.cc \
+ tsan_interface_java.cc \
+ tsan_mutexset.cc \
+ tsan_symbolize_addr2line_linux.cc \
+ tsan_rtl_amd64.S
+
+libtsan_la_SOURCES = $(tsan_files)
+libtsan_la_LIBADD = \
+ $(top_builddir)/sanitizer_common/libsanitizer_common.la \
+ $(top_builddir)/interception/libinterception.la \
+ $(am__append_1) $(LIBSTDCXX_RAW_CXX_LDFLAGS)
+libtsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libtsan)
+
+# Work around what appears to be a GNU make bug handling MAKEFLAGS
+# values defined in terms of make variables, as is the case for CC and
+# friends when we are called from the top level Makefile.
+AM_MAKEFLAGS = \
+ "AR_FLAGS=$(AR_FLAGS)" \
+ "CC_FOR_BUILD=$(CC_FOR_BUILD)" \
+ "CFLAGS=$(CFLAGS)" \
+ "CXXFLAGS=$(CXXFLAGS)" \
+ "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
+ "CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \
+ "INSTALL=$(INSTALL)" \
+ "INSTALL_DATA=$(INSTALL_DATA)" \
+ "INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
+ "INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \
+ "JC1FLAGS=$(JC1FLAGS)" \
+ "LDFLAGS=$(LDFLAGS)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \
+ "MAKE=$(MAKE)" \
+ "MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \
+ "PICFLAG=$(PICFLAG)" \
+ "PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \
+ "SHELL=$(SHELL)" \
+ "RUNTESTFLAGS=$(RUNTESTFLAGS)" \
+ "exec_prefix=$(exec_prefix)" \
+ "infodir=$(infodir)" \
+ "libdir=$(libdir)" \
+ "prefix=$(prefix)" \
+ "includedir=$(includedir)" \
+ "AR=$(AR)" \
+ "AS=$(AS)" \
+ "LD=$(LD)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "NM=$(NM)" \
+ "PICFLAG=$(PICFLAG)" \
+ "RANLIB=$(RANLIB)" \
+ "DESTDIR=$(DESTDIR)"
+
+MAKEOVERRIDES =
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .S .cc .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tsan/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign tsan/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-toolexeclibLTLIBRARIES: $(toolexeclib_LTLIBRARIES)
+ @$(NORMAL_INSTALL)
+ test -z "$(toolexeclibdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibdir)"
+ @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \
+ list2=; for p in $$list; do \
+ if test -f $$p; then \
+ list2="$$list2 $$p"; \
+ else :; fi; \
+ done; \
+ test -z "$$list2" || { \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(toolexeclibdir)'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(toolexeclibdir)"; \
+ }
+
+uninstall-toolexeclibLTLIBRARIES:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \
+ for p in $$list; do \
+ $(am__strip_dir) \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(toolexeclibdir)/$$f'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(toolexeclibdir)/$$f"; \
+ done
+
+clean-toolexeclibLTLIBRARIES:
+ -test -z "$(toolexeclib_LTLIBRARIES)" || rm -f $(toolexeclib_LTLIBRARIES)
+ @list='$(toolexeclib_LTLIBRARIES)'; for p in $$list; do \
+ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+ test "$$dir" != "$$p" || dir=.; \
+ echo "rm -f \"$${dir}/so_locations\""; \
+ rm -f "$${dir}/so_locations"; \
+ done
+libtsan.la: $(libtsan_la_OBJECTS) $(libtsan_la_DEPENDENCIES)
+ $(libtsan_la_LINK) -rpath $(toolexeclibdir) $(libtsan_la_OBJECTS) $(libtsan_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_clock.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_fd.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_flags.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_ignoreset.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interceptors.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_ann.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_atomic.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_java.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_md5.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mman.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutex.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutexset.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_report.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_amd64.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mutex.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_report.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_thread.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_stat.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_suppressions.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize_addr2line_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_sync.Plo@am__quote@
+
+.S.o:
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ $<
+
+.S.obj:
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.S.lo:
+@am__fastdepCCAS_TRUE@ $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(LTCPPASCOMPILE) -c -o $@ $<
+
+.cc.o:
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $<
+
+.cc.obj:
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.cc.lo:
+@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+ for dir in "$(DESTDIR)$(toolexeclibdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-toolexeclibLTLIBRARIES \
+ mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-toolexeclibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-toolexeclibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+ clean-libtool clean-toolexeclibLTLIBRARIES ctags distclean \
+ distclean-compile distclean-generic distclean-libtool \
+ distclean-tags dvi dvi-am html html-am info info-am install \
+ install-am install-data install-data-am install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip install-toolexeclibLTLIBRARIES installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags uninstall uninstall-am uninstall-toolexeclibLTLIBRARIES
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/gcc-4.9/libsanitizer/tsan/libtool-version b/gcc-4.9/libsanitizer/tsan/libtool-version
new file mode 100644
index 000000000..204fdd2d8
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/libtool-version
@@ -0,0 +1,6 @@
+# This file is used to maintain libtool version info for libtsan. See
+# the libtool manual to understand the meaning of the fields. This is
+# a separate file so that version updates don't involve re-running
+# automake.
+# CURRENT:REVISION:AGE
+0:0:0
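A note on the version triple above: libtool's -version-info CURRENT:REVISION:AGE maps, on ELF targets, to a shared object named lib<name>.so.(CURRENT-AGE).(AGE).(REVISION). Worked out for the 0:0:0 here: (0-0).(0).(0), so the build installs libtsan.so.0.0.0.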
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_clock.cc b/gcc-4.9/libsanitizer/tsan/tsan_clock.cc
new file mode 100644
index 000000000..5d45a5d15
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_clock.cc
@@ -0,0 +1,109 @@
+//===-- tsan_clock.cc -----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_clock.h"
+#include "tsan_rtl.h"
+
+// It's possible to optimize clock operations for some important cases
+// so that they are O(1). The cases include singletons, once-objects, and
+// local mutexes.
+// First, SyncClock must be re-implemented to allow indexing by tid.
+// It need not be a full vector clock, though; for example, it may
+// be a multi-level table.
+// Then, each slot in SyncClock must contain a dirty bit (it can be packed
+// with the clock value, so there is no space increase). The acquire algorithm looks
+// as follows:
+// void acquire(thr, tid, thr_clock, sync_clock) {
+// if (!sync_clock[tid].dirty)
+// return; // No new info to acquire.
+// // This handles constant reads of singleton pointers and
+// // stop-flags.
+// acquire_impl(thr_clock, sync_clock); // As usual, O(N).
+// sync_clock[tid].dirty = false;
+// sync_clock.dirty_count--;
+// }
+// The release operation looks as follows:
+// void release(thr, tid, thr_clock, sync_clock) {
+// // thr->sync_cache is a simple fixed-size hash-based cache that holds
+// // several previous sync_clock's.
+// if (thr->sync_cache[sync_clock] >= thr->last_acquire_epoch) {
+// // The thread has done no acquire operations since the last release on this clock.
+// // So update only the thread's slot (other slots can't possibly change).
+// sync_clock[tid].clock = thr->epoch;
+// if (sync_clock.dirty_count == sync_clock.cnt
+// || (sync_clock.dirty_count == sync_clock.cnt - 1
+// && sync_clock[tid].dirty == false))
+// // All dirty flags are set, bail out.
+// return;
+// set all dirty bits, but preserve the thread's bit. // O(N)
+// update sync_clock.dirty_count;
+// return;
+// }
+// release_impl(thr_clock, sync_clock); // As usual, O(N).
+// set all dirty bits, but preserve the thread's bit.
+// // The previous step is combined with release_impl(), so that
+// // we scan the arrays only once.
+// update sync_clock.dirty_count;
+// }
+
+namespace __tsan {
+
+ThreadClock::ThreadClock() {
+ nclk_ = 0;
+ for (uptr i = 0; i < (uptr)kMaxTidInClock; i++)
+ clk_[i] = 0;
+}
+
+void ThreadClock::acquire(const SyncClock *src) {
+ DCHECK(nclk_ <= kMaxTid);
+ DCHECK(src->clk_.Size() <= kMaxTid);
+
+ const uptr nclk = src->clk_.Size();
+ if (nclk == 0)
+ return;
+ nclk_ = max(nclk_, nclk);
+ for (uptr i = 0; i < nclk; i++) {
+ if (clk_[i] < src->clk_[i])
+ clk_[i] = src->clk_[i];
+ }
+}
+
+void ThreadClock::release(SyncClock *dst) const {
+ DCHECK(nclk_ <= kMaxTid);
+ DCHECK(dst->clk_.Size() <= kMaxTid);
+
+ if (dst->clk_.Size() < nclk_)
+ dst->clk_.Resize(nclk_);
+ for (uptr i = 0; i < nclk_; i++) {
+ if (dst->clk_[i] < clk_[i])
+ dst->clk_[i] = clk_[i];
+ }
+}
+
+void ThreadClock::ReleaseStore(SyncClock *dst) const {
+ DCHECK(nclk_ <= kMaxTid);
+ DCHECK(dst->clk_.Size() <= kMaxTid);
+
+ if (dst->clk_.Size() < nclk_)
+ dst->clk_.Resize(nclk_);
+ for (uptr i = 0; i < nclk_; i++)
+ dst->clk_[i] = clk_[i];
+ for (uptr i = nclk_; i < dst->clk_.Size(); i++)
+ dst->clk_[i] = 0;
+}
+
+void ThreadClock::acq_rel(SyncClock *dst) {
+ acquire(dst);
+ release(dst);
+}
+
+SyncClock::SyncClock()
+ : clk_(MBlockClock) {
+}
+} // namespace __tsan
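The acquire/release loops above are plain element-wise vector-clock merges. A minimal standalone sketch of the same semantics (an assumed simplification: std::vector<uint64_t> stands in for TSan's internal Vector/ThreadClock types):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    using Clock = std::vector<uint64_t>;

    // acquire: pull the sync object's clock into the thread's clock
    // (element-wise max), so the thread "knows" everything that was
    // released into the sync object -- mirrors ThreadClock::acquire().
    void acquire(Clock &thr, const Clock &sync) {
      if (thr.size() < sync.size()) thr.resize(sync.size(), 0);
      for (size_t i = 0; i < sync.size(); i++)
        thr[i] = std::max(thr[i], sync[i]);
    }

    // release: push the thread's clock into the sync object's clock,
    // mirroring ThreadClock::release() above.
    void release(const Clock &thr, Clock &sync) {
      if (sync.size() < thr.size()) sync.resize(thr.size(), 0);
      for (size_t i = 0; i < thr.size(); i++)
        sync[i] = std::max(sync[i], thr[i]);
    }

If thread 0 releases at epoch 5 and thread 1 later acquires the same sync clock, thread 1's slot for tid 0 becomes >= 5: exactly the happens-before edge the race detector consumes.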
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_clock.h b/gcc-4.9/libsanitizer/tsan/tsan_clock.h
new file mode 100644
index 000000000..8e4bf99ca
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_clock.h
@@ -0,0 +1,78 @@
+//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_CLOCK_H
+#define TSAN_CLOCK_H
+
+#include "tsan_defs.h"
+#include "tsan_vector.h"
+
+namespace __tsan {
+
+// The clock that lives in sync variables (mutexes, atomics, etc).
+class SyncClock {
+ public:
+ SyncClock();
+
+ uptr size() const {
+ return clk_.Size();
+ }
+
+ void Reset() {
+ clk_.Reset();
+ }
+
+ private:
+ Vector<u64> clk_;
+ friend struct ThreadClock;
+};
+
+// The clock that lives in threads.
+struct ThreadClock {
+ public:
+ ThreadClock();
+
+ u64 get(unsigned tid) const {
+ DCHECK_LT(tid, kMaxTidInClock);
+ return clk_[tid];
+ }
+
+ void set(unsigned tid, u64 v) {
+ DCHECK_LT(tid, kMaxTid);
+ DCHECK_GE(v, clk_[tid]);
+ clk_[tid] = v;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ }
+
+ void tick(unsigned tid) {
+ DCHECK_LT(tid, kMaxTid);
+ clk_[tid]++;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ }
+
+ uptr size() const {
+ return nclk_;
+ }
+
+ void acquire(const SyncClock *src);
+ void release(SyncClock *dst) const;
+ void acq_rel(SyncClock *dst);
+ void ReleaseStore(SyncClock *dst) const;
+
+ private:
+ uptr nclk_;
+ u64 clk_[kMaxTidInClock];
+};
+
+} // namespace __tsan
+
+#endif // TSAN_CLOCK_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_defs.h b/gcc-4.9/libsanitizer/tsan/tsan_defs.h
new file mode 100644
index 000000000..3f20797ff
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_defs.h
@@ -0,0 +1,165 @@
+//===-- tsan_defs.h ---------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_DEFS_H
+#define TSAN_DEFS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_stat.h"
+
+#ifndef TSAN_DEBUG
+#define TSAN_DEBUG 0
+#endif // TSAN_DEBUG
+
+namespace __tsan {
+
+#ifdef TSAN_GO
+const bool kGoMode = true;
+const bool kCppMode = false;
+const char *const kTsanOptionsEnv = "GORACE";
+// Go linker does not support weak symbols.
+#define CPP_WEAK
+#else
+const bool kGoMode = false;
+const bool kCppMode = true;
+const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
+#define CPP_WEAK WEAK
+#endif
+
+const int kTidBits = 13;
+const unsigned kMaxTid = 1 << kTidBits;
+const unsigned kMaxTidInClock = kMaxTid * 2; // This includes the msb 'freed' bit.
+const int kClkBits = 42;
+const uptr kShadowStackSize = 64 * 1024;
+const uptr kTraceStackSize = 256;
+
+#ifdef TSAN_SHADOW_COUNT
+# if TSAN_SHADOW_COUNT == 2 \
+ || TSAN_SHADOW_COUNT == 4 || TSAN_SHADOW_COUNT == 8
+const uptr kShadowCnt = TSAN_SHADOW_COUNT;
+# else
+# error "TSAN_SHADOW_COUNT must be one of 2,4,8"
+# endif
+#else
+// Count of shadow values in a shadow cell.
+const uptr kShadowCnt = 4;
+#endif
+
+// That many user bytes are mapped onto a single shadow cell.
+const uptr kShadowCell = 8;
+
+// Size of a single shadow value (u64).
+const uptr kShadowSize = 8;
+
+// Shadow memory is kShadowMultiplier times larger than user memory.
+const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
+
+#if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS
+const bool kCollectStats = true;
+#else
+const bool kCollectStats = false;
+#endif
+
+// The following "build consistency" machinery ensures that all source files
+// are built in the same configuration. Inconsistent builds lead to
+// hard-to-debug crashes.
+#if TSAN_DEBUG
+void build_consistency_debug();
+#else
+void build_consistency_release();
+#endif
+
+#if TSAN_COLLECT_STATS
+void build_consistency_stats();
+#else
+void build_consistency_nostats();
+#endif
+
+#if TSAN_SHADOW_COUNT == 1
+void build_consistency_shadow1();
+#elif TSAN_SHADOW_COUNT == 2
+void build_consistency_shadow2();
+#elif TSAN_SHADOW_COUNT == 4
+void build_consistency_shadow4();
+#else
+void build_consistency_shadow8();
+#endif
+
+static inline void USED build_consistency() {
+#if TSAN_DEBUG
+ build_consistency_debug();
+#else
+ build_consistency_release();
+#endif
+#if TSAN_COLLECT_STATS
+ build_consistency_stats();
+#else
+ build_consistency_nostats();
+#endif
+#if TSAN_SHADOW_COUNT == 1
+ build_consistency_shadow1();
+#elif TSAN_SHADOW_COUNT == 2
+ build_consistency_shadow2();
+#elif TSAN_SHADOW_COUNT == 4
+ build_consistency_shadow4();
+#else
+ build_consistency_shadow8();
+#endif
+}
+
+template<typename T>
+T min(T a, T b) {
+ return a < b ? a : b;
+}
+
+template<typename T>
+T max(T a, T b) {
+ return a > b ? a : b;
+}
+
+template<typename T>
+T RoundUp(T p, u64 align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)(((u64)p + align - 1) & ~(align - 1));
+}
+
+template<typename T>
+T RoundDown(T p, u64 align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)((u64)p & ~(align - 1));
+}
+
+// Zeroes the high part; returns the low 'bits' bits.
+template<typename T>
+T GetLsb(T v, int bits) {
+ return (T)((u64)v & ((1ull << bits) - 1));
+}
+
+struct MD5Hash {
+ u64 hash[2];
+ bool operator==(const MD5Hash &other) const;
+};
+
+MD5Hash md5_hash(const void *data, uptr size);
+
+struct ThreadState;
+class ThreadContext;
+struct Context;
+struct ReportStack;
+class ReportDesc;
+class RegionAlloc;
+class StackTrace;
+struct MBlock;
+
+} // namespace __tsan
+
+#endif // TSAN_DEFS_H
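The sizing constants and bit-twiddling helpers above are easiest to check with concrete numbers: with kShadowCell = 8, kShadowSize = 8 and kShadowCnt = 4, kShadowMultiplier = 8 * 4 / 8 = 4, so shadow memory is 4x the size of application memory. A self-contained sketch (constants copied here rather than included, since the header drags in the rest of the runtime):

    #include <cassert>
    #include <cstdint>
    typedef uint64_t u64;

    const u64 kShadowCell = 8;   // user bytes per shadow cell
    const u64 kShadowSize = 8;   // bytes per shadow value (u64)
    const u64 kShadowCnt  = 4;   // shadow values per cell

    template<typename T> T RoundUp(T p, u64 align) {
      return (T)(((u64)p + align - 1) & ~(align - 1));
    }
    template<typename T> T GetLsb(T v, int bits) {
      return (T)((u64)v & ((1ull << bits) - 1));
    }

    int main() {
      assert(kShadowSize * kShadowCnt / kShadowCell == 4);  // 4x user memory
      assert(RoundUp<u64>(13, 8) == 16);        // next multiple of 8
      assert(GetLsb<u64>(0x1234, 8) == 0x34);   // keep the low 8 bits only
      return 0;
    }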
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_fd.cc b/gcc-4.9/libsanitizer/tsan/tsan_fd.cc
new file mode 100644
index 000000000..b7ac3111c
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_fd.cc
@@ -0,0 +1,298 @@
+//===-- tsan_fd.cc --------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_fd.h"
+#include "tsan_rtl.h"
+#include <sanitizer_common/sanitizer_atomic.h>
+
+namespace __tsan {
+
+const int kTableSizeL1 = 1024;
+const int kTableSizeL2 = 1024;
+const int kTableSize = kTableSizeL1 * kTableSizeL2;
+
+struct FdSync {
+ atomic_uint64_t rc;
+};
+
+struct FdDesc {
+ FdSync *sync;
+ int creation_tid;
+ u32 creation_stack;
+};
+
+struct FdContext {
+ atomic_uintptr_t tab[kTableSizeL1];
+ // Addresses used for synchronization.
+ FdSync globsync;
+ FdSync filesync;
+ FdSync socksync;
+ u64 connectsync;
+};
+
+static FdContext fdctx;
+
+static bool bogusfd(int fd) {
+ // Apparently a bogus fd value.
+ return fd < 0 || fd >= kTableSize;
+}
+
+static FdSync *allocsync() {
+ FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
+ atomic_store(&s->rc, 1, memory_order_relaxed);
+ return s;
+}
+
+static FdSync *ref(FdSync *s) {
+ if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
+ atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
+ return s;
+}
+
+static void unref(ThreadState *thr, uptr pc, FdSync *s) {
+ if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
+ if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
+ CHECK_NE(s, &fdctx.globsync);
+ CHECK_NE(s, &fdctx.filesync);
+ CHECK_NE(s, &fdctx.socksync);
+ SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
+ if (v)
+ DestroyAndFree(v);
+ internal_free(s);
+ }
+ }
+}
+
+static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
+ CHECK_GE(fd, 0);
+ CHECK_LT(fd, kTableSize);
+ atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
+ uptr l1 = atomic_load(pl1, memory_order_consume);
+ if (l1 == 0) {
+ uptr size = kTableSizeL2 * sizeof(FdDesc);
+ // We need this to reside in user memory to properly catch races on it.
+ void *p = user_alloc(thr, pc, size);
+ internal_memset(p, 0, size);
+ MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
+ if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
+ l1 = (uptr)p;
+ else
+ user_free(thr, pc, p);
+ }
+ return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
+}
+
+// s must already be ref'ed.
+static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
+ FdDesc *d = fddesc(thr, pc, fd);
+ // As a matter of fact, we don't intercept all close calls.
+ // See e.g. libc __res_iclose().
+ if (d->sync) {
+ unref(thr, pc, d->sync);
+ d->sync = 0;
+ }
+ if (flags()->io_sync == 0) {
+ unref(thr, pc, s);
+ } else if (flags()->io_sync == 1) {
+ d->sync = s;
+ } else if (flags()->io_sync == 2) {
+ unref(thr, pc, s);
+ d->sync = &fdctx.globsync;
+ }
+ d->creation_tid = thr->tid;
+ d->creation_stack = CurrentStackId(thr, pc);
+ // To catch races between fd usage and open.
+ MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
+}
+
+void FdInit() {
+ atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
+ atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
+ atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
+}
+
+void FdOnFork(ThreadState *thr, uptr pc) {
+  // On fork() we need to reset all fd's, because the child is going to
+  // close all of them, and that would cause races between the previous
+  // read/write and the close.
+ for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+ FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+ if (tab == 0)
+ break;
+ for (int l2 = 0; l2 < kTableSizeL2; l2++) {
+ FdDesc *d = &tab[l2];
+ MemoryResetRange(thr, pc, (uptr)d, 8);
+ }
+ }
+}
+
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
+ for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+ FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+ if (tab == 0)
+ break;
+ if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
+ int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
+ FdDesc *d = &tab[l2];
+      *fd = l1 * kTableSizeL2 + l2;
+ *tid = d->creation_tid;
+ *stack = d->creation_stack;
+ return true;
+ }
+ }
+ return false;
+}
+
+void FdAcquire(ThreadState *thr, uptr pc, int fd) {
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ FdSync *s = d->sync;
+ DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ if (s)
+ Acquire(thr, pc, (uptr)s);
+}
+
+void FdRelease(ThreadState *thr, uptr pc, int fd) {
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ FdSync *s = d->sync;
+ DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ if (s)
+ Release(thr, pc, (uptr)s);
+}
+
+void FdAccess(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+}
+
+void FdClose(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ // To catch races between fd usage and close.
+ MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+  // We need to clear it, because if we do not intercept some call
+  // that creates an fd, we will hit false positives.
+ MemoryResetRange(thr, pc, (uptr)d, 8);
+ unref(thr, pc, d->sync);
+ d->sync = 0;
+ d->creation_tid = 0;
+ d->creation_stack = 0;
+}
+
+void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, &fdctx.filesync);
+}
+
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
+ DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
+ if (bogusfd(oldfd) || bogusfd(newfd))
+ return;
+  // Ignore the case when the user dups a not-yet-connected socket.
+ FdDesc *od = fddesc(thr, pc, oldfd);
+ MemoryRead(thr, pc, (uptr)od, kSizeLog8);
+ FdClose(thr, pc, newfd);
+ init(thr, pc, newfd, ref(od->sync));
+}
+
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
+ DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
+ FdSync *s = allocsync();
+ init(thr, pc, rfd, ref(s));
+ init(thr, pc, wfd, ref(s));
+ unref(thr, pc, s);
+}
+
+void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, allocsync());
+}
+
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, 0);
+}
+
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, 0);
+}
+
+void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, allocsync());
+}
+
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ // It can be a UDP socket.
+ init(thr, pc, fd, &fdctx.socksync);
+}
+
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
+ DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
+ if (bogusfd(fd))
+ return;
+ // Synchronize connect->accept.
+ Acquire(thr, pc, (uptr)&fdctx.connectsync);
+ init(thr, pc, newfd, &fdctx.socksync);
+}
+
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ // Synchronize connect->accept.
+ Release(thr, pc, (uptr)&fdctx.connectsync);
+}
+
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, &fdctx.socksync);
+}
+
+uptr File2addr(char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+uptr Dir2addr(char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+} // namespace __tsan
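fddesc() and FdLocation() are inverse index computations over a two-level table: the top level holds kTableSizeL1 pointers, and each lazily-allocated leaf holds kTableSizeL2 FdDesc entries. A small sketch of the arithmetic (constant copied from above):

    #include <cassert>

    const int kTableSizeL2 = 1024;

    int main() {
      int fd = 1500;
      int l1 = fd / kTableSizeL2;  // which leaf table (here: 1)
      int l2 = fd % kTableSizeL2;  // slot within the leaf (here: 476)
      assert(l1 * kTableSizeL2 + l2 == fd);  // FdLocation() reconstruction
      return 0;
    }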
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_fd.h b/gcc-4.9/libsanitizer/tsan/tsan_fd.h
new file mode 100644
index 000000000..330687322
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_fd.h
@@ -0,0 +1,63 @@
+//===-- tsan_fd.h -----------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// This file handles synchronization via IO.
+// People use IO for synchronization along the lines of:
+//
+// int X;
+// int client_socket; // initialized elsewhere
+// int server_socket; // initialized elsewhere
+//
+// Thread 1:
+// X = 42;
+// send(client_socket, ...);
+//
+// Thread 2:
+// if (recv(server_socket, ...) > 0)
+// assert(X == 42);
+//
+// This file determines the scope of the file descriptor (pipe, socket,
+// all local files, etc) and executes acquire and release operations on
+// the scope as necessary. Some scopes are very fine grained (e.g. pipe
+// operations synchronize only with operations on the same pipe), while
+// others are coarse-grained (e.g. all operations on local files synchronize
+// with each other).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FD_H
+#define TSAN_FD_H
+
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void FdInit();
+void FdAcquire(ThreadState *thr, uptr pc, int fd);
+void FdRelease(ThreadState *thr, uptr pc, int fd);
+void FdAccess(ThreadState *thr, uptr pc, int fd);
+void FdClose(ThreadState *thr, uptr pc, int fd);
+void FdFileCreate(ThreadState *thr, uptr pc, int fd);
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd);
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd);
+void FdEventCreate(ThreadState *thr, uptr pc, int fd);
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
+void FdPollCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
+void FdOnFork(ThreadState *thr, uptr pc);
+
+uptr File2addr(char *path);
+uptr Dir2addr(char *path);
+
+} // namespace __tsan
+
+#endif // TSAN_FD_H
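The pattern in the header comment, written out as a runnable demo (a hypothetical test program, not part of the runtime): with io_sync >= 1, the write() is modeled as FdRelease on the pipe's FdSync and the read() as FdAcquire on the same FdSync, so TSan sees the store to X as happening-before the assert. Built with -fsanitize=thread -pthread, this runs without a race report; replace the pipe with an unsynchronized flag and a race is reported.

    #include <cassert>
    #include <thread>
    #include <unistd.h>

    int X;

    int main() {
      int fds[2];
      if (pipe(fds) != 0) return 1;
      std::thread writer([&] {
        X = 42;
        char c = 1;
        (void)write(fds[1], &c, 1);    // release on the pipe's FdSync
      });
      std::thread reader([&] {
        char c;
        if (read(fds[0], &c, 1) == 1)  // acquire on the same FdSync
          assert(X == 42);             // ordered via the pipe, no race
      });
      writer.join();
      reader.join();
      return 0;
    }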
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_flags.cc b/gcc-4.9/libsanitizer/tsan/tsan_flags.cc
new file mode 100644
index 000000000..158e24f82
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_flags.cc
@@ -0,0 +1,127 @@
+//===-- tsan_flags.cc -----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_flags.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+Flags *flags() {
+ return &CTX()->flags;
+}
+
+// Can be overridden in the frontend.
+#ifdef TSAN_EXTERNAL_HOOKS
+void OverrideFlags(Flags *f);
+extern "C" const char* __tsan_default_options();
+#else
+void WEAK OverrideFlags(Flags *f) {
+ (void)f;
+}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+const char *WEAK __tsan_default_options() {
+ return "";
+}
+#endif
+
+static void ParseFlags(Flags *f, const char *env) {
+ ParseFlag(env, &f->enable_annotations, "enable_annotations");
+ ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
+ ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
+ ParseFlag(env, &f->suppress_java, "suppress_java");
+ ParseFlag(env, &f->report_bugs, "report_bugs");
+ ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
+ ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
+ ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
+ ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
+ ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
+ ParseFlag(env, &f->suppressions, "suppressions");
+ ParseFlag(env, &f->print_suppressions, "print_suppressions");
+ ParseFlag(env, &f->print_benign, "print_benign");
+ ParseFlag(env, &f->exitcode, "exitcode");
+ ParseFlag(env, &f->halt_on_error, "halt_on_error");
+ ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
+ ParseFlag(env, &f->profile_memory, "profile_memory");
+ ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
+ ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms");
+ ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb");
+ ParseFlag(env, &f->stop_on_start, "stop_on_start");
+ ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind");
+ ParseFlag(env, &f->history_size, "history_size");
+ ParseFlag(env, &f->io_sync, "io_sync");
+}
+
+void InitializeFlags(Flags *f, const char *env) {
+ internal_memset(f, 0, sizeof(*f));
+
+ // Default values.
+ f->enable_annotations = true;
+ f->suppress_equal_stacks = true;
+ f->suppress_equal_addresses = true;
+ f->suppress_java = false;
+ f->report_bugs = true;
+ f->report_thread_leaks = true;
+ f->report_destroy_locked = true;
+ f->report_signal_unsafe = true;
+ f->report_atomic_races = true;
+ f->force_seq_cst_atomics = false;
+ f->suppressions = "";
+ f->print_suppressions = false;
+ f->print_benign = false;
+ f->exitcode = 66;
+ f->halt_on_error = false;
+ f->atexit_sleep_ms = 1000;
+ f->profile_memory = "";
+ f->flush_memory_ms = 0;
+ f->flush_symbolizer_ms = 5000;
+ f->memory_limit_mb = 0;
+ f->stop_on_start = false;
+ f->running_on_valgrind = false;
+ f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go.
+ f->io_sync = 1;
+
+ SetCommonFlagsDefaults(f);
+
+ // Let a frontend override.
+ OverrideFlags(f);
+ ParseFlags(f, __tsan_default_options());
+ ParseCommonFlagsFromString(f, __tsan_default_options());
+  // Override from the environment (TSAN_OPTIONS).
+ ParseFlags(f, env);
+ ParseCommonFlagsFromString(f, env);
+
+ // Copy back to common flags.
+ *common_flags() = *f;
+
+ // Sanity check.
+ if (!f->report_bugs) {
+ f->report_thread_leaks = false;
+ f->report_destroy_locked = false;
+ f->report_signal_unsafe = false;
+ }
+
+ if (f->history_size < 0 || f->history_size > 7) {
+ Printf("ThreadSanitizer: incorrect value for history_size"
+ " (must be [0..7])\n");
+ Die();
+ }
+
+ if (f->io_sync < 0 || f->io_sync > 2) {
+ Printf("ThreadSanitizer: incorrect value for io_sync"
+ " (must be [0..2])\n");
+ Die();
+ }
+}
+
+} // namespace __tsan
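Because __tsan_default_options() is declared weak, an instrumented program can bake in its own defaults simply by defining the symbol (a usage sketch; the particular option values are arbitrary). The TSAN_OPTIONS environment string is parsed afterwards and still overrides these:

    // Linked into the instrumented program, not into the runtime.
    extern "C" const char *__tsan_default_options() {
      return "history_size=3 io_sync=2";
    }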
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_flags.h b/gcc-4.9/libsanitizer/tsan/tsan_flags.h
new file mode 100644
index 000000000..05d11a451
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_flags.h
@@ -0,0 +1,94 @@
+//===-- tsan_flags.h --------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+// NOTE: This file may be included into user code.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_FLAGS_H
+#define TSAN_FLAGS_H
+
+// ----------- ATTENTION -------------
+// A ThreadSanitizer user may provide their own implementation of the weak
+// symbol __tsan::OverrideFlags(__tsan::Flags*). Therefore, this header may
+// be included into user code and must not include other headers from TSan
+// or the common sanitizer runtime.
+
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __tsan {
+
+struct Flags : CommonFlags {
+ // Enable dynamic annotations, otherwise they are no-ops.
+ bool enable_annotations;
+  // Suppress a race report if we've already output another race report
+  // with the same stack.
+  bool suppress_equal_stacks;
+  // Suppress a race report if we've already output another race report
+  // on the same address.
+  bool suppress_equal_addresses;
+  // Suppress weird race reports that can be seen if a JVM is embedded
+  // into the process.
+  bool suppress_java;
+ // Turns off bug reporting entirely (useful for benchmarking).
+ bool report_bugs;
+ // Report thread leaks at exit?
+ bool report_thread_leaks;
+ // Report destruction of a locked mutex?
+ bool report_destroy_locked;
+ // Report violations of async signal-safety
+ // (e.g. malloc() call from a signal handler).
+ bool report_signal_unsafe;
+ // Report races between atomic and plain memory accesses.
+ bool report_atomic_races;
+ // If set, all atomics are effectively sequentially consistent (seq_cst),
+ // regardless of what user actually specified.
+ bool force_seq_cst_atomics;
+ // Suppressions filename.
+ const char *suppressions;
+ // Print matched suppressions at exit.
+ bool print_suppressions;
+ // Print matched "benign" races at exit.
+ bool print_benign;
+ // Override exit status if something was reported.
+ int exitcode;
+ // Exit after first reported error.
+ bool halt_on_error;
+  // Sleep in the main thread before exiting for that many ms
+  // (useful to catch "at exit" races).
+ int atexit_sleep_ms;
+ // If set, periodically write memory profile to that file.
+ const char *profile_memory;
+ // Flush shadow memory every X ms.
+ int flush_memory_ms;
+ // Flush symbolizer caches every X ms.
+ int flush_symbolizer_ms;
+ // Resident memory limit in MB to aim at.
+ // If the process consumes more memory, then TSan will flush shadow memory.
+ int memory_limit_mb;
+ // Stops on start until __tsan_resume() is called (for debugging).
+ bool stop_on_start;
+ // Controls whether RunningOnValgrind() returns true or false.
+ bool running_on_valgrind;
+  // Per-thread history size; controls how many previous memory accesses
+  // are remembered per thread. Possible values are [0..7].
+  // history_size=0 amounts to 32K memory accesses. Each subsequent value
+  // doubles the amount of memory accesses, up to history_size=7, which
+  // amounts to 4M memory accesses. The default value is 2 (128K accesses).
+ int history_size;
+ // Controls level of synchronization implied by IO operations.
+ // 0 - no synchronization
+ // 1 - reasonable level of synchronization (write->read)
+ // 2 - global synchronization of all IO operations
+ int io_sync;
+};
+
+Flags *flags();
+void InitializeFlags(Flags *flags, const char *env);
+} // namespace __tsan
+
+#endif // TSAN_FLAGS_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_ignoreset.cc b/gcc-4.9/libsanitizer/tsan/tsan_ignoreset.cc
new file mode 100644
index 000000000..f0aec42eb
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_ignoreset.cc
@@ -0,0 +1,45 @@
+//===-- tsan_ignoreset.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_ignoreset.h"
+
+namespace __tsan {
+
+const uptr IgnoreSet::kMaxSize;
+
+IgnoreSet::IgnoreSet()
+ : size_() {
+}
+
+void IgnoreSet::Add(u32 stack_id) {
+ if (size_ == kMaxSize)
+ return;
+ for (uptr i = 0; i < size_; i++) {
+ if (stacks_[i] == stack_id)
+ return;
+ }
+ stacks_[size_++] = stack_id;
+}
+
+void IgnoreSet::Reset() {
+ size_ = 0;
+}
+
+uptr IgnoreSet::Size() const {
+ return size_;
+}
+
+u32 IgnoreSet::At(uptr i) const {
+ CHECK_LT(i, size_);
+ CHECK_LE(size_, kMaxSize);
+ return stacks_[i];
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_ignoreset.h b/gcc-4.9/libsanitizer/tsan/tsan_ignoreset.h
new file mode 100644
index 000000000..5a250b75a
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_ignoreset.h
@@ -0,0 +1,36 @@
+//===-- tsan_ignoreset.h ----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// IgnoreSet holds a set of stack traces where ignores were enabled.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_IGNORESET_H
+#define TSAN_IGNORESET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class IgnoreSet {
+ public:
+ static const uptr kMaxSize = 16;
+
+ IgnoreSet();
+ void Add(u32 stack_id);
+ void Reset();
+ uptr Size() const;
+ u32 At(uptr i) const;
+
+ private:
+ uptr size_;
+ u32 stacks_[kMaxSize];
+};
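+
+// Illustrative usage sketch (assumption, not from this header): the runtime
+// records the stack id of each active ignore and replays them in reports:
+//
+//   IgnoreSet ignores;
+//   ignores.Add(stack_id);                   // deduplicated, capped at 16
+//   for (uptr i = 0; i < ignores.Size(); i++)
+//     use(ignores.At(i));                    // use() is hypothetical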
+
+} // namespace __tsan
+
+#endif // TSAN_IGNORESET_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interceptors.cc b/gcc-4.9/libsanitizer/tsan/tsan_interceptors.cc
new file mode 100644
index 000000000..0574beb91
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interceptors.cc
@@ -0,0 +1,2295 @@
+//===-- tsan_interceptors.cc ----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// FIXME: move as many interceptors as possible into
+// sanitizer_common/sanitizer_common_interceptors.inc
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "interception/interception.h"
+#include "tsan_interface.h"
+#include "tsan_platform.h"
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_fd.h"
+
+using namespace __tsan; // NOLINT
+
+const int kSigCount = 64;
+
+struct my_siginfo_t {
+  // The size matches the sizeof of the real siginfo_t on Linux.
+ u64 opaque[128 / sizeof(u64)];
+};
+
+struct ucontext_t {
+  // The size matches the sizeof of the real ucontext_t on Linux.
+ u64 opaque[936 / sizeof(u64) + 1];
+};
+
+extern "C" int pthread_attr_init(void *attr);
+extern "C" int pthread_attr_destroy(void *attr);
+DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
+extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
+extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+extern "C" int pthread_setspecific(unsigned key, const void *v);
+extern "C" int pthread_mutexattr_gettype(void *a, int *type);
+extern "C" int pthread_yield();
+extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset);
+// REAL(sigfillset) defined in common interceptors.
+DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set)
+extern "C" void *pthread_self();
+extern "C" void _exit(int status);
+extern "C" int *__errno_location();
+extern "C" int fileno_unlocked(void *stream);
+extern "C" void *__libc_malloc(uptr size);
+extern "C" void *__libc_calloc(uptr size, uptr n);
+extern "C" void *__libc_realloc(void *ptr, uptr size);
+extern "C" void __libc_free(void *ptr);
+extern "C" int mallopt(int param, int value);
+const int PTHREAD_MUTEX_RECURSIVE = 1;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
+const int EINVAL = 22;
+const int EBUSY = 16;
+const int EOWNERDEAD = 130;
+const int EPOLL_CTL_ADD = 1;
+const int SIGILL = 4;
+const int SIGABRT = 6;
+const int SIGFPE = 8;
+const int SIGSEGV = 11;
+const int SIGPIPE = 13;
+const int SIGBUS = 7;
+const int SIGSYS = 31;
+void *const MAP_FAILED = (void*)-1;
+const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
+const int MAP_FIXED = 0x10;
+typedef long long_t; // NOLINT
+
+// From /usr/include/unistd.h
+# define F_ULOCK 0 /* Unlock a previously locked region. */
+# define F_LOCK 1 /* Lock a region for exclusive use. */
+# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
+# define F_TEST 3 /* Test a region for other processes locks. */
+
+typedef void (*sighandler_t)(int sig);
+
+#define errno (*__errno_location())
+
+struct sigaction_t {
+ union {
+ sighandler_t sa_handler;
+ void (*sa_sigaction)(int sig, my_siginfo_t *siginfo, void *uctx);
+ };
+ __sanitizer_sigset_t sa_mask;
+ int sa_flags;
+ void (*sa_restorer)();
+};
+
+const sighandler_t SIG_DFL = (sighandler_t)0;
+const sighandler_t SIG_IGN = (sighandler_t)1;
+const sighandler_t SIG_ERR = (sighandler_t)-1;
+const int SA_SIGINFO = 4;
+const int SIG_SETMASK = 2;
+
+namespace std {
+struct nothrow_t {};
+} // namespace std
+
+static sigaction_t sigactions[kSigCount];
+
+namespace __tsan {
+struct SignalDesc {
+ bool armed;
+ bool sigaction;
+ my_siginfo_t siginfo;
+ ucontext_t ctx;
+};
+
+struct SignalContext {
+ int in_blocking_func;
+ int int_signal_send;
+ int pending_signal_count;
+ SignalDesc pending_signals[kSigCount];
+};
+
+// The object is 64-byte aligned, because we want hot data to be located in
+// a single cache line if possible (it's accessed in every interceptor).
+static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)];
+static LibIgnore *libignore() {
+ return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]);
+}
+
+void InitializeLibIgnore() {
+ libignore()->Init(*GetSuppressionContext());
+ libignore()->OnLibraryLoaded(0);
+}
+
+} // namespace __tsan
+
+static SignalContext *SigCtx(ThreadState *thr) {
+ SignalContext *ctx = (SignalContext*)thr->signal_ctx;
+ if (ctx == 0 && thr->is_alive) {
+ ScopedInRtl in_rtl;
+ ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext");
+ MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
+ thr->signal_ctx = ctx;
+ }
+ return ctx;
+}
+
+static unsigned g_thread_finalize_key;
+
+class ScopedInterceptor {
+ public:
+ ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
+ ~ScopedInterceptor();
+ private:
+ ThreadState *const thr_;
+ const uptr pc_;
+ const int in_rtl_;
+ bool in_ignored_lib_;
+};
+
+ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
+ uptr pc)
+ : thr_(thr)
+ , pc_(pc)
+ , in_rtl_(thr->in_rtl)
+ , in_ignored_lib_(false) {
+ if (thr_->in_rtl == 0) {
+ Initialize(thr);
+ FuncEntry(thr, pc);
+ thr_->in_rtl++;
+ DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
+ } else {
+ thr_->in_rtl++;
+ }
+ if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
+ in_ignored_lib_ = true;
+ thr_->in_ignored_lib = true;
+ ThreadIgnoreBegin(thr_, pc_);
+ }
+}
+
+ScopedInterceptor::~ScopedInterceptor() {
+ if (in_ignored_lib_) {
+ thr_->in_ignored_lib = false;
+ ThreadIgnoreEnd(thr_, pc_);
+ }
+ thr_->in_rtl--;
+ if (thr_->in_rtl == 0) {
+ FuncExit(thr_);
+ ProcessPendingSignals(thr_);
+ }
+ CHECK_EQ(in_rtl_, thr_->in_rtl);
+}
+
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ ThreadState *thr = cur_thread(); \
+ StatInc(thr, StatInterceptor); \
+ StatInc(thr, StatInt_##func); \
+ const uptr caller_pc = GET_CALLER_PC(); \
+ ScopedInterceptor si(thr, #func, caller_pc); \
+ const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ (void)pc; \
+/**/
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ if (REAL(func) == 0) { \
+ Printf("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+ Die(); \
+ } \
+ if (thr->in_rtl > 1 || thr->in_ignored_lib) \
+ return REAL(func)(__VA_ARGS__); \
+/**/
+
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+
+#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
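+
+// Illustrative sketch (not part of the runtime): with the macros above, a
+// minimal interceptor has the following shape -- enter the scoped
+// interceptor, model the memory effects, then forward to the real function.
+// 'foo' is a hypothetical libc call:
+//
+//   TSAN_INTERCEPTOR(int, foo, void *p) {
+//     SCOPED_TSAN_INTERCEPTOR(foo, p);   // defines thr and pc, handles
+//                                        // recursion and ignored libs
+//     MemoryAccessRange(thr, pc, (uptr)p, 1, false);  // model 1-byte read
+//     return REAL(foo)(p);
+//   }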
+
+struct BlockingCall {
+ explicit BlockingCall(ThreadState *thr)
+ : ctx(SigCtx(thr)) {
+ ctx->in_blocking_func++;
+ }
+
+ ~BlockingCall() {
+ ctx->in_blocking_func--;
+ }
+
+ SignalContext *ctx;
+};
+
+TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
+ SCOPED_TSAN_INTERCEPTOR(sleep, sec);
+ unsigned res = BLOCK_REAL(sleep)(sec);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, usleep, long_t usec) {
+ SCOPED_TSAN_INTERCEPTOR(usleep, usec);
+ int res = BLOCK_REAL(usleep)(usec);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
+ SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
+ int res = BLOCK_REAL(nanosleep)(req, rem);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
+ SCOPED_INTERCEPTOR_RAW(dlopen, filename, flag);
+  // dlopen will execute global constructors, so it must not run in rtl.
+ CHECK_EQ(thr->in_rtl, 1);
+ thr->in_rtl = 0;
+ void *res = REAL(dlopen)(filename, flag);
+ thr->in_rtl = 1;
+ libignore()->OnLibraryLoaded(filename);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, dlclose, void *handle) {
+ SCOPED_INTERCEPTOR_RAW(dlclose, handle);
+  // dlclose will execute global destructors, so it must not run in rtl.
+ CHECK_EQ(thr->in_rtl, 1);
+ thr->in_rtl = 0;
+ int res = REAL(dlclose)(handle);
+ thr->in_rtl = 1;
+ libignore()->OnLibraryUnloaded();
+ return res;
+}
+
+class AtExitContext {
+ public:
+ AtExitContext()
+ : mtx_(MutexTypeAtExit, StatMtxAtExit)
+ , pos_() {
+ }
+
+ typedef void(*atexit_t)();
+
+ int atexit(ThreadState *thr, uptr pc, bool is_on_exit,
+ atexit_t f, void *arg) {
+ Lock l(&mtx_);
+ if (pos_ == kMaxAtExit)
+ return 1;
+ Release(thr, pc, (uptr)this);
+ stack_[pos_] = f;
+ args_[pos_] = arg;
+ is_on_exits_[pos_] = is_on_exit;
+ pos_++;
+ return 0;
+ }
+
+ void exit(ThreadState *thr, uptr pc) {
+ CHECK_EQ(thr->in_rtl, 0);
+ for (;;) {
+ atexit_t f = 0;
+ void *arg = 0;
+ bool is_on_exit = false;
+ {
+ Lock l(&mtx_);
+ if (pos_) {
+ pos_--;
+ f = stack_[pos_];
+ arg = args_[pos_];
+ is_on_exit = is_on_exits_[pos_];
+ ScopedInRtl in_rtl;
+ Acquire(thr, pc, (uptr)this);
+ }
+ }
+ if (f == 0)
+ break;
+ DPrintf("#%d: executing atexit func %p\n", thr->tid, f);
+ CHECK_EQ(thr->in_rtl, 0);
+ if (is_on_exit)
+ ((void(*)(int status, void *arg))f)(0, arg);
+ else
+ ((void(*)(void *arg, void *dso))f)(arg, 0);
+ }
+ }
+
+ private:
+ static const int kMaxAtExit = 128;
+ Mutex mtx_;
+ atexit_t stack_[kMaxAtExit];
+ void *args_[kMaxAtExit];
+ bool is_on_exits_[kMaxAtExit];
+ int pos_;
+};
+
+static AtExitContext *atexit_ctx;
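+
+// Synchronization note (summary of the class above): atexit() performs a
+// Release() on the AtExitContext before recording a callback, and exit()
+// performs the matching Acquire() before invoking it, so everything the
+// registering thread did before atexit() is visible to the callback.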
+
+TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
+ if (cur_thread()->in_symbolizer)
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(atexit, f);
+ return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0);
+}
+
+TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
+ if (cur_thread()->in_symbolizer)
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
+ return atexit_ctx->atexit(thr, pc, true, (void(*)())f, arg);
+}
+
+TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
+ if (cur_thread()->in_symbolizer)
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
+ if (dso) {
+    // Memory allocation in __cxa_atexit will race with free during exit,
+    // because we do not see the synchronization around the atexit
+    // callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(__cxa_atexit)(f, arg, dso);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+ }
+ return atexit_ctx->atexit(thr, pc, false, (void(*)())f, arg);
+}
+
+// Cleanup old bufs.
+static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->sp <= sp) {
+ uptr sz = thr->jmp_bufs.Size();
+ thr->jmp_bufs[i] = thr->jmp_bufs[sz - 1];
+ thr->jmp_bufs.PopBack();
+ i--;
+ }
+ }
+}
+
+static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
+ if (thr->shadow_stack_pos == 0) // called from libc guts during bootstrap
+ return;
+ // Cleanup old bufs.
+ JmpBufGarbageCollect(thr, sp);
+ // Remember the buf.
+ JmpBuf *buf = thr->jmp_bufs.PushBack();
+ buf->sp = sp;
+ buf->mangled_sp = mangled_sp;
+ buf->shadow_stack_pos = thr->shadow_stack_pos;
+}
+
+static void LongJmp(ThreadState *thr, uptr *env) {
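+  // On glibc/x86_64 the mangled stack pointer is saved in jmp_buf slot 6;
+  // __tsan_setjmp recorded the same value, so it identifies the buffer.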
+ uptr mangled_sp = env[6];
+ // Find the saved buf by mangled_sp.
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->mangled_sp == mangled_sp) {
+ CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
+ // Unwind the stack.
+ while (thr->shadow_stack_pos > buf->shadow_stack_pos)
+ FuncExit(thr);
+ JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
+ return;
+ }
+ }
+ Printf("ThreadSanitizer: can't find longjmp buf\n");
+ CHECK(0);
+}
+
+// FIXME: put everything below into a common extern "C" block?
+extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
+ ScopedInRtl in_rtl;
+ SetJmp(cur_thread(), sp, mangled_sp);
+}
+
+// Not called. Merely to satisfy TSAN_INTERCEPT().
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor_setjmp(void *env);
+extern "C" int __interceptor_setjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+// FIXME: any reason to have a separate declaration?
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor__setjmp(void *env);
+extern "C" int __interceptor__setjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor_sigsetjmp(void *env);
+extern "C" int __interceptor_sigsetjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor___sigsetjmp(void *env);
+extern "C" int __interceptor___sigsetjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" int setjmp(void *env);
+extern "C" int _setjmp(void *env);
+extern "C" int sigsetjmp(void *env);
+extern "C" int __sigsetjmp(void *env);
+DEFINE_REAL(int, setjmp, void *env)
+DEFINE_REAL(int, _setjmp, void *env)
+DEFINE_REAL(int, sigsetjmp, void *env)
+DEFINE_REAL(int, __sigsetjmp, void *env)
+
+TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
+ {
+ SCOPED_TSAN_INTERCEPTOR(longjmp, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(longjmp)(env, val);
+}
+
+TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
+ {
+ SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(siglongjmp)(env, val);
+}
+
+TSAN_INTERCEPTOR(void*, malloc, uptr size) {
+ if (cur_thread()->in_symbolizer)
+ return __libc_malloc(size);
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(malloc, size);
+ p = user_alloc(thr, pc, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
+ SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
+ return user_alloc(thr, pc, sz, align);
+}
+
+TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
+ if (cur_thread()->in_symbolizer)
+ return __libc_calloc(size, n);
+ if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, n))
+ return AllocatorReturnNull();
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(calloc, size, n);
+ p = user_alloc(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ }
+ invoke_malloc_hook(p, n * size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
+ if (cur_thread()->in_symbolizer)
+ return __libc_realloc(p, size);
+ if (p)
+ invoke_free_hook(p);
+ {
+ SCOPED_INTERCEPTOR_RAW(realloc, p, size);
+ p = user_realloc(thr, pc, p, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void, free, void *p) {
+ if (p == 0)
+ return;
+ if (cur_thread()->in_symbolizer)
+ return __libc_free(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(free, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(void, cfree, void *p) {
+ if (p == 0)
+ return;
+ if (cur_thread()->in_symbolizer)
+ return __libc_free(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(cfree, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
+ SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
+ return user_alloc_usable_size(thr, pc, p);
+}
+
+#define OPERATOR_NEW_BODY(mangled_name) \
+ if (cur_thread()->in_symbolizer) \
+ return __libc_malloc(size); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
+ p = user_alloc(thr, pc, size); \
+ } \
+ invoke_malloc_hook(p, size); \
+ return p;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size);
+void *operator new(__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znwm);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size);
+void *operator new[](__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znam);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t);
+}
+
+#define OPERATOR_DELETE_BODY(mangled_name) \
+ if (ptr == 0) return; \
+ if (cur_thread()->in_symbolizer) \
+ return __libc_free(ptr); \
+ invoke_free_hook(ptr); \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \
+ user_free(thr, pc, ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr);
+void operator delete(void *ptr) {
+ OPERATOR_DELETE_BODY(_ZdlPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr);
+void operator delete[](void *ptr) {
+  OPERATOR_DELETE_BODY(_ZdaPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&);
+void operator delete(void *ptr, std::nothrow_t const&) {
+  OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&);
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t);
+}
+
+TSAN_INTERCEPTOR(uptr, strlen, const char *s) {
+ SCOPED_TSAN_INTERCEPTOR(strlen, s);
+ uptr len = internal_strlen(s);
+ MemoryAccessRange(thr, pc, (uptr)s, len + 1, false);
+ return len;
+}
+
+TSAN_INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
+ SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size);
+ MemoryAccessRange(thr, pc, (uptr)dst, size, true);
+ return internal_memset(dst, v, size);
+}
+
+TSAN_INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
+ SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size);
+ MemoryAccessRange(thr, pc, (uptr)dst, size, true);
+ MemoryAccessRange(thr, pc, (uptr)src, size, false);
+ return internal_memcpy(dst, src, size);
+}
+
+TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(memcmp, s1, s2, n);
+ int res = 0;
+ uptr len = 0;
+ for (; len < n; len++) {
+ if ((res = ((unsigned char*)s1)[len] - ((unsigned char*)s2)[len]))
+ break;
+ }
+ MemoryAccessRange(thr, pc, (uptr)s1, len < n ? len + 1 : n, false);
+ MemoryAccessRange(thr, pc, (uptr)s2, len < n ? len + 1 : n, false);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, memchr, void *s, int c, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(memchr, s, c, n);
+ void *res = REAL(memchr)(s, c, n);
+ uptr len = res ? (char*)res - (char*)s + 1 : n;
+ MemoryAccessRange(thr, pc, (uptr)s, len, false);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, memrchr, char *s, int c, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(memrchr, s, c, n);
+ MemoryAccessRange(thr, pc, (uptr)s, n, false);
+ return REAL(memrchr)(s, c, n);
+}
+
+TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
+ MemoryAccessRange(thr, pc, (uptr)dst, n, true);
+ MemoryAccessRange(thr, pc, (uptr)src, n, false);
+ return REAL(memmove)(dst, src, n);
+}
+
+TSAN_INTERCEPTOR(char*, strchr, char *s, int c) {
+ SCOPED_TSAN_INTERCEPTOR(strchr, s, c);
+ char *res = REAL(strchr)(s, c);
+ uptr len = res ? (char*)res - (char*)s + 1 : internal_strlen(s) + 1;
+ MemoryAccessRange(thr, pc, (uptr)s, len, false);
+ return res;
+}
+
+TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) {
+ SCOPED_TSAN_INTERCEPTOR(strchrnul, s, c);
+ char *res = REAL(strchrnul)(s, c);
+ uptr len = (char*)res - (char*)s + 1;
+ MemoryAccessRange(thr, pc, (uptr)s, len, false);
+ return res;
+}
+
+TSAN_INTERCEPTOR(char*, strrchr, char *s, int c) {
+ SCOPED_TSAN_INTERCEPTOR(strrchr, s, c);
+ MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s) + 1, false);
+ return REAL(strrchr)(s, c);
+}
+
+TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT
+ SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT
+ uptr srclen = internal_strlen(src);
+ MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
+ MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
+ return REAL(strcpy)(dst, src); // NOLINT
+}
+
+TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
+ uptr srclen = internal_strnlen(src, n);
+ MemoryAccessRange(thr, pc, (uptr)dst, n, true);
+ MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
+ return REAL(strncpy)(dst, src, n);
+}
+
+TSAN_INTERCEPTOR(const char*, strstr, const char *s1, const char *s2) {
+ SCOPED_TSAN_INTERCEPTOR(strstr, s1, s2);
+ const char *res = REAL(strstr)(s1, s2);
+ uptr len1 = internal_strlen(s1);
+ uptr len2 = internal_strlen(s2);
+ MemoryAccessRange(thr, pc, (uptr)s1, len1 + 1, false);
+ MemoryAccessRange(thr, pc, (uptr)s2, len2 + 1, false);
+ return res;
+}
+
+TSAN_INTERCEPTOR(char*, strdup, const char *str) {
+ SCOPED_TSAN_INTERCEPTOR(strdup, str);
+ // strdup will call malloc, so no instrumentation is required here.
+ return REAL(strdup)(str);
+}
+
+static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
+ if (*addr) {
+ if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
+ if (flags & MAP_FIXED) {
+ errno = EINVAL;
+ return false;
+ } else {
+ *addr = 0;
+ }
+ }
+ }
+ return true;
+}
+
+TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot,
+ int flags, int fd, unsigned off) {
+ SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
+ if (!fix_mmap_addr(&addr, sz, flags))
+ return MAP_FAILED;
+ void *res = REAL(mmap)(addr, sz, prot, flags, fd, off);
+ if (res != MAP_FAILED) {
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
+ int flags, int fd, u64 off) {
+ SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
+ if (!fix_mmap_addr(&addr, sz, flags))
+ return MAP_FAILED;
+ void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off);
+ if (res != MAP_FAILED) {
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
+ SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
+ DontNeedShadowFor((uptr)addr, sz);
+ int res = REAL(munmap)(addr, sz);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
+ return user_alloc(thr, pc, sz, align);
+}
+
+TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(valloc, sz);
+ return user_alloc(thr, pc, sz, GetPageSizeCached());
+}
+
+TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
+ sz = RoundUp(sz, GetPageSizeCached());
+ return user_alloc(thr, pc, sz, GetPageSizeCached());
+}
+
+TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
+ *memptr = user_alloc(thr, pc, sz, align);
+ return 0;
+}
+
+// Used in thread-safe function static initialization.
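+// Guard states, as implied by the logic below: 0 = not initialized,
+// 1<<16 = initialization in progress, 1 = initialized. The initializing
+// thread Release()s the guard in __cxa_guard_release, so a waiter that
+// observes state 1 Acquire()s it and sees the initialized static.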
+extern "C" int INTERFACE_ATTRIBUTE __cxa_guard_acquire(atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
+ for (;;) {
+ u32 cmp = atomic_load(g, memory_order_acquire);
+ if (cmp == 0) {
+ if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
+ return 1;
+ } else if (cmp == 1) {
+ Acquire(thr, pc, (uptr)g);
+ return 0;
+ } else {
+ internal_sched_yield();
+ }
+ }
+}
+
+extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_release(atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
+ Release(thr, pc, (uptr)g);
+ atomic_store(g, 1, memory_order_release);
+}
+
+extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_abort(atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
+ atomic_store(g, 0, memory_order_relaxed);
+}
+
+static void thread_finalize(void *v) {
+ uptr iter = (uptr)v;
+ if (iter > 1) {
+ if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
+ Printf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ return;
+ }
+ {
+ ScopedInRtl in_rtl;
+ ThreadState *thr = cur_thread();
+ ThreadFinish(thr);
+ SignalContext *sctx = thr->signal_ctx;
+ if (sctx) {
+ thr->signal_ctx = 0;
+ UnmapOrDie(sctx, sizeof(*sctx));
+ }
+ }
+}
+
+struct ThreadParam {
+ void* (*callback)(void *arg);
+ void *param;
+ atomic_uintptr_t tid;
+};
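+
+// Creation handshake (summary of the code below): pthread_create() registers
+// the new thread via ThreadCreate() and publishes its tsan tid through
+// p->tid with release ordering; the child spins in __tsan_thread_start_func
+// until it observes a non-zero tid, calls ThreadStart(), and stores 0 back
+// so the parent knows its stack-allocated ThreadParam can go out of scope.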
+
+extern "C" void *__tsan_thread_start_func(void *arg) {
+ ThreadParam *p = (ThreadParam*)arg;
+ void* (*callback)(void *arg) = p->callback;
+ void *param = p->param;
+ int tid = 0;
+ {
+ ThreadState *thr = cur_thread();
+ ScopedInRtl in_rtl;
+ if (pthread_setspecific(g_thread_finalize_key,
+ (void *)kPthreadDestructorIterations)) {
+ Printf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
+ pthread_yield();
+ atomic_store(&p->tid, 0, memory_order_release);
+ ThreadStart(thr, tid, GetTid());
+ CHECK_EQ(thr->in_rtl, 1);
+ }
+ void *res = callback(param);
+  // Prevent the callback from being tail-called, because that mixes up
+  // stack traces.
+ volatile int foo = 42;
+ foo++;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_create,
+ void *th, void *attr, void *(*callback)(void*), void * param) {
+ SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
+ __sanitizer_pthread_attr_t myattr;
+ if (attr == 0) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+ int detached = 0;
+ REAL(pthread_attr_getdetachstate)(attr, &detached);
+ AdjustStackSizeLinux(attr);
+
+ ThreadParam p;
+ p.callback = callback;
+ p.param = param;
+ atomic_store(&p.tid, 0, memory_order_relaxed);
+ int res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ if (res == 0) {
+ int tid = ThreadCreate(thr, pc, *(uptr*)th, detached);
+ CHECK_NE(tid, 0);
+ atomic_store(&p.tid, tid, memory_order_release);
+ while (atomic_load(&p.tid, memory_order_acquire) != 0)
+ pthread_yield();
+ }
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+ int tid = ThreadTid(thr, pc, (uptr)th);
+ int res = BLOCK_REAL(pthread_join)(th, ret);
+ if (res == 0) {
+ ThreadJoin(thr, pc, tid);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
+ int tid = ThreadTid(thr, pc, (uptr)th);
+ int res = REAL(pthread_detach)(th);
+ if (res == 0) {
+ ThreadDetach(thr, pc, tid);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
+ int res = REAL(pthread_mutex_init)(m, a);
+ if (res == 0) {
+ bool recursive = false;
+ if (a) {
+ int type = 0;
+ if (pthread_mutexattr_gettype(a, &type) == 0)
+ recursive = (type == PTHREAD_MUTEX_RECURSIVE
+ || type == PTHREAD_MUTEX_RECURSIVE_NP);
+ }
+ MutexCreate(thr, pc, (uptr)m, false, recursive, false);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
+ int res = REAL(pthread_mutex_destroy)(m);
+ if (res == 0 || res == EBUSY) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
+ int res = REAL(pthread_mutex_trylock)(m);
+ if (res == EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ if (res == 0 || res == EOWNERDEAD)
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
+ int res = REAL(pthread_mutex_timedlock)(m, abstime);
+ if (res == 0) {
+ MutexLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
+ int res = REAL(pthread_spin_init)(m, pshared);
+ if (res == 0) {
+ MutexCreate(thr, pc, (uptr)m, false, false, false);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
+ int res = REAL(pthread_spin_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
+ int res = REAL(pthread_spin_lock)(m);
+ if (res == 0) {
+ MutexLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
+ int res = REAL(pthread_spin_trylock)(m);
+ if (res == 0) {
+ MutexLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_spin_unlock)(m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
+ int res = REAL(pthread_rwlock_init)(m, a);
+ if (res == 0) {
+ MutexCreate(thr, pc, (uptr)m, true, false, false);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
+ int res = REAL(pthread_rwlock_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
+ int res = REAL(pthread_rwlock_rdlock)(m);
+ if (res == 0) {
+ MutexReadLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
+ int res = REAL(pthread_rwlock_tryrdlock)(m);
+ if (res == 0) {
+ MutexReadLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
+ if (res == 0) {
+ MutexReadLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
+ int res = REAL(pthread_rwlock_wrlock)(m);
+ if (res == 0) {
+ MutexLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
+ int res = REAL(pthread_rwlock_trywrlock)(m);
+ if (res == 0) {
+ MutexLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
+ if (res == 0) {
+ MutexLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
+ MutexReadOrWriteUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_unlock)(m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_cond_destroy, void *c) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, c);
+ MemoryWrite(thr, pc, (uptr)c, kSizeLog1);
+ int res = REAL(pthread_cond_destroy)(c);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m,
+ void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, c, m, abstime);
+ MutexUnlock(thr, pc, (uptr)m);
+ MemoryRead(thr, pc, (uptr)c, kSizeLog1);
+ int res = REAL(pthread_cond_timedwait)(c, m, abstime);
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ int res = REAL(pthread_barrier_init)(b, a, count);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ int res = REAL(pthread_barrier_destroy)(b);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
+ Release(thr, pc, (uptr)b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ int res = REAL(pthread_barrier_wait)(b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
+ Acquire(thr, pc, (uptr)b);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
+ SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
+ // Using SCOPED_INTERCEPTOR_RAW, because if we are called from an ignored lib,
+ // the user callback must be executed with thr->in_rtl == 0.
+ if (o == 0 || f == 0)
+ return EINVAL;
+ atomic_uint32_t *a = static_cast<atomic_uint32_t*>(o);
+ u32 v = atomic_load(a, memory_order_acquire);
+ if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
+ memory_order_relaxed)) {
+ const int old_in_rtl = thr->in_rtl;
+ thr->in_rtl = 0;
+ (*f)();
+ CHECK_EQ(thr->in_rtl, 0);
+ thr->in_rtl = old_in_rtl;
+ if (!thr->in_ignored_lib)
+ Release(thr, pc, (uptr)o);
+ atomic_store(a, 2, memory_order_release);
+ } else {
+ while (v != 2) {
+ pthread_yield();
+ v = atomic_load(a, memory_order_acquire);
+ }
+ if (!thr->in_ignored_lib)
+ Acquire(thr, pc, (uptr)o);
+ }
+ return 0;
+}
+
+TSAN_INTERCEPTOR(int, sem_init, void *s, int pshared, unsigned value) {
+ SCOPED_TSAN_INTERCEPTOR(sem_init, s, pshared, value);
+ int res = REAL(sem_init)(s, pshared, value);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_destroy, void *s) {
+ SCOPED_TSAN_INTERCEPTOR(sem_destroy, s);
+ int res = REAL(sem_destroy)(s);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_wait, void *s) {
+ SCOPED_TSAN_INTERCEPTOR(sem_wait, s);
+ int res = BLOCK_REAL(sem_wait)(s);
+ if (res == 0) {
+ Acquire(thr, pc, (uptr)s);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_trywait, void *s) {
+ SCOPED_TSAN_INTERCEPTOR(sem_trywait, s);
+ int res = BLOCK_REAL(sem_trywait)(s);
+ if (res == 0) {
+ Acquire(thr, pc, (uptr)s);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_timedwait, void *s, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(sem_timedwait, s, abstime);
+ int res = BLOCK_REAL(sem_timedwait)(s, abstime);
+ if (res == 0) {
+ Acquire(thr, pc, (uptr)s);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_post, void *s) {
+ SCOPED_TSAN_INTERCEPTOR(sem_post, s);
+ Release(thr, pc, (uptr)s);
+ int res = REAL(sem_post)(s);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) {
+ SCOPED_TSAN_INTERCEPTOR(sem_getvalue, s, sval);
+ int res = REAL(sem_getvalue)(s, sval);
+ if (res == 0) {
+ Acquire(thr, pc, (uptr)s);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf);
+ return REAL(__xstat)(version, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf);
+ return REAL(__xstat)(0, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf);
+ return REAL(__xstat64)(version, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf);
+ return REAL(__xstat64)(0, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf);
+ return REAL(__lxstat)(version, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf);
+ return REAL(__lxstat)(0, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf);
+ return REAL(__lxstat64)(version, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf);
+ return REAL(__lxstat64)(0, path, buf);
+}
+
+TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(version, fd, buf);
+}
+
+TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(0, fd, buf);
+}
+
+TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(version, fd, buf);
+}
+
+TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(0, fd, buf);
+}
+
+TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
+ int fd = REAL(open)(name, flags, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
+ int fd = REAL(open64)(name, flags, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
+ int fd = REAL(creat)(name, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
+ int fd = REAL(creat64)(name, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, dup, int oldfd) {
+ SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
+ int newfd = REAL(dup)(oldfd);
+ if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
+ FdDup(thr, pc, oldfd, newfd);
+ return newfd;
+}
+
+TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
+ SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
+ int newfd2 = REAL(dup2)(oldfd, newfd);
+ if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+ FdDup(thr, pc, oldfd, newfd2);
+ return newfd2;
+}
+
+TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
+ int newfd2 = REAL(dup3)(oldfd, newfd, flags);
+ if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+ FdDup(thr, pc, oldfd, newfd2);
+ return newfd2;
+}
+
+TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
+ int fd = REAL(eventfd)(initval, flags);
+ if (fd >= 0)
+ FdEventCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ fd = REAL(signalfd)(fd, mask, flags);
+ if (fd >= 0)
+ FdSignalCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, inotify_init, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
+ int fd = REAL(inotify_init)(fake);
+ if (fd >= 0)
+ FdInotifyCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
+ int fd = REAL(inotify_init1)(flags);
+ if (fd >= 0)
+ FdInotifyCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
+ SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
+ int fd = REAL(socket)(domain, type, protocol);
+ if (fd >= 0)
+ FdSocketCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
+ SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
+ int res = REAL(socketpair)(domain, type, protocol, fd);
+ if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
+ FdPipeCreate(thr, pc, fd[0], fd[1]);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
+ SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
+ FdSocketConnecting(thr, pc, fd);
+ int res = REAL(connect)(fd, addr, addrlen);
+ if (res == 0 && fd >= 0)
+ FdSocketConnect(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
+ SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
+ int res = REAL(bind)(fd, addr, addrlen);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
+ SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
+ int res = REAL(listen)(fd, backlog);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_create, int size) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
+ int fd = REAL(epoll_create)(size);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
+ int fd = REAL(epoll_create1)(flags);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, close, int fd) {
+ SCOPED_TSAN_INTERCEPTOR(close, fd);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ return REAL(close)(fd);
+}
+
+TSAN_INTERCEPTOR(int, __close, int fd) {
+ SCOPED_TSAN_INTERCEPTOR(__close, fd);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ return REAL(__close)(fd);
+}
+
+// glibc guts
+TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
+ SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
+ int fds[64];
+ int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++) {
+ if (fds[i] > 0)
+ FdClose(thr, pc, fds[i]);
+ }
+ REAL(__res_iclose)(state, free_addr);
+}
+
+TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
+ SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
+ int res = REAL(pipe)(pipefd);
+ if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+ FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
+ int res = REAL(pipe2)(pipefd, flags);
+ if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+ FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, flags);
+ if (fd >= 0) {
+ FdAccess(thr, pc, fd);
+ FdRelease(thr, pc, fd);
+ }
+ int res = REAL(send)(fd, buf, len, flags);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(sendmsg, fd, msg, flags);
+ if (fd >= 0) {
+ FdAccess(thr, pc, fd);
+ FdRelease(thr, pc, fd);
+ }
+ int res = REAL(sendmsg)(fd, msg, flags);
+ return res;
+}
+
+TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(recv, fd, buf, len, flags);
+ if (fd >= 0)
+ FdAccess(thr, pc, fd);
+ int res = REAL(recv)(fd, buf, len, flags);
+ if (res >= 0 && fd >= 0) {
+ FdAcquire(thr, pc, fd);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, unlink, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(unlink, path);
+ Release(thr, pc, File2addr(path));
+ int res = REAL(unlink)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
+ SCOPED_TSAN_INTERCEPTOR(fopen, path, mode);
+ void *res = REAL(fopen)(path, mode);
+ Acquire(thr, pc, File2addr(path));
+ if (res) {
+ int fd = fileno_unlocked(res);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
+ SCOPED_TSAN_INTERCEPTOR(freopen, path, mode, stream);
+ if (stream) {
+ int fd = fileno_unlocked(stream);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ }
+ void *res = REAL(freopen)(path, mode, stream);
+ Acquire(thr, pc, File2addr(path));
+ if (res) {
+ int fd = fileno_unlocked(res);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, fclose, void *stream) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ {
+ SCOPED_TSAN_INTERCEPTOR(fclose, stream);
+ if (stream) {
+ int fd = fileno_unlocked(stream);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ }
+ }
+ return REAL(fclose)(stream);
+}
+
+TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ {
+ SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f);
+ MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true);
+ }
+ return REAL(fread)(ptr, size, nmemb, f);
+}
+
+TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ {
+ SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f);
+ MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false);
+ }
+ return REAL(fwrite)(p, size, nmemb, f);
+}
+
+TSAN_INTERCEPTOR(int, fflush, void *stream) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ {
+ SCOPED_TSAN_INTERCEPTOR(fflush, stream);
+ }
+ return REAL(fflush)(stream);
+}
+
+TSAN_INTERCEPTOR(void, abort, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(abort, fake);
+ REAL(fflush)(0);
+ REAL(abort)(fake);
+}
+
+TSAN_INTERCEPTOR(int, puts, const char *s) {
+ SCOPED_TSAN_INTERCEPTOR(puts, s);
+ MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false);
+ return REAL(puts)(s);
+}
+
+TSAN_INTERCEPTOR(int, rmdir, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(rmdir, path);
+ Release(thr, pc, Dir2addr(path));
+ int res = REAL(rmdir)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, opendir, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(opendir, path);
+ void *res = REAL(opendir)(path);
+ if (res != 0)
+ Acquire(thr, pc, Dir2addr(path));
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ if (epfd >= 0 && fd >= 0)
+ FdAccess(thr, pc, fd);
+ if (op == EPOLL_CTL_ADD && epfd >= 0)
+ FdRelease(thr, pc, epfd);
+ int res = REAL(epoll_ctl)(epfd, op, fd, ev);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
+void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
+ my_siginfo_t *info, void *ctx) {
+ ThreadState *thr = cur_thread();
+ SignalContext *sctx = SigCtx(thr);
+ // Don't mess with synchronous signals.
+ if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
+ // If we are sending signal to ourselves, we must process it now.
+ (sctx && sig == sctx->int_signal_send) ||
+ // If we are in blocking function, we can safely process it now
+ // (but check if we are in a recursive interceptor,
+ // i.e. pthread_join()->munmap()).
+ (sctx && sctx->in_blocking_func == 1 && thr->in_rtl == 1)) {
+ int in_rtl = thr->in_rtl;
+ thr->in_rtl = 0;
+ CHECK_EQ(thr->in_signal_handler, false);
+ thr->in_signal_handler = true;
+ if (sigact)
+ sigactions[sig].sa_sigaction(sig, info, ctx);
+ else
+ sigactions[sig].sa_handler(sig);
+ CHECK_EQ(thr->in_signal_handler, true);
+ thr->in_signal_handler = false;
+ thr->in_rtl = in_rtl;
+ return;
+ }
+
+ if (sctx == 0)
+ return;
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed == false) {
+ signal->armed = true;
+ signal->sigaction = sigact;
+ if (info)
+ internal_memcpy(&signal->siginfo, info, sizeof(*info));
+ if (ctx)
+ internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
+ sctx->pending_signal_count++;
+ }
+}
+
+static void rtl_sighandler(int sig) {
+ rtl_generic_sighandler(false, sig, 0, 0);
+}
+
+static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
+ rtl_generic_sighandler(true, sig, info, ctx);
+}
+
+TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
+ SCOPED_TSAN_INTERCEPTOR(sigaction, sig, act, old);
+ if (old)
+ internal_memcpy(old, &sigactions[sig], sizeof(*old));
+ if (act == 0)
+ return 0;
+ internal_memcpy(&sigactions[sig], act, sizeof(*act));
+ sigaction_t newact;
+ internal_memcpy(&newact, act, sizeof(newact));
+ REAL(sigfillset)(&newact.sa_mask);
+ if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) {
+ if (newact.sa_flags & SA_SIGINFO)
+ newact.sa_sigaction = rtl_sigaction;
+ else
+ newact.sa_handler = rtl_sighandler;
+ }
+ int res = REAL(sigaction)(sig, &newact, 0);
+ return res;
+}
+
+TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
+ sigaction_t act;
+ act.sa_handler = h;
+ REAL(memset)(&act.sa_mask, -1, sizeof(act.sa_mask));
+ act.sa_flags = 0;
+ sigaction_t old;
+ int res = sigaction(sig, &act, &old);
+ if (res)
+ return SIG_ERR;
+ return old.sa_handler;
+}
+
+TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
+ return REAL(sigsuspend)(mask);
+}
+
+TSAN_INTERCEPTOR(int, raise, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(raise, sig);
+ SignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ sctx->int_signal_send = sig;
+ int res = REAL(raise)(sig);
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
+ SignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ if (pid == (int)internal_getpid()) {
+ sctx->int_signal_send = sig;
+ }
+ int res = REAL(kill)(pid, sig);
+ if (pid == (int)internal_getpid()) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
+ SignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ if (tid == pthread_self()) {
+ sctx->int_signal_send = sig;
+ }
+ int res = REAL(pthread_kill)(tid, sig);
+ if (tid == pthread_self()) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
+ SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
+ // It's intercepted merely to process pending signals.
+ return REAL(gettimeofday)(tv, tz);
+}
+
+TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
+ void *hints, void *rv) {
+ SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
+  // We miss atomic synchronization in getaddrinfo and can report a false
+  // race between malloc and free inside it, so we ignore memory accesses.
+ ThreadIgnoreBegin(thr, pc);
+ // getaddrinfo calls fopen, which can be intercepted by user.
+ thr->in_rtl--;
+ CHECK_EQ(thr->in_rtl, 0);
+ int res = REAL(getaddrinfo)(node, service, hints, rv);
+ thr->in_rtl++;
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
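+
+// The in_rtl decrement/increment around REAL(getaddrinfo) above temporarily
+// leaves the runtime so that the nested fopen() interceptor executes as if
+// it were called directly from user code.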
+
+// The Linux kernel has a bug that leads to a kernel deadlock if a process
+// maps TBs of memory and then calls mlock().
+static void MlockIsUnsupported() {
+ static atomic_uint8_t printed;
+ if (atomic_exchange(&printed, 1, memory_order_relaxed))
+ return;
+ if (flags()->verbosity > 0)
+ Printf("INFO: ThreadSanitizer ignores mlock/mlockall/munlock/munlockall\n");
+}
+
+TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+TSAN_INTERCEPTOR(int, munlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+TSAN_INTERCEPTOR(int, mlockall, int flags) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+TSAN_INTERCEPTOR(int, munlockall, void) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+TSAN_INTERCEPTOR(int, fork, int fake) {
+ SCOPED_INTERCEPTOR_RAW(fork, fake);
+ int pid = REAL(fork)(fake);
+ if (pid == 0) {
+ // child
+ FdOnFork(thr, pc);
+ } else if (pid > 0) {
+ // parent
+ }
+ return pid;
+}
+
+static int OnExit(ThreadState *thr) {
+ int status = Finalize(thr);
+ REAL(fflush)(0);
+ return status;
+}
+
+struct TsanInterceptorContext {
+ ThreadState *thr;
+ const uptr caller_pc;
+ const uptr pc;
+};
+
+static void HandleRecvmsg(ThreadState *thr, uptr pc,
+ __sanitizer_msghdr *msg) {
+ int fds[64];
+ int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++)
+ FdEventCreate(thr, pc, fds[i]);
+}
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+// Causes interceptor recursion (getpwuid_r() calls fopen())
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+// Causes interceptor recursion (getaddrinfo() calls fopen())
+#undef SANITIZER_INTERCEPT_GETADDRINFO
+#undef SANITIZER_INTERCEPT_GETNAMEINFO
+// Causes interceptor recursion (glob64() calls lstat64())
+#undef SANITIZER_INTERCEPT_GLOB
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
+ do { \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
+ true)
+
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
+ ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
+ false)
+
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
+ FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
+
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
+
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ CTX()->thread_registry->SetThreadNameByUserId(thread, name)
+
+#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
+
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
+ OnExit(((TsanInterceptorContext *) ctx)->thr)
+
+#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \
+ MutexLock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
+ MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
+ MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
+ HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, msg)
+
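+// Including the .inc below stamps out the large set of interceptors shared
+// across sanitizers in terms of the COMMON_INTERCEPTOR_* macros defined
+// above. A rough sketch (not the literal expansion) of what a read-like
+// interceptor becomes:
+//   INTERCEPTOR(SSIZE_T, read, int fd, void *buf, SIZE_T count) {
+//     void *ctx;
+//     COMMON_INTERCEPTOR_ENTER(ctx, read, fd, buf, count);
+//     COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+//     SSIZE_T res = REAL(read)(fd, buf, count);
+//     if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
+//     if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+//     return res;
+//   }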
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
+
+#define TSAN_SYSCALL() \
+ ThreadState *thr = cur_thread(); \
+ ScopedSyscall scoped_syscall(thr) \
+/**/
+
+struct ScopedSyscall {
+ ThreadState *thr;
+
+ explicit ScopedSyscall(ThreadState *thr)
+ : thr(thr) {
+ if (thr->in_rtl == 0)
+ Initialize(thr);
+ thr->in_rtl++;
+ }
+
+ ~ScopedSyscall() {
+ thr->in_rtl--;
+ if (thr->in_rtl == 0)
+ ProcessPendingSignals(thr);
+ }
+};
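+
+// ScopedSyscall mirrors the interceptor scope for raw syscall hooks:
+// construction enters the runtime (initializing it on first use) and
+// destruction leaves it, draining pending signals once in_rtl drops to 0.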
+
+static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
+ TSAN_SYSCALL();
+ MemoryAccessRange(thr, pc, p, s, write);
+}
+
+static void syscall_acquire(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ Acquire(thr, pc, addr);
+ Printf("syscall_acquire(%p)\n", addr);
+}
+
+static void syscall_release(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ Printf("syscall_release(%p)\n", addr);
+ Release(thr, pc, addr);
+}
+
+static void syscall_fd_close(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ FdClose(thr, pc, fd);
+}
+
+static USED void syscall_fd_acquire(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ FdAcquire(thr, pc, fd);
+ Printf("syscall_fd_acquire(%p)\n", fd);
+}
+
+static USED void syscall_fd_release(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ Printf("syscall_fd_release(%p)\n", fd);
+ FdRelease(thr, pc, fd);
+}
+
+static void syscall_pre_fork(uptr pc) {
+ TSAN_SYSCALL();
+}
+
+static void syscall_post_fork(uptr pc, int res) {
+ TSAN_SYSCALL();
+ if (res == 0) {
+ // child
+ FdOnFork(thr, pc);
+ } else if (res > 0) {
+ // parent
+ }
+}
+
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
+ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
+
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
+ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
+
+#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+
+#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+
+#define COMMON_SYSCALL_ACQUIRE(addr) \
+ syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
+
+#define COMMON_SYSCALL_RELEASE(addr) \
+ syscall_release(GET_CALLER_PC(), (uptr)(addr))
+
+#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_PRE_FORK() \
+ syscall_pre_fork(GET_CALLER_PC())
+
+#define COMMON_SYSCALL_POST_FORK(res) \
+ syscall_post_fork(GET_CALLER_PC(), res)
+
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+
+namespace __tsan {
+
+static void finalize(void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ atexit_ctx->exit(thr, pc);
+ int status = Finalize(thr);
+ REAL(fflush)(0);
+ if (status)
+ REAL(_exit)(status);
+}
+
+void ProcessPendingSignals(ThreadState *thr) {
+ CHECK_EQ(thr->in_rtl, 0);
+ SignalContext *sctx = SigCtx(thr);
+ if (sctx == 0 || sctx->pending_signal_count == 0 || thr->in_signal_handler)
+ return;
+ Context *ctx = CTX();
+ thr->in_signal_handler = true;
+ sctx->pending_signal_count = 0;
+ // These are too big for the stack.
+ static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
+ REAL(sigfillset)(&emptyset);
+ pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
+ for (int sig = 0; sig < kSigCount; sig++) {
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed) {
+ signal->armed = false;
+ if (sigactions[sig].sa_handler != SIG_DFL
+ && sigactions[sig].sa_handler != SIG_IGN) {
+ // Ensure that the handler does not spoil errno.
+ const int saved_errno = errno;
+ errno = 0;
+ if (signal->sigaction)
+ sigactions[sig].sa_sigaction(sig, &signal->siginfo, &signal->ctx);
+ else
+ sigactions[sig].sa_handler(sig);
+ if (flags()->report_bugs && errno != 0) {
+ ScopedInRtl in_rtl;
+ __tsan::StackTrace stack;
+ uptr pc = signal->sigaction ?
+ (uptr)sigactions[sig].sa_sigaction :
+ (uptr)sigactions[sig].sa_handler;
+ pc += 1; // return address is expected, OutputReport() will undo this
+ stack.Init(&pc, 1);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ if (!IsFiredSuppression(ctx, rep, stack)) {
+ rep.AddStack(&stack);
+ OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ }
+ }
+ errno = saved_errno;
+ }
+ }
+ }
+ pthread_sigmask(SIG_SETMASK, &oldset, 0);
+ CHECK_EQ(thr->in_signal_handler, true);
+ thr->in_signal_handler = false;
+}
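+
+// Illustration (hypothetical user code, not from this file): a handler such
+// as
+//   void handler(int sig) { close(-1); }  // fails and sets errno = EBADF
+// leaves errno changed; if it interrupts code that inspects errno after a
+// failed call, the program misbehaves. The ReportTypeErrnoInSignal report
+// above flags exactly that pattern.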
+
+static void unreachable() {
+ Printf("FATAL: ThreadSanitizer: unreachable called\n");
+ Die();
+}
+
+void InitializeInterceptors() {
+ CHECK_GT(cur_thread()->in_rtl, 0);
+
+ // We need to set these up early, because functions like dlsym() can call them.
+ REAL(memset) = internal_memset;
+ REAL(memcpy) = internal_memcpy;
+ REAL(memcmp) = internal_memcmp;
+
+ // Instruct libc malloc to consume less memory.
+ mallopt(1, 0); // M_MXFAST
+ mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
+
+ SANITIZER_COMMON_INTERCEPTORS_INIT;
+
+ // We cannot use TSAN_INTERCEPT to get the address of setjmp,
+ // because it does &setjmp, and setjmp is not present in some versions of libc.
+ using __interception::GetRealFunctionAddress;
+ GetRealFunctionAddress("setjmp", (uptr*)&REAL(setjmp), 0, 0);
+ GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
+ GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0);
+ GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
+
+ TSAN_INTERCEPT(longjmp);
+ TSAN_INTERCEPT(siglongjmp);
+
+ TSAN_INTERCEPT(malloc);
+ TSAN_INTERCEPT(__libc_memalign);
+ TSAN_INTERCEPT(calloc);
+ TSAN_INTERCEPT(realloc);
+ TSAN_INTERCEPT(free);
+ TSAN_INTERCEPT(cfree);
+ TSAN_INTERCEPT(mmap);
+ TSAN_INTERCEPT(mmap64);
+ TSAN_INTERCEPT(munmap);
+ TSAN_INTERCEPT(memalign);
+ TSAN_INTERCEPT(valloc);
+ TSAN_INTERCEPT(pvalloc);
+ TSAN_INTERCEPT(posix_memalign);
+
+ TSAN_INTERCEPT(strlen);
+ TSAN_INTERCEPT(memset);
+ TSAN_INTERCEPT(memcpy);
+ TSAN_INTERCEPT(memchr);
+ TSAN_INTERCEPT(memrchr);
+ TSAN_INTERCEPT(memmove);
+ TSAN_INTERCEPT(memcmp);
+ TSAN_INTERCEPT(strchr);
+ TSAN_INTERCEPT(strchrnul);
+ TSAN_INTERCEPT(strrchr);
+ TSAN_INTERCEPT(strcpy); // NOLINT
+ TSAN_INTERCEPT(strncpy);
+ TSAN_INTERCEPT(strstr);
+ TSAN_INTERCEPT(strdup);
+
+ TSAN_INTERCEPT(pthread_create);
+ TSAN_INTERCEPT(pthread_join);
+ TSAN_INTERCEPT(pthread_detach);
+
+ TSAN_INTERCEPT(pthread_mutex_init);
+ TSAN_INTERCEPT(pthread_mutex_destroy);
+ TSAN_INTERCEPT(pthread_mutex_trylock);
+ TSAN_INTERCEPT(pthread_mutex_timedlock);
+
+ TSAN_INTERCEPT(pthread_spin_init);
+ TSAN_INTERCEPT(pthread_spin_destroy);
+ TSAN_INTERCEPT(pthread_spin_lock);
+ TSAN_INTERCEPT(pthread_spin_trylock);
+ TSAN_INTERCEPT(pthread_spin_unlock);
+
+ TSAN_INTERCEPT(pthread_rwlock_init);
+ TSAN_INTERCEPT(pthread_rwlock_destroy);
+ TSAN_INTERCEPT(pthread_rwlock_rdlock);
+ TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_wrlock);
+ TSAN_INTERCEPT(pthread_rwlock_trywrlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
+ TSAN_INTERCEPT(pthread_rwlock_unlock);
+
+ INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
+
+ TSAN_INTERCEPT(pthread_barrier_init);
+ TSAN_INTERCEPT(pthread_barrier_destroy);
+ TSAN_INTERCEPT(pthread_barrier_wait);
+
+ TSAN_INTERCEPT(pthread_once);
+
+ TSAN_INTERCEPT(sem_init);
+ TSAN_INTERCEPT(sem_destroy);
+ TSAN_INTERCEPT(sem_wait);
+ TSAN_INTERCEPT(sem_trywait);
+ TSAN_INTERCEPT(sem_timedwait);
+ TSAN_INTERCEPT(sem_post);
+ TSAN_INTERCEPT(sem_getvalue);
+
+ TSAN_INTERCEPT(stat);
+ TSAN_INTERCEPT(__xstat);
+ TSAN_INTERCEPT(stat64);
+ TSAN_INTERCEPT(__xstat64);
+ TSAN_INTERCEPT(lstat);
+ TSAN_INTERCEPT(__lxstat);
+ TSAN_INTERCEPT(lstat64);
+ TSAN_INTERCEPT(__lxstat64);
+ TSAN_INTERCEPT(fstat);
+ TSAN_INTERCEPT(__fxstat);
+ TSAN_INTERCEPT(fstat64);
+ TSAN_INTERCEPT(__fxstat64);
+ TSAN_INTERCEPT(open);
+ TSAN_INTERCEPT(open64);
+ TSAN_INTERCEPT(creat);
+ TSAN_INTERCEPT(creat64);
+ TSAN_INTERCEPT(dup);
+ TSAN_INTERCEPT(dup2);
+ TSAN_INTERCEPT(dup3);
+ TSAN_INTERCEPT(eventfd);
+ TSAN_INTERCEPT(signalfd);
+ TSAN_INTERCEPT(inotify_init);
+ TSAN_INTERCEPT(inotify_init1);
+ TSAN_INTERCEPT(socket);
+ TSAN_INTERCEPT(socketpair);
+ TSAN_INTERCEPT(connect);
+ TSAN_INTERCEPT(bind);
+ TSAN_INTERCEPT(listen);
+ TSAN_INTERCEPT(epoll_create);
+ TSAN_INTERCEPT(epoll_create1);
+ TSAN_INTERCEPT(close);
+ TSAN_INTERCEPT(__close);
+ TSAN_INTERCEPT(__res_iclose);
+ TSAN_INTERCEPT(pipe);
+ TSAN_INTERCEPT(pipe2);
+
+ TSAN_INTERCEPT(send);
+ TSAN_INTERCEPT(sendmsg);
+ TSAN_INTERCEPT(recv);
+
+ TSAN_INTERCEPT(unlink);
+ TSAN_INTERCEPT(fopen);
+ TSAN_INTERCEPT(freopen);
+ TSAN_INTERCEPT(fclose);
+ TSAN_INTERCEPT(fread);
+ TSAN_INTERCEPT(fwrite);
+ TSAN_INTERCEPT(fflush);
+ TSAN_INTERCEPT(abort);
+ TSAN_INTERCEPT(puts);
+ TSAN_INTERCEPT(rmdir);
+ TSAN_INTERCEPT(opendir);
+
+ TSAN_INTERCEPT(epoll_ctl);
+ TSAN_INTERCEPT(epoll_wait);
+
+ TSAN_INTERCEPT(sigaction);
+ TSAN_INTERCEPT(signal);
+ TSAN_INTERCEPT(sigsuspend);
+ TSAN_INTERCEPT(raise);
+ TSAN_INTERCEPT(kill);
+ TSAN_INTERCEPT(pthread_kill);
+ TSAN_INTERCEPT(sleep);
+ TSAN_INTERCEPT(usleep);
+ TSAN_INTERCEPT(nanosleep);
+ TSAN_INTERCEPT(gettimeofday);
+ TSAN_INTERCEPT(getaddrinfo);
+
+ TSAN_INTERCEPT(mlock);
+ TSAN_INTERCEPT(munlock);
+ TSAN_INTERCEPT(mlockall);
+ TSAN_INTERCEPT(munlockall);
+
+ TSAN_INTERCEPT(fork);
+ TSAN_INTERCEPT(dlopen);
+ TSAN_INTERCEPT(dlclose);
+ TSAN_INTERCEPT(on_exit);
+ TSAN_INTERCEPT(__cxa_atexit);
+ TSAN_INTERCEPT(_exit);
+
+ // Need to set it up, because interceptors check that the function is resolved.
+ // But atexit is emitted directly into the module, so it can't be resolved.
+ REAL(atexit) = (int(*)(void(*)()))unreachable;
+ atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
+ AtExitContext();
+
+ if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
+ Printf("ThreadSanitizer: failed to setup atexit callback\n");
+ Die();
+ }
+
+ if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
+ Printf("ThreadSanitizer: failed to create thread key\n");
+ Die();
+ }
+
+ FdInit();
+}
+
+void internal_start_thread(void(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user
+ // signals.
+ __sanitizer_kernel_sigset_t set, old;
+ internal_sigfillset(&set);
+ internal_sigprocmask(SIG_SETMASK, &set, &old);
+ void *th;
+ REAL(pthread_create)(&th, 0, (void*(*)(void *arg))func, arg);
+ REAL(pthread_detach)(th);
+ internal_sigprocmask(SIG_SETMASK, &old, 0);
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interface.cc b/gcc-4.9/libsanitizer/tsan/tsan_interface.cc
new file mode 100644
index 000000000..e056bd465
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interface.cc
@@ -0,0 +1,96 @@
+//===-- tsan_interface.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan; // NOLINT
+
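+// Local stand-ins for the <stdint.h> names used by the ALIAS declarations
+// below, presumably so this file need not pull in libc headers.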
+typedef u16 uint16_t;
+typedef u32 uint32_t;
+typedef u64 uint64_t;
+
+void __tsan_init() {
+ Initialize(cur_thread());
+}
+
+void __tsan_read16(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+}
+
+void __tsan_write16(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+}
+
+u16 __tsan_unaligned_read2(const uu16 *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
+ return *addr;
+}
+
+u32 __tsan_unaligned_read4(const uu32 *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
+ return *addr;
+}
+
+u64 __tsan_unaligned_read8(const uu64 *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
+ return *addr;
+}
+
+void __tsan_unaligned_write2(uu16 *addr, u16 v) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
+ *addr = v;
+}
+
+void __tsan_unaligned_write4(uu32 *addr, u32 v) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
+ *addr = v;
+}
+
+void __tsan_unaligned_write8(uu64 *addr, u64 v) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+ *addr = v;
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uint16_t __sanitizer_unaligned_load16(void *addr)
+ ALIAS("__tsan_unaligned_read2");
+SANITIZER_INTERFACE_ATTRIBUTE
+uint32_t __sanitizer_unaligned_load32(void *addr)
+ ALIAS("__tsan_unaligned_read4");
+SANITIZER_INTERFACE_ATTRIBUTE
+uint64_t __sanitizer_unaligned_load64(void *addr)
+ ALIAS("__tsan_unaligned_read8");
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(void *addr, uint16_t v)
+ ALIAS("__tsan_unaligned_write2");
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(void *addr, uint32_t v)
+ ALIAS("__tsan_unaligned_write4");
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(void *addr, uint64_t v)
+ ALIAS("__tsan_unaligned_write8");
+}
+
+void __tsan_acquire(void *addr) {
+ Acquire(cur_thread(), CALLERPC, (uptr)addr);
+}
+
+void __tsan_release(void *addr) {
+ Release(cur_thread(), CALLERPC, (uptr)addr);
+}
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interface.h b/gcc-4.9/libsanitizer/tsan/tsan_interface.h
new file mode 100644
index 000000000..63de7b2ab
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interface.h
@@ -0,0 +1,65 @@
+//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Calls to the functions declared in this header are inserted by the
+// instrumentation module.
+// This header can be included by the instrumented program or by TSan tests.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_H
+#define TSAN_INTERFACE_H
+
+#include <sanitizer_common/sanitizer_internal_defs.h>
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// This function should be called at the very beginning of the process,
+// before any instrumented code is executed and before any call to malloc.
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE u16 __tsan_unaligned_read2(const uu16 *addr);
+SANITIZER_INTERFACE_ATTRIBUTE u32 __tsan_unaligned_read4(const uu32 *addr);
+SANITIZER_INTERFACE_ATTRIBUTE u64 __tsan_unaligned_read8(const uu64 *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(uu16 *addr, u16 v);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(uu32 *addr, u32 v);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(uu64 *addr, u64 v);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_vptr_update(void **vptr_p, void *new_val);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_read_range(void *addr, unsigned long size); // NOLINT
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_write_range(void *addr, unsigned long size); // NOLINT
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interface_ann.cc b/gcc-4.9/libsanitizer/tsan/tsan_interface_ann.cc
new file mode 100644
index 000000000..38224f429
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interface_ann.cc
@@ -0,0 +1,462 @@
+//===-- tsan_interface_ann.cc ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_interface_ann.h"
+#include "tsan_mutex.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_platform.h"
+#include "tsan_vector.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan; // NOLINT
+
+namespace __tsan {
+
+class ScopedAnnotation {
+ public:
+ ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l,
+ uptr pc)
+ : thr_(thr)
+ , in_rtl_(thr->in_rtl) {
+ CHECK_EQ(thr_->in_rtl, 0);
+ FuncEntry(thr_, pc);
+ thr_->in_rtl++;
+ DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l);
+ }
+
+ ~ScopedAnnotation() {
+ thr_->in_rtl--;
+ CHECK_EQ(in_rtl_, thr_->in_rtl);
+ FuncExit(thr_);
+ }
+ private:
+ ThreadState *const thr_;
+ const int in_rtl_;
+};
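+
+// Each annotation is modeled as a function call: FuncEntry/FuncExit above
+// make the annotation frame visible in report stacks, and the in_rtl bump
+// marks the thread as inside the runtime for the annotation body.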
+
+#define SCOPED_ANNOTATION(typ) \
+ if (!flags()->enable_annotations) \
+ return; \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = (uptr)__builtin_return_address(0); \
+ StatInc(thr, StatAnnotation); \
+ StatInc(thr, Stat##typ); \
+ ScopedAnnotation sa(thr, __FUNCTION__, f, l, caller_pc); \
+ const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ (void)pc; \
+/**/
+
+static const int kMaxDescLen = 128;
+
+struct ExpectRace {
+ ExpectRace *next;
+ ExpectRace *prev;
+ int hitcount;
+ int addcount;
+ uptr addr;
+ uptr size;
+ char *file;
+ int line;
+ char desc[kMaxDescLen];
+};
+
+struct DynamicAnnContext {
+ Mutex mtx;
+ ExpectRace expect;
+ ExpectRace benign;
+
+ DynamicAnnContext()
+ : mtx(MutexTypeAnnotations, StatMtxAnnotations) {
+ }
+};
+
+static DynamicAnnContext *dyn_ann_ctx;
+static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);
+
+static void AddExpectRace(ExpectRace *list,
+ char *f, int l, uptr addr, uptr size, char *desc) {
+ ExpectRace *race = list->next;
+ for (; race != list; race = race->next) {
+ if (race->addr == addr && race->size == size) {
+ race->addcount++;
+ return;
+ }
+ }
+ race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
+ race->addr = addr;
+ race->size = size;
+ race->file = f;
+ race->line = l;
+ race->desc[0] = 0;
+ race->hitcount = 0;
+ race->addcount = 1;
+ if (desc) {
+ int i = 0;
+ for (; i < kMaxDescLen - 1 && desc[i]; i++)
+ race->desc[i] = desc[i];
+ race->desc[i] = 0;
+ }
+ race->prev = list;
+ race->next = list->next;
+ race->next->prev = race;
+ list->next = race;
+}
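+
+// The expect/benign lists are circular doubly-linked lists with the list
+// head acting as sentinel; re-adding a race with the same addr/size only
+// bumps its addcount.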
+
+static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
+ for (ExpectRace *race = list->next; race != list; race = race->next) {
+ uptr maxbegin = max(race->addr, addr);
+ uptr minend = min(race->addr + race->size, addr + size);
+ if (maxbegin < minend)
+ return race;
+ }
+ return 0;
+}
+
+static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
+ ExpectRace *race = FindRace(list, addr, size);
+ if (race == 0 && AlternativeAddress(addr))
+ race = FindRace(list, AlternativeAddress(addr), size);
+ if (race == 0)
+ return false;
+ DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
+ race->desc, race->addr, (int)race->size, race->file, race->line);
+ race->hitcount++;
+ return true;
+}
+
+static void InitList(ExpectRace *list) {
+ list->next = list;
+ list->prev = list;
+}
+
+void InitializeDynamicAnnotations() {
+ dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
+ InitList(&dyn_ann_ctx->expect);
+ InitList(&dyn_ann_ctx->benign);
+}
+
+bool IsExpectedReport(uptr addr, uptr size) {
+ Lock lock(&dyn_ann_ctx->mtx);
+ if (CheckContains(&dyn_ann_ctx->expect, addr, size))
+ return true;
+ if (CheckContains(&dyn_ann_ctx->benign, addr, size))
+ return true;
+ return false;
+}
+
+static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
+ int *unique_count, int *hit_count, int ExpectRace::*counter) {
+ ExpectRace *list = &dyn_ann_ctx->benign;
+ for (ExpectRace *race = list->next; race != list; race = race->next) {
+ (*unique_count)++;
+ if (race->*counter == 0)
+ continue;
+ (*hit_count) += race->*counter;
+ uptr i = 0;
+ for (; i < matched->Size(); i++) {
+ ExpectRace *race0 = &(*matched)[i];
+ if (race->line == race0->line
+ && internal_strcmp(race->file, race0->file) == 0
+ && internal_strcmp(race->desc, race0->desc) == 0) {
+ race0->*counter += race->*counter;
+ break;
+ }
+ }
+ if (i == matched->Size())
+ matched->PushBack(*race);
+ }
+}
+
+void PrintMatchedBenignRaces() {
+ Lock lock(&dyn_ann_ctx->mtx);
+ int unique_count = 0;
+ int hit_count = 0;
+ int add_count = 0;
+ Vector<ExpectRace> hit_matched(MBlockScopedBuf);
+ CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
+ &ExpectRace::hitcount);
+ Vector<ExpectRace> add_matched(MBlockScopedBuf);
+ CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
+ &ExpectRace::addcount);
+ if (hit_matched.Size()) {
+ Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
+ hit_count, (int)internal_getpid());
+ for (uptr i = 0; i < hit_matched.Size(); i++) {
+ Printf("%d %s:%d %s\n",
+ hit_matched[i].hitcount, hit_matched[i].file,
+ hit_matched[i].line, hit_matched[i].desc);
+ }
+ }
+ if (add_matched.Size()) {
+ Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
+ " (pid=%d):\n",
+ add_count, unique_count, (int)internal_getpid());
+ for (uptr i = 0; i < add_matched.Size(); i++) {
+ Printf("%d %s:%d %s\n",
+ add_matched[i].addcount, add_matched[i].file,
+ add_matched[i].line, add_matched[i].desc);
+ }
+ }
+}
+
+static void ReportMissedExpectedRace(ExpectRace *race) {
+ Printf("==================\n");
+ Printf("WARNING: ThreadSanitizer: missed expected data race\n");
+ Printf(" %s addr=%zx %s:%d\n",
+ race->desc, race->addr, race->file, race->line);
+ Printf("==================\n");
+}
+} // namespace __tsan
+
+using namespace __tsan; // NOLINT
+
+extern "C" {
+void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensBefore);
+ Release(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensAfter);
+ Acquire(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
+ SCOPED_ANNOTATION(AnnotateCondVarSignal);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
+ SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
+ SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
+ uptr lock) {
+ SCOPED_ANNOTATION(AnnotateCondVarWait);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockCreate);
+ MutexCreate(thr, pc, m, true, true, false);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
+ MutexCreate(thr, pc, m, true, true, true);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockDestroy);
+ MutexDestroy(thr, pc, m);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
+ uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockAcquired);
+ if (is_w)
+ MutexLock(thr, pc, m);
+ else
+ MutexReadLock(thr, pc, m);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
+ uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockReleased);
+ if (is_w)
+ MutexUnlock(thr, pc, m);
+ else
+ MutexReadUnlock(thr, pc, m);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
+ SCOPED_ANNOTATION(AnnotateTraceMemory);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateFlushState);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
+ uptr size) {
+ SCOPED_ANNOTATION(AnnotateNewMemory);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
+ SCOPED_ANNOTATION(AnnotateNoOp);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
+ Lock lock(&dyn_ann_ctx->mtx);
+ while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
+ ExpectRace *race = dyn_ann_ctx->expect.next;
+ if (race->hitcount == 0) {
+ CTX()->nmissed_expected++;
+ ReportMissedExpectedRace(race);
+ }
+ race->prev->next = race->next;
+ race->next->prev = race->prev;
+ internal_free(race);
+ }
+}
+
+void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
+ char *f, int l, int enable) {
+ SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
+ // FIXME: Reconsider this functionality later. It may be irrelevant.
+}
+
+void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
+ char *f, int l, uptr mu) {
+ SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQGet(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQGet);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQPut(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQPut);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQDestroy);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQCreate);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateExpectRace(
+ char *f, int l, uptr mem, char *desc) {
+ SCOPED_ANNOTATION(AnnotateExpectRace);
+ Lock lock(&dyn_ann_ctx->mtx);
+ AddExpectRace(&dyn_ann_ctx->expect,
+ f, l, mem, 1, desc);
+ DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
+}
+
+static void BenignRaceImpl(
+ char *f, int l, uptr mem, uptr size, char *desc) {
+ Lock lock(&dyn_ann_ctx->mtx);
+ AddExpectRace(&dyn_ann_ctx->benign,
+ f, l, mem, size, desc);
+ DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
+}
+
+// FIXME: Turn this off later. It is doubtful that a "benign" data race is a
+// meaningful concept at all; see Hans Boehm's writing on the subject.
+void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr size, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, size, desc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateBenignRace(
+ char *f, int l, uptr mem, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRace);
+ BenignRaceImpl(f, l, mem, 1, desc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
+ ThreadIgnoreBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
+ ThreadIgnoreBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
+ ThreadIgnoreSyncBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
+ ThreadIgnoreSyncEnd(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
+ SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
+ SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateThreadName(
+ char *f, int l, char *name) {
+ SCOPED_ANNOTATION(AnnotateThreadName);
+ ThreadSetName(thr, name);
+}
+
+// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
+// WTFAnnotateHappensAfter(). They are used by WebKit to annotate
+// atomic operations, which ThreadSanitizer handles correctly by itself.
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensBefore);
+}
+
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensAfter);
+}
+
+void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr sz, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, sz, desc);
+}
+
+int INTERFACE_ATTRIBUTE RunningOnValgrind() {
+ return flags()->running_on_valgrind;
+}
+
+double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
+ return 10.0;
+}
+
+const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
+ if (internal_strcmp(query, "pure_happens_before") == 0)
+ return "1";
+ else
+ return "0";
+}
+
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
+} // extern "C"
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interface_ann.h b/gcc-4.9/libsanitizer/tsan/tsan_interface_ann.h
new file mode 100644
index 000000000..45c18352e
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interface_ann.h
@@ -0,0 +1,31 @@
+//===-- tsan_interface_ann.h ------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for dynamic annotations.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ANN_H
+#define TSAN_INTERFACE_ANN_H
+
+#include <sanitizer_common/sanitizer_internal_defs.h>
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_acquire(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_release(void *addr);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_ANN_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interface_atomic.cc b/gcc-4.9/libsanitizer/tsan/tsan_interface_atomic.cc
new file mode 100644
index 000000000..180d87b79
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -0,0 +1,677 @@
+//===-- tsan_interface_atomic.cc ------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+// ThreadSanitizer atomic operations are based on C++11/C1x standards.
+// For background see the C++11 standard. A slightly older, publicly
+// available draft of the standard (not entirely up-to-date, but close enough
+// for casual browsing) is available here:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
+// The following page contains more background information:
+// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_interface_atomic.h"
+#include "tsan_flags.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan; // NOLINT
+
+#define SCOPED_ATOMIC(func, ...) \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ mo = ConvertOrder(mo); \
+ mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
+ ThreadState *const thr = cur_thread(); \
+ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
+ ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
+ return Atomic##func(thr, pc, __VA_ARGS__); \
+/**/
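+
+// A sketch (not the literal expansion) of what SCOPED_ATOMIC produces for an
+// entry point such as __tsan_atomic32_load defined below:
+//   a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
+//     // capture caller pc, normalize mo, bump stats, enter runtime scope
+//     return AtomicLoad(thr, pc, a, mo);
+//   }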
+
+// Some shortcuts.
+typedef __tsan_memory_order morder;
+typedef __tsan_atomic8 a8;
+typedef __tsan_atomic16 a16;
+typedef __tsan_atomic32 a32;
+typedef __tsan_atomic64 a64;
+typedef __tsan_atomic128 a128;
+const morder mo_relaxed = __tsan_memory_order_relaxed;
+const morder mo_consume = __tsan_memory_order_consume;
+const morder mo_acquire = __tsan_memory_order_acquire;
+const morder mo_release = __tsan_memory_order_release;
+const morder mo_acq_rel = __tsan_memory_order_acq_rel;
+const morder mo_seq_cst = __tsan_memory_order_seq_cst;
+
+class ScopedAtomic {
+ public:
+ ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
+ morder mo, const char *func)
+ : thr_(thr) {
+ CHECK_EQ(thr_->in_rtl, 0);
+ ProcessPendingSignals(thr);
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
+ thr_->in_rtl++;
+ }
+ ~ScopedAtomic() {
+ thr_->in_rtl--;
+ CHECK_EQ(thr_->in_rtl, 0);
+ FuncExit(thr_);
+ }
+ private:
+ ThreadState *thr_;
+};
+
+static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
+ StatInc(thr, StatAtomic);
+ StatInc(thr, t);
+ StatInc(thr, size == 1 ? StatAtomic1
+ : size == 2 ? StatAtomic2
+ : size == 4 ? StatAtomic4
+ : size == 8 ? StatAtomic8
+ : StatAtomic16);
+ StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
+ : mo == mo_consume ? StatAtomicConsume
+ : mo == mo_acquire ? StatAtomicAcquire
+ : mo == mo_release ? StatAtomicRelease
+ : mo == mo_acq_rel ? StatAtomicAcq_Rel
+ : StatAtomicSeq_Cst);
+}
+
+static bool IsLoadOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_consume
+ || mo == mo_acquire || mo == mo_seq_cst;
+}
+
+static bool IsStoreOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
+}
+
+static bool IsReleaseOrder(morder mo) {
+ return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+static bool IsAcquireOrder(morder mo) {
+ return mo == mo_consume || mo == mo_acquire
+ || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+static bool IsAcqRelOrder(morder mo) {
+ return mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
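+// The +100500 values are presumably a legacy encoding (older compiler
+// releases passed memory orders as (1 << order) + 100500); ConvertOrder maps
+// that encoding back onto the plain 0..5 enum used here.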
+static morder ConvertOrder(morder mo) {
+ if (mo > (morder)100500) {
+ mo = morder(mo - 100500);
+ if (mo == morder(1 << 0))
+ mo = mo_relaxed;
+ else if (mo == morder(1 << 1))
+ mo = mo_consume;
+ else if (mo == morder(1 << 2))
+ mo = mo_acquire;
+ else if (mo == morder(1 << 3))
+ mo = mo_release;
+ else if (mo == morder(1 << 4))
+ mo = mo_acq_rel;
+ else if (mo == morder(1 << 5))
+ mo = mo_seq_cst;
+ }
+ CHECK_GE(mo, mo_relaxed);
+ CHECK_LE(mo, mo_seq_cst);
+ return mo;
+}
+
+template<typename T> T func_xchg(volatile T *v, T op) {
+ T res = __sync_lock_test_and_set(v, op);
+ // __sync_lock_test_and_set does not contain a full barrier.
+ __sync_synchronize();
+ return res;
+}
+
+template<typename T> T func_add(volatile T *v, T op) {
+ return __sync_fetch_and_add(v, op);
+}
+
+template<typename T> T func_sub(volatile T *v, T op) {
+ return __sync_fetch_and_sub(v, op);
+}
+
+template<typename T> T func_and(volatile T *v, T op) {
+ return __sync_fetch_and_and(v, op);
+}
+
+template<typename T> T func_or(volatile T *v, T op) {
+ return __sync_fetch_and_or(v, op);
+}
+
+template<typename T> T func_xor(volatile T *v, T op) {
+ return __sync_fetch_and_xor(v, op);
+}
+
+template<typename T> T func_nand(volatile T *v, T op) {
+ // clang does not support __sync_fetch_and_nand.
+ T cmp = *v;
+ for (;;) {
+ T newv = ~(cmp & op);
+ T cur = __sync_val_compare_and_swap(v, cmp, newv);
+ if (cmp == cur)
+ return cmp;
+ cmp = cur;
+ }
+}
+
+template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
+ return __sync_val_compare_and_swap(v, cmp, xch);
+}
+
+// clang does not support 128-bit atomic ops.
+// Atomic ops are executed under the tsan internal mutex;
+// here we assume that the atomic variables are not accessed
+// from non-instrumented code.
+#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+a128 func_xchg(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = op;
+ return cmp;
+}
+
+a128 func_add(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp + op;
+ return cmp;
+}
+
+a128 func_sub(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp - op;
+ return cmp;
+}
+
+a128 func_and(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp & op;
+ return cmp;
+}
+
+a128 func_or(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp | op;
+ return cmp;
+}
+
+a128 func_xor(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp ^ op;
+ return cmp;
+}
+
+a128 func_nand(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = ~(cmp & op);
+ return cmp;
+}
+
+a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+ a128 cur = *v;
+ if (cur == cmp)
+ *v = xch;
+ return cur;
+}
+#endif
+
+template<typename T>
+static int SizeLog() {
+ if (sizeof(T) <= 1)
+ return kSizeLog1;
+ else if (sizeof(T) <= 2)
+ return kSizeLog2;
+ else if (sizeof(T) <= 4)
+ return kSizeLog4;
+ else
+ return kSizeLog8;
+ // For 16-byte atomics we also use 8-byte memory access;
+ // this leads to false negatives only in very obscure cases.
+}
+
+template<typename T>
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
+ morder mo) {
+ CHECK(IsLoadOrder(mo));
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
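+ // Note that sizeof(a) is the pointer size, i.e. the native word, so
+ // accesses up to word size take this path.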
+ if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ return *a; // as if atomic
+ }
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
+ AcquireImpl(thr, pc, &s->clock);
+ T v = *a;
+ s->mtx.ReadUnlock();
+ __sync_synchronize();
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ return v;
+}
+
+template<typename T>
+static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ CHECK(IsStoreOrder(mo));
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+ // Strictly speaking, even a relaxed store cuts off the release sequence,
+ // so we must reset the clock.
+ if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
+ *a = v; // as if atomic
+ return;
+ }
+ __sync_synchronize();
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, pc, &s->clock);
+ *a = v;
+ s->mtx.Unlock();
+ // Trailing memory barrier to provide sequential consistency
+ // for Dekker-like store-load synchronization.
+ __sync_synchronize();
+}
+
+template<typename T, T (*F)(volatile T *v, T op)>
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ SyncVar *s = 0;
+ if (mo != mo_relaxed) {
+ s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ if (IsAcqRelOrder(mo))
+ AcquireReleaseImpl(thr, pc, &s->clock);
+ else if (IsReleaseOrder(mo))
+ ReleaseImpl(thr, pc, &s->clock);
+ else if (IsAcquireOrder(mo))
+ AcquireImpl(thr, pc, &s->clock);
+ }
+ v = F(a, v);
+ if (s)
+ s->mtx.Unlock();
+ return v;
+}
+
+template<typename T>
+static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static bool AtomicCAS(ThreadState *thr, uptr pc,
+ volatile T *a, T *c, T v, morder mo, morder fmo) {
+ (void)fmo; // Unused because llvm does not pass it yet.
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ SyncVar *s = 0;
+ if (mo != mo_relaxed) {
+ s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ if (IsAcqRelOrder(mo))
+ AcquireReleaseImpl(thr, pc, &s->clock);
+ else if (IsReleaseOrder(mo))
+ ReleaseImpl(thr, pc, &s->clock);
+ else if (IsAcquireOrder(mo))
+ AcquireImpl(thr, pc, &s->clock);
+ }
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ if (s)
+ s->mtx.Unlock();
+ if (pr == cc)
+ return true;
+ *c = pr;
+ return false;
+}
+
+template<typename T>
+static T AtomicCAS(ThreadState *thr, uptr pc,
+ volatile T *a, T c, T v, morder mo, morder fmo) {
+ AtomicCAS(thr, pc, a, &c, v, mo, fmo);
+ return c;
+}
+
+static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
+ // FIXME(dvyukov): not implemented.
+ __sync_synchronize();
+}
+
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+#endif
+
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+#endif
+
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+#endif
+
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+#endif
+
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+#endif
+
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+#endif
+
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+#endif
+
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+#endif
+
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+#endif
+
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+void __tsan_atomic_thread_fence(morder mo) {
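+ // Dummy location: SCOPED_ATOMIC references `a` (sizeof(*a) for stats and
+ // logging); a fence has no real memory operand.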
+ char* a = 0;
+ SCOPED_ATOMIC(Fence, mo);
+}
+
+void __tsan_atomic_signal_fence(morder mo) {
+}
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interface_atomic.h b/gcc-4.9/libsanitizer/tsan/tsan_interface_atomic.h
new file mode 100644
index 000000000..c500614ac
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interface_atomic.h
@@ -0,0 +1,203 @@
+//===-- tsan_interface_atomic.h ---------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+#ifndef INTERFACE_ATTRIBUTE
+# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+
+#if defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)
+__extension__ typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
+
+// Part of ABI, do not change.
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
+typedef enum {
+ __tsan_memory_order_relaxed,
+ __tsan_memory_order_consume,
+ __tsan_memory_order_acquire,
+ __tsan_memory_order_release,
+ __tsan_memory_order_acq_rel,
+ __tsan_memory_order_seq_cst
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+void __tsan_atomic_signal_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#undef INTERFACE_ATTRIBUTE
+
+#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interface_inl.h b/gcc-4.9/libsanitizer/tsan/tsan_interface_inl.h
new file mode 100644
index 000000000..4d6e10bcf
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interface_inl.h
@@ -0,0 +1,83 @@
+//===-- tsan_interface_inl.h ------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan; // NOLINT
+
+void __tsan_read1(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_read2(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_read4(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_read8(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_write1(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_write2(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_write4(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_write8(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_vptr_update(void **vptr_p, void *new_val) {
+ CHECK_EQ(sizeof(vptr_p), 8);
+ if (*vptr_p != new_val) {
+ ThreadState *thr = cur_thread();
+ thr->is_vptr_access = true;
+ MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+ thr->is_vptr_access = false;
+ }
+}
+
+void __tsan_vptr_read(void **vptr_p) {
+ CHECK_EQ(sizeof(vptr_p), 8);
+ ThreadState *thr = cur_thread();
+ thr->is_vptr_access = true;
+ MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+ thr->is_vptr_access = false;
+}
+
+void __tsan_func_entry(void *pc) {
+ FuncEntry(cur_thread(), (uptr)pc);
+}
+
+void __tsan_func_exit() {
+ FuncExit(cur_thread());
+}
+
+void __tsan_read_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
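+
+// A hedged sketch (illustrative only) of what compiler instrumentation that
+// targets the entry points above conceptually emits for a small function;
+// the real calls are inserted by the compiler pass and placement may differ.
+#if 0
+int g;
+void foo(int *p) {
+  __tsan_func_entry(__builtin_return_address(0));
+  __tsan_write4(p);   // shadow update for the 4-byte store below
+  *p = 1;
+  __tsan_read4(&g);   // shadow update for the 4-byte load below
+  int x = g;
+  (void)x;
+  __tsan_func_exit();
+}
+#endif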
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interface_java.cc b/gcc-4.9/libsanitizer/tsan/tsan_interface_java.cc
new file mode 100644
index 000000000..70b5e5fcc
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interface_java.cc
@@ -0,0 +1,329 @@
+//===-- tsan_interface_java.cc --------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface_java.h"
+#include "tsan_rtl.h"
+#include "tsan_mutex.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+using namespace __tsan; // NOLINT
+
+namespace __tsan {
+
+const uptr kHeapShadow = 0x300000000000ull;
+const uptr kHeapAlignment = 8;
+
+struct BlockDesc {
+ bool begin;
+ Mutex mtx;
+ SyncVar *head;
+
+ BlockDesc()
+ : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
+ , head() {
+ CHECK_EQ(begin, false);
+ begin = true;
+ }
+
+ ~BlockDesc() {
+ CHECK_EQ(begin, true);
+ begin = false;
+ ThreadState *thr = cur_thread();
+ SyncVar *s = head;
+ while (s) {
+ SyncVar *s1 = s->next;
+ StatInc(thr, StatSyncDestroyed);
+ s->mtx.Lock();
+ s->mtx.Unlock();
+ thr->mset.Remove(s->GetId());
+ DestroyAndFree(s);
+ s = s1;
+ }
+ }
+};
+
+struct JavaContext {
+ const uptr heap_begin;
+ const uptr heap_size;
+ BlockDesc *heap_shadow;
+
+ JavaContext(jptr heap_begin, jptr heap_size)
+ : heap_begin(heap_begin)
+ , heap_size(heap_size) {
+ uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
+ heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
+ if ((uptr)heap_shadow != kHeapShadow) {
+ Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
+ Die();
+ }
+ }
+};
+
+class ScopedJavaFunc {
+ public:
+ ScopedJavaFunc(ThreadState *thr, uptr pc)
+ : thr_(thr) {
+ Initialize(thr_);
+ FuncEntry(thr, pc);
+ CHECK_EQ(thr_->in_rtl, 0);
+ thr_->in_rtl++;
+ }
+
+ ~ScopedJavaFunc() {
+ thr_->in_rtl--;
+ CHECK_EQ(thr_->in_rtl, 0);
+ FuncExit(thr_);
+ // FIXME(dvyukov): process pending signals.
+ }
+
+ private:
+ ThreadState *thr_;
+};
+
+static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
+static JavaContext *jctx;
+
+static BlockDesc *getblock(uptr addr) {
+ uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
+ return &jctx->heap_shadow[i];
+}
+
+static uptr USED getmem(BlockDesc *b) {
+ uptr i = b - jctx->heap_shadow;
+ uptr p = jctx->heap_begin + i * kHeapAlignment;
+ CHECK_GE(p, jctx->heap_begin);
+ CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
+ return p;
+}
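+
+// Example of the index arithmetic above (illustrative numbers): with
+// heap_begin == 0x720000000000 and kHeapAlignment == 8, the address
+// 0x720000000010 maps to BlockDesc index (0x10 / 8) == 2 in heap_shadow,
+// and getmem() inverts that mapping.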
+
+static BlockDesc *getblockbegin(uptr addr) {
+ for (BlockDesc *b = getblock(addr);; b--) {
+ CHECK_GE(b, jctx->heap_shadow);
+ if (b->begin)
+ return b;
+ }
+ return 0;
+}
+
+SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
+ bool write_lock, bool create) {
+ if (jctx == 0 || addr < jctx->heap_begin
+ || addr >= jctx->heap_begin + jctx->heap_size)
+ return 0;
+ BlockDesc *b = getblockbegin(addr);
+ DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
+ Lock l(&b->mtx);
+ SyncVar *s = b->head;
+ for (; s; s = s->next) {
+ if (s->addr == addr) {
+ DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
+ break;
+ }
+ }
+ if (s == 0 && create) {
+ DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
+ s = CTX()->synctab.Create(thr, pc, addr);
+ s->next = b->head;
+ b->head = s;
+ }
+ if (s) {
+ if (write_lock)
+ s->mtx.Lock();
+ else
+ s->mtx.ReadLock();
+ }
+ return s;
+}
+
+SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
+ // We do not destroy Java mutexes other than in __tsan_java_free().
+ return 0;
+}
+
+} // namespace __tsan
+
+#define SCOPED_JAVA_FUNC(func) \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = GET_CALLER_PC(); \
+ const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ (void)pc; \
+ ScopedJavaFunc scoped(thr, caller_pc); \
+/**/
+
+void __tsan_java_init(jptr heap_begin, jptr heap_size) {
+ SCOPED_JAVA_FUNC(__tsan_java_init);
+ DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
+ CHECK_EQ(jctx, 0);
+ CHECK_GT(heap_begin, 0);
+ CHECK_GT(heap_size, 0);
+ CHECK_EQ(heap_begin % kHeapAlignment, 0);
+ CHECK_EQ(heap_size % kHeapAlignment, 0);
+ CHECK_LT(heap_begin, heap_begin + heap_size);
+ jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
+}
+
+int __tsan_java_fini() {
+ SCOPED_JAVA_FUNC(__tsan_java_fini);
+ DPrintf("#%d: java_fini()\n", thr->tid);
+ CHECK_NE(jctx, 0);
+ // FIXME(dvyukov): this does not call atexit() callbacks.
+ int status = Finalize(thr);
+ DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
+ return status;
+}
+
+void __tsan_java_alloc(jptr ptr, jptr size) {
+ SCOPED_JAVA_FUNC(__tsan_java_alloc);
+ DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
+ CHECK_NE(jctx, 0);
+ CHECK_NE(size, 0);
+ CHECK_EQ(ptr % kHeapAlignment, 0);
+ CHECK_EQ(size % kHeapAlignment, 0);
+ CHECK_GE(ptr, jctx->heap_begin);
+ CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ BlockDesc *b = getblock(ptr);
+ new(b) BlockDesc();
+}
+
+void __tsan_java_free(jptr ptr, jptr size) {
+ SCOPED_JAVA_FUNC(__tsan_java_free);
+ DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
+ CHECK_NE(jctx, 0);
+ CHECK_NE(size, 0);
+ CHECK_EQ(ptr % kHeapAlignment, 0);
+ CHECK_EQ(size % kHeapAlignment, 0);
+ CHECK_GE(ptr, jctx->heap_begin);
+ CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ BlockDesc *beg = getblock(ptr);
+ BlockDesc *end = getblock(ptr + size);
+ for (BlockDesc *b = beg; b != end; b++) {
+ if (b->begin)
+ b->~BlockDesc();
+ }
+}
+
+void __tsan_java_move(jptr src, jptr dst, jptr size) {
+ SCOPED_JAVA_FUNC(__tsan_java_move);
+ DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
+ CHECK_NE(jctx, 0);
+ CHECK_NE(size, 0);
+ CHECK_EQ(src % kHeapAlignment, 0);
+ CHECK_EQ(dst % kHeapAlignment, 0);
+ CHECK_EQ(size % kHeapAlignment, 0);
+ CHECK_GE(src, jctx->heap_begin);
+ CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
+ CHECK_GE(dst, jctx->heap_begin);
+ CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
+ CHECK(dst >= src + size || src >= dst + size);
+
+  // Assumes this does not run concurrently with threads that perform
+  // memory accesses or mutex operations (i.e. during a stop-the-world phase).
+ { // NOLINT
+ BlockDesc *s = getblock(src);
+ BlockDesc *d = getblock(dst);
+ BlockDesc *send = getblock(src + size);
+ for (; s != send; s++, d++) {
+ CHECK_EQ(d->begin, false);
+ if (s->begin) {
+ DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
+ new(d) BlockDesc;
+ d->head = s->head;
+ for (SyncVar *sync = d->head; sync; sync = sync->next) {
+ uptr newaddr = sync->addr - src + dst;
+ DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
+ sync->addr = newaddr;
+ }
+ s->head = 0;
+ s->~BlockDesc();
+ }
+ }
+ }
+
+ { // NOLINT
+ u64 *s = (u64*)MemToShadow(src);
+ u64 *d = (u64*)MemToShadow(dst);
+ u64 *send = (u64*)MemToShadow(src + size);
+ for (; s != send; s++, d++) {
+ *d = *s;
+ *s = 0;
+ }
+ }
+}
+
+void __tsan_java_mutex_lock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
+ DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexCreate(thr, pc, addr, true, true, true);
+ MutexLock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_unlock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
+ DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexUnlock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_read_lock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
+ DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexCreate(thr, pc, addr, true, true, true);
+ MutexReadLock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_read_unlock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
+ DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexReadUnlock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec);
+ DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ CHECK_GT(rec, 0);
+
+ MutexCreate(thr, pc, addr, true, true, true);
+ MutexLock(thr, pc, addr, rec);
+}
+
+int __tsan_java_mutex_unlock_rec(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec);
+ DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ return MutexUnlock(thr, pc, addr, true);
+}
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_interface_java.h b/gcc-4.9/libsanitizer/tsan/tsan_interface_java.h
new file mode 100644
index 000000000..885ff2897
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_interface_java.h
@@ -0,0 +1,81 @@
+//===-- tsan_interface_java.h -----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for verification of Java or mixed Java/C++ programs.
+// The interface is intended to be used from within a JVM to notify TSan
+// about events such as Java locks and GC memory compaction.
+//
+// For plain memory accesses and function entry/exit a JVM is intended to use
+// C++ interfaces: __tsan_readN/writeN and __tsan_func_enter/exit.
+//
+// For volatile memory accesses and atomic operations the JVM is intended to
+// use the standard atomics API: __tsan_atomicN_load/store/etc.
+//
+// For usage examples see lit_tests/java_*.cc
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_JAVA_H
+#define TSAN_INTERFACE_JAVA_H
+
+#ifndef INTERFACE_ATTRIBUTE
+# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned long jptr; // NOLINT
+
+// Must be called before any other callback from Java.
+void __tsan_java_init(jptr heap_begin, jptr heap_size) INTERFACE_ATTRIBUTE;
+// Must be called when the application exits.
+// Not necessarily the last callback (concurrently running threads are OK).
+// Returns exit status or 0 if tsan does not want to override it.
+int __tsan_java_fini() INTERFACE_ATTRIBUTE;
+
+// Callback for memory allocations.
+// May be omitted for allocations that are neither subject to data races
+// nor contain synchronization objects (e.g. String).
+void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory free.
+// Can be (and preferably is) aggregated over several objects.
+void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory move by GC.
+// Can be (and preferably is) aggregated over several objects.
+// The ranges must not overlap.
+void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+
+// Mutex lock.
+// Addr is any unique address associated with the mutex.
+// Can be called on recursive reentry.
+void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex unlock.
+void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read lock.
+void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read unlock.
+void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Recursive mutex lock, intended for handling of Object.wait().
+// The 'rec' value must be obtained from the previous
+// __tsan_java_mutex_unlock_rec().
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) INTERFACE_ATTRIBUTE;
+// Recursive mutex unlock, intended for handling of Object.wait().
+// The return value says how many times this thread called lock()
+// without a pairing unlock() (i.e. how many recursive levels it unlocked).
+// It must be passed back to __tsan_java_mutex_lock_rec() to restore
+// the same recursion level.
+int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE;
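+
+// A minimal sketch (assumed call sequence, illustrative addresses and sizes)
+// of how a JVM might drive this interface; see lit_tests/java_*.cc for the
+// authoritative usage examples.
+#if 0
+void jvm_example() {
+  const jptr heap = 0x700000000000;      // hypothetical heap placement
+  const jptr heap_size = 0x10000000;
+  __tsan_java_init(heap, heap_size);     // once, before any other callback
+  jptr obj = heap;                       // 8-byte aligned object address
+  __tsan_java_alloc(obj, 64);
+  __tsan_java_mutex_lock(obj);           // monitorenter
+  __tsan_java_mutex_unlock(obj);         // monitorexit
+  __tsan_java_move(obj, obj + 128, 64);  // GC compaction, stop-the-world
+  __tsan_java_free(obj + 128, 64);
+  __tsan_java_fini();                    // at application exit
+}
+#endif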
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#undef INTERFACE_ATTRIBUTE
+
+#endif // #ifndef TSAN_INTERFACE_JAVA_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_md5.cc b/gcc-4.9/libsanitizer/tsan/tsan_md5.cc
new file mode 100644
index 000000000..883239c2e
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_md5.cc
@@ -0,0 +1,243 @@
+//===-- tsan_md5.cc -------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define I(x, y, z) ((y) ^ ((x) | ~(z)))
+
+#define STEP(f, a, b, c, d, x, t, s) \
+ (a) += f((b), (c), (d)) + (x) + (t); \
+ (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
+ (a) += (b);
+
+#define SET(n) \
+ (*(MD5_u32plus *)&ptr[(n) * 4])
+#define GET(n) \
+ SET(n)
+
+typedef unsigned int MD5_u32plus;
+typedef unsigned long ulong_t; // NOLINT
+
+typedef struct {
+ MD5_u32plus lo, hi;
+ MD5_u32plus a, b, c, d;
+ unsigned char buffer[64];
+ MD5_u32plus block[16];
+} MD5_CTX;
+
+static void *body(MD5_CTX *ctx, void *data, ulong_t size) {
+ unsigned char *ptr;
+ MD5_u32plus a, b, c, d;
+ MD5_u32plus saved_a, saved_b, saved_c, saved_d;
+
+ ptr = (unsigned char*)data;
+
+ a = ctx->a;
+ b = ctx->b;
+ c = ctx->c;
+ d = ctx->d;
+
+ do {
+ saved_a = a;
+ saved_b = b;
+ saved_c = c;
+ saved_d = d;
+
+ STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
+ STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
+ STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
+ STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
+ STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
+ STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
+ STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
+ STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
+ STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
+ STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
+ STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
+ STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
+ STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
+ STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
+ STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
+ STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
+
+ STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
+ STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
+ STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
+ STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
+ STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
+ STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
+ STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
+ STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
+ STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
+ STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
+ STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
+ STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
+ STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
+ STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
+ STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
+ STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
+
+ STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
+ STEP(H, d, a, b, c, GET(8), 0x8771f681, 11)
+ STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
+ STEP(H, b, c, d, a, GET(14), 0xfde5380c, 23)
+ STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
+ STEP(H, d, a, b, c, GET(4), 0x4bdecfa9, 11)
+ STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
+ STEP(H, b, c, d, a, GET(10), 0xbebfbc70, 23)
+ STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
+ STEP(H, d, a, b, c, GET(0), 0xeaa127fa, 11)
+ STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
+ STEP(H, b, c, d, a, GET(6), 0x04881d05, 23)
+ STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
+ STEP(H, d, a, b, c, GET(12), 0xe6db99e5, 11)
+ STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
+ STEP(H, b, c, d, a, GET(2), 0xc4ac5665, 23)
+
+ STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
+ STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
+ STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
+ STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
+ STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
+ STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
+ STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
+ STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
+ STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
+ STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
+ STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
+ STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
+ STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
+ STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
+ STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
+ STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
+
+ a += saved_a;
+ b += saved_b;
+ c += saved_c;
+ d += saved_d;
+
+ ptr += 64;
+ } while (size -= 64);
+
+ ctx->a = a;
+ ctx->b = b;
+ ctx->c = c;
+ ctx->d = d;
+
+ return ptr;
+}
+
+void MD5_Init(MD5_CTX *ctx) {
+ ctx->a = 0x67452301;
+ ctx->b = 0xefcdab89;
+ ctx->c = 0x98badcfe;
+ ctx->d = 0x10325476;
+
+ ctx->lo = 0;
+ ctx->hi = 0;
+}
+
+void MD5_Update(MD5_CTX *ctx, void *data, ulong_t size) {
+ MD5_u32plus saved_lo;
+ ulong_t used, free;
+
+ saved_lo = ctx->lo;
+ if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
+ ctx->hi++;
+ ctx->hi += size >> 29;
+
+ used = saved_lo & 0x3f;
+
+ if (used) {
+ free = 64 - used;
+
+ if (size < free) {
+ internal_memcpy(&ctx->buffer[used], data, size);
+ return;
+ }
+
+ internal_memcpy(&ctx->buffer[used], data, free);
+ data = (unsigned char *)data + free;
+ size -= free;
+ body(ctx, ctx->buffer, 64);
+ }
+
+ if (size >= 64) {
+ data = body(ctx, data, size & ~(ulong_t)0x3f);
+ size &= 0x3f;
+ }
+
+ internal_memcpy(ctx->buffer, data, size);
+}
+
+void MD5_Final(unsigned char *result, MD5_CTX *ctx) {
+ ulong_t used, free;
+
+ used = ctx->lo & 0x3f;
+
+ ctx->buffer[used++] = 0x80;
+
+ free = 64 - used;
+
+ if (free < 8) {
+ internal_memset(&ctx->buffer[used], 0, free);
+ body(ctx, ctx->buffer, 64);
+ used = 0;
+ free = 64;
+ }
+
+ internal_memset(&ctx->buffer[used], 0, free - 8);
+
+ ctx->lo <<= 3;
+ ctx->buffer[56] = ctx->lo;
+ ctx->buffer[57] = ctx->lo >> 8;
+ ctx->buffer[58] = ctx->lo >> 16;
+ ctx->buffer[59] = ctx->lo >> 24;
+ ctx->buffer[60] = ctx->hi;
+ ctx->buffer[61] = ctx->hi >> 8;
+ ctx->buffer[62] = ctx->hi >> 16;
+ ctx->buffer[63] = ctx->hi >> 24;
+
+ body(ctx, ctx->buffer, 64);
+
+ result[0] = ctx->a;
+ result[1] = ctx->a >> 8;
+ result[2] = ctx->a >> 16;
+ result[3] = ctx->a >> 24;
+ result[4] = ctx->b;
+ result[5] = ctx->b >> 8;
+ result[6] = ctx->b >> 16;
+ result[7] = ctx->b >> 24;
+ result[8] = ctx->c;
+ result[9] = ctx->c >> 8;
+ result[10] = ctx->c >> 16;
+ result[11] = ctx->c >> 24;
+ result[12] = ctx->d;
+ result[13] = ctx->d >> 8;
+ result[14] = ctx->d >> 16;
+ result[15] = ctx->d >> 24;
+
+ internal_memset(ctx, 0, sizeof(*ctx));
+}
+
+MD5Hash md5_hash(const void *data, uptr size) {
+ MD5Hash res;
+ MD5_CTX ctx;
+ MD5_Init(&ctx);
+ MD5_Update(&ctx, (void*)data, size);
+ MD5_Final((unsigned char*)&res.hash[0], &ctx);
+ return res;
+}
+} // namespace __tsan
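+
+// A minimal usage sketch (illustrative) of the public entry point above;
+// MD5Hash is declared in tsan_defs.h and holds the 128-bit digest as two
+// u64 halves.
+#if 0
+void md5_example() {
+  static const char data[] = "stack trace bytes";
+  __tsan::MD5Hash h = __tsan::md5_hash(data, sizeof(data) - 1);
+  (void)h.hash[0];  // low half of the digest
+  (void)h.hash[1];  // high half of the digest
+}
+#endif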
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_mman.cc b/gcc-4.9/libsanitizer/tsan/tsan_mman.cc
new file mode 100644
index 000000000..832374bec
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_mman.cc
@@ -0,0 +1,273 @@
+//===-- tsan_mman.cc ------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_mman.h"
+#include "tsan_rtl.h"
+#include "tsan_report.h"
+#include "tsan_flags.h"
+
+// May be overridden by the front-end.
+extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+
+extern "C" void WEAK __tsan_free_hook(void *ptr) {
+ (void)ptr;
+}
+
+namespace __tsan {
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
+
+void MBlock::Lock() {
+ atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+ uptr v = atomic_load(a, memory_order_relaxed);
+ for (int iter = 0;; iter++) {
+ if (v & 1) {
+ if (iter < 10)
+ proc_yield(20);
+ else
+ internal_sched_yield();
+ v = atomic_load(a, memory_order_relaxed);
+ continue;
+ }
+ if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
+ break;
+ }
+}
+
+void MBlock::Unlock() {
+ atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+ uptr v = atomic_load(a, memory_order_relaxed);
+ DCHECK(v & 1);
+ atomic_store(a, v & ~1, memory_order_relaxed);
+}
+
+struct MapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const {
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ DontNeedShadowFor(p, size);
+ }
+};
+
+static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
+Allocator *allocator() {
+ return reinterpret_cast<Allocator*>(&allocator_placeholder);
+}
+
+void InitializeAllocator() {
+ allocator()->Init();
+}
+
+void AllocatorThreadStart(ThreadState *thr) {
+ allocator()->InitCache(&thr->alloc_cache);
+ internal_allocator()->InitCache(&thr->internal_alloc_cache);
+}
+
+void AllocatorThreadFinish(ThreadState *thr) {
+ allocator()->DestroyCache(&thr->alloc_cache);
+ internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
+}
+
+void AllocatorPrintStats() {
+ allocator()->PrintStats();
+}
+
+static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
+ if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
+ return;
+ Context *ctx = CTX();
+ StackTrace stack;
+ stack.ObtainCurrent(thr, pc);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeSignalUnsafe);
+ if (!IsFiredSuppression(ctx, rep, stack)) {
+ rep.AddStack(&stack);
+ OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ }
+}
+
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
+ CHECK_GT(thr->in_rtl, 0);
+ if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
+ return AllocatorReturnNull();
+ void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
+ if (p == 0)
+ return 0;
+ MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
+ b->Init(sz, thr->tid, CurrentStackId(thr, pc));
+ if (CTX() && CTX()->initialized) {
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)p, sz);
+ }
+ DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ SignalUnsafeCall(thr, pc);
+ return p;
+}
+
+void user_free(ThreadState *thr, uptr pc, void *p) {
+ CHECK_GT(thr->in_rtl, 0);
+ CHECK_NE(p, (void*)0);
+ DPrintf("#%d: free(%p)\n", thr->tid, p);
+ MBlock *b = (MBlock*)allocator()->GetMetaData(p);
+ if (b->ListHead()) {
+ MBlock::ScopedLock l(b);
+ for (SyncVar *s = b->ListHead(); s;) {
+ SyncVar *res = s;
+ s = s->next;
+ StatInc(thr, StatSyncDestroyed);
+ res->mtx.Lock();
+ res->mtx.Unlock();
+ DestroyAndFree(res);
+ }
+ b->ListReset();
+ }
+ if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
+ }
+ allocator()->Deallocate(&thr->alloc_cache, p);
+ SignalUnsafeCall(thr, pc);
+}
+
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
+ CHECK_GT(thr->in_rtl, 0);
+ void *p2 = 0;
+ // FIXME: Handle "shrinking" more efficiently,
+ // it seems that some software actually does this.
+ if (sz) {
+ p2 = user_alloc(thr, pc, sz);
+ if (p2 == 0)
+ return 0;
+ if (p) {
+ MBlock *b = user_mblock(thr, p);
+ CHECK_NE(b, 0);
+ internal_memcpy(p2, p, min(b->Size(), sz));
+ }
+ }
+ if (p)
+ user_free(thr, pc, p);
+ return p2;
+}
+
+uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
+ CHECK_GT(thr->in_rtl, 0);
+ if (p == 0)
+ return 0;
+ MBlock *b = (MBlock*)allocator()->GetMetaData(p);
+ return b ? b->Size() : 0;
+}
+
+MBlock *user_mblock(ThreadState *thr, void *p) {
+ CHECK_NE(p, 0);
+ Allocator *a = allocator();
+ void *b = a->GetBlockBegin(p);
+ if (b == 0)
+ return 0;
+ return (MBlock*)a->GetMetaData(b);
+}
+
+void invoke_malloc_hook(void *ptr, uptr size) {
+ Context *ctx = CTX();
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ return;
+ __tsan_malloc_hook(ptr, size);
+}
+
+void invoke_free_hook(void *ptr) {
+ Context *ctx = CTX();
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ return;
+ __tsan_free_hook(ptr);
+}
+
+void *internal_alloc(MBlockType typ, uptr sz) {
+ ThreadState *thr = cur_thread();
+ CHECK_GT(thr->in_rtl, 0);
+ CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ return InternalAlloc(sz, &thr->internal_alloc_cache);
+}
+
+void internal_free(void *p) {
+ ThreadState *thr = cur_thread();
+ CHECK_GT(thr->in_rtl, 0);
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ InternalFree(p, &thr->internal_alloc_cache);
+}
+
+} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+uptr __tsan_get_current_allocated_bytes() {
+ u64 stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ u64 m = stats[AllocatorStatMalloced];
+ u64 f = stats[AllocatorStatFreed];
+ return m >= f ? m - f : 1;
+}
+
+uptr __tsan_get_heap_size() {
+ u64 stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ u64 m = stats[AllocatorStatMmapped];
+ u64 f = stats[AllocatorStatUnmapped];
+ return m >= f ? m - f : 1;
+}
+
+uptr __tsan_get_free_bytes() {
+ return 1;
+}
+
+uptr __tsan_get_unmapped_bytes() {
+ return 1;
+}
+
+uptr __tsan_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+bool __tsan_get_ownership(void *p) {
+ return allocator()->GetBlockBegin(p) != 0;
+}
+
+uptr __tsan_get_allocated_size(void *p) {
+ if (p == 0)
+ return 0;
+ p = allocator()->GetBlockBegin(p);
+ if (p == 0)
+ return 0;
+ MBlock *b = (MBlock*)allocator()->GetMetaData(p);
+ return b->Size();
+}
+
+void __tsan_on_thread_idle() {
+ ThreadState *thr = cur_thread();
+ allocator()->SwallowCache(&thr->alloc_cache);
+ internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
+}
+} // extern "C"
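+
+// The hooks at the top of this file are declared WEAK, so a front-end can
+// override them. A hedged sketch of what such an override might look like
+// in user code:
+#if 0
+extern "C" void __tsan_malloc_hook(void *ptr, uptr size) {
+  // Runs for every user allocation; keep it cheap and reentrancy-safe.
+  (void)ptr;
+  (void)size;
+}
+extern "C" void __tsan_free_hook(void *ptr) {
+  // Runs for every user free.
+  (void)ptr;
+}
+#endif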
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_mman.h b/gcc-4.9/libsanitizer/tsan/tsan_mman.h
new file mode 100644
index 000000000..90faaffa1
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_mman.h
@@ -0,0 +1,82 @@
+//===-- tsan_mman.h ---------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MMAN_H
+#define TSAN_MMAN_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+const uptr kDefaultAlignment = 16;
+
+void InitializeAllocator();
+void AllocatorThreadStart(ThreadState *thr);
+void AllocatorThreadFinish(ThreadState *thr);
+void AllocatorPrintStats();
+
+// For user allocations.
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
+ uptr align = kDefaultAlignment);
+// Does not accept NULL.
+void user_free(ThreadState *thr, uptr pc, void *p);
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
+void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
+uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p);
+// Given a pointer p into a valid allocated block,
+// returns the descriptor of that block.
+MBlock *user_mblock(ThreadState *thr, void *p);
+
+// Invoking malloc/free hooks that may be installed by the user.
+void invoke_malloc_hook(void *ptr, uptr size);
+void invoke_free_hook(void *ptr);
+
+enum MBlockType {
+ MBlockScopedBuf,
+ MBlockString,
+ MBlockStackTrace,
+ MBlockShadowStack,
+ MBlockSync,
+ MBlockClock,
+ MBlockThreadContex,
+ MBlockDeadInfo,
+ MBlockRacyStacks,
+ MBlockRacyAddresses,
+ MBlockAtExit,
+ MBlockFlag,
+ MBlockReport,
+ MBlockReportMop,
+ MBlockReportThread,
+ MBlockReportMutex,
+ MBlockReportLoc,
+ MBlockReportStack,
+ MBlockSuppression,
+ MBlockExpectRace,
+ MBlockSignal,
+ MBlockFD,
+ MBlockJmpBuf,
+
+  // This must be the last entry.
+ MBlockTypeCount
+};
+
+// For internal data structures.
+void *internal_alloc(MBlockType typ, uptr sz);
+void internal_free(void *p);
+
+template<typename T>
+void DestroyAndFree(T *&p) {
+ p->~T();
+ internal_free(p);
+ p = 0;
+}
+
+} // namespace __tsan
+#endif // TSAN_MMAN_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_mutex.cc b/gcc-4.9/libsanitizer/tsan/tsan_mutex.cc
new file mode 100644
index 000000000..e7846c53e
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_mutex.cc
@@ -0,0 +1,272 @@
+//===-- tsan_mutex.cc -----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_mutex.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
+// Readers have preference and can potentially starve writers.
+
+// The table defines which mutexes can be locked under which other mutexes.
+// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
+// then Report mutex can be locked while under Threads mutex.
+// The leaf mutexes can be locked under any other mutexes.
+// Recursive locking is not supported.
+#if TSAN_DEBUG && !TSAN_GO
+const MutexType MutexTypeLeaf = (MutexType)-1;
+static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
+ /*0 MutexTypeInvalid*/ {},
+ /*1 MutexTypeTrace*/ {MutexTypeLeaf},
+ /*2 MutexTypeThreads*/ {MutexTypeReport},
+ /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar,
+ MutexTypeMBlock, MutexTypeJavaMBlock},
+ /*4 MutexTypeSyncVar*/ {},
+ /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
+ /*6 MutexTypeSlab*/ {MutexTypeLeaf},
+ /*7 MutexTypeAnnotations*/ {},
+ /*8 MutexTypeAtExit*/ {MutexTypeSyncTab},
+ /*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
+ /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
+};
+
+static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
+#endif
+
+void InitializeMutex() {
+#if TSAN_DEBUG && !TSAN_GO
+ // Build the "can lock" adjacency matrix.
+ // If [i][j]==true, then one can lock mutex j while under mutex i.
+ const int N = MutexTypeCount;
+ int cnt[N] = {};
+ bool leaf[N] = {};
+ for (int i = 1; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ MutexType z = CanLockTab[i][j];
+ if (z == MutexTypeInvalid)
+ continue;
+ if (z == MutexTypeLeaf) {
+ CHECK(!leaf[i]);
+ leaf[i] = true;
+ continue;
+ }
+ CHECK(!CanLockAdj[i][(int)z]);
+ CanLockAdj[i][(int)z] = true;
+ cnt[i]++;
+ }
+ }
+ for (int i = 0; i < N; i++) {
+ CHECK(!leaf[i] || cnt[i] == 0);
+ }
+ // Add leaf mutexes.
+ for (int i = 0; i < N; i++) {
+ if (!leaf[i])
+ continue;
+ for (int j = 0; j < N; j++) {
+ if (i == j || leaf[j] || j == MutexTypeInvalid)
+ continue;
+ CHECK(!CanLockAdj[j][i]);
+ CanLockAdj[j][i] = true;
+ }
+ }
+ // Build the transitive closure.
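+  // (The k-i-j triple loop below is Warshall's transitive-closure algorithm
+  // over the boolean adjacency matrix.)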
+ bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ CanLockAdj2[i][j] = CanLockAdj[i][j];
+ }
+ }
+ for (int k = 0; k < N; k++) {
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
+ CanLockAdj2[i][j] = true;
+ }
+ }
+ }
+ }
+#if 0
+ Printf("Can lock graph:\n");
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ Printf("%d ", CanLockAdj[i][j]);
+ }
+ Printf("\n");
+ }
+ Printf("Can lock graph closure:\n");
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ Printf("%d ", CanLockAdj2[i][j]);
+ }
+ Printf("\n");
+ }
+#endif
+ // Verify that the graph is acyclic.
+ for (int i = 0; i < N; i++) {
+ if (CanLockAdj2[i][i]) {
+ Printf("Mutex %d participates in a cycle\n", i);
+ Die();
+ }
+ }
+#endif
+}
+
+DeadlockDetector::DeadlockDetector() {
+  // Rely on zero initialization because some mutexes can be locked before the ctor runs.
+}
+
+#if TSAN_DEBUG && !TSAN_GO
+void DeadlockDetector::Lock(MutexType t) {
+ // Printf("LOCK %d @%zu\n", t, seq_ + 1);
+ CHECK_GT(t, MutexTypeInvalid);
+ CHECK_LT(t, MutexTypeCount);
+ u64 max_seq = 0;
+ u64 max_idx = MutexTypeInvalid;
+ for (int i = 0; i != MutexTypeCount; i++) {
+ if (locked_[i] == 0)
+ continue;
+ CHECK_NE(locked_[i], max_seq);
+ if (max_seq < locked_[i]) {
+ max_seq = locked_[i];
+ max_idx = i;
+ }
+ }
+ locked_[t] = ++seq_;
+ if (max_idx == MutexTypeInvalid)
+ return;
+ // Printf(" last %d @%zu\n", max_idx, max_seq);
+ if (!CanLockAdj[max_idx][t]) {
+ Printf("ThreadSanitizer: internal deadlock detected\n");
+ Printf("ThreadSanitizer: can't lock %d while under %zu\n",
+ t, (uptr)max_idx);
+ CHECK(0);
+ }
+}
+
+void DeadlockDetector::Unlock(MutexType t) {
+ // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
+ CHECK(locked_[t]);
+ locked_[t] = 0;
+}
+#endif
+
+const uptr kUnlocked = 0;
+const uptr kWriteLock = 1;
+const uptr kReadLock = 2;
+
+class Backoff {
+ public:
+ Backoff()
+ : iter_() {
+ }
+
+ bool Do() {
+ if (iter_++ < kActiveSpinIters)
+ proc_yield(kActiveSpinCnt);
+ else
+ internal_sched_yield();
+ return true;
+ }
+
+ u64 Contention() const {
+ u64 active = iter_ % kActiveSpinIters;
+ u64 passive = iter_ - active;
+ return active + 10 * passive;
+ }
+
+ private:
+ int iter_;
+ static const int kActiveSpinIters = 10;
+ static const int kActiveSpinCnt = 20;
+};
+
+Mutex::Mutex(MutexType type, StatType stat_type) {
+ CHECK_GT(type, MutexTypeInvalid);
+ CHECK_LT(type, MutexTypeCount);
+#if TSAN_DEBUG
+ type_ = type;
+#endif
+#if TSAN_COLLECT_STATS
+ stat_type_ = stat_type;
+#endif
+ atomic_store(&state_, kUnlocked, memory_order_relaxed);
+}
+
+Mutex::~Mutex() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+}
+
+void Mutex::Lock() {
+#if TSAN_DEBUG && !TSAN_GO
+ cur_thread()->deadlock_detector.Lock(type_);
+#endif
+ uptr cmp = kUnlocked;
+ if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ for (Backoff backoff; backoff.Do();) {
+ if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
+ cmp = kUnlocked;
+ if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+ memory_order_acquire)) {
+#if TSAN_COLLECT_STATS
+ StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+ return;
+ }
+ }
+ }
+}
+
+void Mutex::Unlock() {
+ uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+ (void)prev;
+ DCHECK_NE(prev & kWriteLock, 0);
+#if TSAN_DEBUG && !TSAN_GO
+ cur_thread()->deadlock_detector.Unlock(type_);
+#endif
+}
+
+void Mutex::ReadLock() {
+#if TSAN_DEBUG && !TSAN_GO
+ cur_thread()->deadlock_detector.Lock(type_);
+#endif
+ uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ for (Backoff backoff; backoff.Do();) {
+ prev = atomic_load(&state_, memory_order_acquire);
+ if ((prev & kWriteLock) == 0) {
+#if TSAN_COLLECT_STATS
+ StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+ return;
+ }
+ }
+}
+
+void Mutex::ReadUnlock() {
+ uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+ (void)prev;
+ DCHECK_EQ(prev & kWriteLock, 0);
+ DCHECK_GT(prev & ~kWriteLock, 0);
+#if TSAN_DEBUG && !TSAN_GO
+ cur_thread()->deadlock_detector.Unlock(type_);
+#endif
+}
+
+void Mutex::CheckLocked() {
+ CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_mutex.h b/gcc-4.9/libsanitizer/tsan/tsan_mutex.h
new file mode 100644
index 000000000..6d1450593
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_mutex.h
@@ -0,0 +1,80 @@
+//===-- tsan_mutex.h --------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEX_H
+#define TSAN_MUTEX_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+enum MutexType {
+ MutexTypeInvalid,
+ MutexTypeTrace,
+ MutexTypeThreads,
+ MutexTypeReport,
+ MutexTypeSyncVar,
+ MutexTypeSyncTab,
+ MutexTypeSlab,
+ MutexTypeAnnotations,
+ MutexTypeAtExit,
+ MutexTypeMBlock,
+ MutexTypeJavaMBlock,
+
+  // This must be the last entry.
+ MutexTypeCount
+};
+
+class Mutex {
+ public:
+ explicit Mutex(MutexType type, StatType stat_type);
+ ~Mutex();
+
+ void Lock();
+ void Unlock();
+
+ void ReadLock();
+ void ReadUnlock();
+
+ void CheckLocked();
+
+ private:
+ atomic_uintptr_t state_;
+#if TSAN_DEBUG
+ MutexType type_;
+#endif
+#if TSAN_COLLECT_STATS
+ StatType stat_type_;
+#endif
+
+ Mutex(const Mutex&);
+ void operator = (const Mutex&);
+};
+
+typedef GenericScopedLock<Mutex> Lock;
+typedef GenericScopedReadLock<Mutex> ReadLock;
+
+class DeadlockDetector {
+ public:
+ DeadlockDetector();
+ void Lock(MutexType t);
+ void Unlock(MutexType t);
+ private:
+ u64 seq_;
+ u64 locked_[MutexTypeCount];
+};
+
+void InitializeMutex();
+
+} // namespace __tsan
+
+#endif // TSAN_MUTEX_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_mutexset.cc b/gcc-4.9/libsanitizer/tsan/tsan_mutexset.cc
new file mode 100644
index 000000000..3ebae3a57
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_mutexset.cc
@@ -0,0 +1,87 @@
+//===-- tsan_mutexset.cc --------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_mutexset.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+const uptr MutexSet::kMaxSize;
+
+MutexSet::MutexSet() {
+ size_ = 0;
+ internal_memset(&descs_, 0, sizeof(descs_));
+}
+
+void MutexSet::Add(u64 id, bool write, u64 epoch) {
+ // Look up existing mutex with the same id.
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ descs_[i].count++;
+ descs_[i].epoch = epoch;
+ return;
+ }
+ }
+ // On overflow, find the oldest mutex and drop it.
+ if (size_ == kMaxSize) {
+ u64 minepoch = (u64)-1;
+ u64 mini = (u64)-1;
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].epoch < minepoch) {
+ minepoch = descs_[i].epoch;
+ mini = i;
+ }
+ }
+ RemovePos(mini);
+ CHECK_EQ(size_, kMaxSize - 1);
+ }
+ // Add new mutex descriptor.
+ descs_[size_].id = id;
+ descs_[size_].write = write;
+ descs_[size_].epoch = epoch;
+ descs_[size_].count = 1;
+ size_++;
+}
+
+void MutexSet::Del(u64 id, bool write) {
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ if (--descs_[i].count == 0)
+ RemovePos(i);
+ return;
+ }
+ }
+}
+
+void MutexSet::Remove(u64 id) {
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ RemovePos(i);
+ return;
+ }
+ }
+}
+
+void MutexSet::RemovePos(uptr i) {
+ CHECK_LT(i, size_);
+ descs_[i] = descs_[size_ - 1];
+ size_--;
+}
+
+uptr MutexSet::Size() const {
+ return size_;
+}
+
+MutexSet::Desc MutexSet::Get(uptr i) const {
+ CHECK_LT(i, size_);
+ return descs_[i];
+}
+
+} // namespace __tsan
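+
+// Illustrative driver (not part of this file): a recursive Add bumps the
+// per-mutex count, and only the matching number of Del calls drops the entry.
+#if 0
+void mutexset_example() {
+  __tsan::MutexSet mset;
+  mset.Add(/*id=*/42, /*write=*/true, /*epoch=*/1);
+  mset.Add(42, true, 2);   // same id: count == 2, epoch refreshed
+  mset.Del(42, true);      // count == 1, entry retained
+  mset.Del(42, true);      // count == 0, entry removed
+}
+#endif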
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_mutexset.h b/gcc-4.9/libsanitizer/tsan/tsan_mutexset.h
new file mode 100644
index 000000000..df36b462c
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_mutexset.h
@@ -0,0 +1,63 @@
+//===-- tsan_mutexset.h -----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// MutexSet holds the set of mutexes currently held by a thread.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEXSET_H
+#define TSAN_MUTEXSET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class MutexSet {
+ public:
+  // Holds a limited number of mutexes.
+ // The oldest mutexes are discarded on overflow.
+ static const uptr kMaxSize = 16;
+ struct Desc {
+ u64 id;
+ u64 epoch;
+ int count;
+ bool write;
+ };
+
+ MutexSet();
+ // The 'id' is obtained from SyncVar::GetId().
+ void Add(u64 id, bool write, u64 epoch);
+ void Del(u64 id, bool write);
+ void Remove(u64 id); // Removes the mutex completely (if it's destroyed).
+ uptr Size() const;
+ Desc Get(uptr i) const;
+
+ private:
+#ifndef TSAN_GO
+ uptr size_;
+ Desc descs_[kMaxSize];
+#endif
+
+ void RemovePos(uptr i);
+};
+
+// Go does not have mutexes, so do not spend memory and time on them.
+// (Go's sync.Mutex is actually a semaphore -- it can be unlocked
+// in a different goroutine.)
+#ifdef TSAN_GO
+MutexSet::MutexSet() {}
+void MutexSet::Add(u64 id, bool write, u64 epoch) {}
+void MutexSet::Del(u64 id, bool write) {}
+void MutexSet::Remove(u64 id) {}
+void MutexSet::RemovePos(uptr i) {}
+uptr MutexSet::Size() const { return 0; }
+MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
+#endif
+
+} // namespace __tsan
+
+#endif // TSAN_MUTEXSET_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_platform.h b/gcc-4.9/libsanitizer/tsan/tsan_platform.h
new file mode 100644
index 000000000..164ee45d6
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_platform.h
@@ -0,0 +1,171 @@
+//===-- tsan_platform.h -----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Platform-specific code.
+//===----------------------------------------------------------------------===//
+
+/*
+C++ linux memory layout:
+0000 0000 0000 - 03c0 0000 0000: protected
+03c0 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 6000 0000 0000: protected
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
+
+C++ COMPAT linux memory layout:
+0000 0000 0000 - 0400 0000 0000: protected
+0400 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 2900 0000 0000: protected
+2900 0000 0000 - 2c00 0000 0000: modules
+2c00 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7f00 0000 0000: -
+7f00 0000 0000 - 7fff ffff ffff: main thread stack
+
+Go linux and darwin memory layout:
+0000 0000 0000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1380 0000 0000: shadow
+1460 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7fff ffff ffff: -
+
+Go windows memory layout:
+0000 0000 0000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0560 0000 0000: shadow
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07ff ffff ffff: -
+*/
+
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+#if defined(__LP64__) || defined(_WIN64)
+namespace __tsan {
+
+#if defined(TSAN_GO)
+static const uptr kLinuxAppMemBeg = 0x000000000000ULL;
+static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
+# if SANITIZER_WINDOWS
+static const uptr kLinuxShadowMsk = 0x010000000000ULL;
+# else
+static const uptr kLinuxShadowMsk = 0x200000000000ULL;
+# endif
+// TSAN_COMPAT_SHADOW is intended for the COMPAT virtual memory layout,
+// where memory addresses are of the 0x2axxxxxxxxxx form.
+// The option is enabled with 'setarch x86_64 -L'.
+#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
+static const uptr kLinuxAppMemBeg = 0x290000000000ULL;
+static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
+#else
+static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
+static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
+#endif
+
+static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
+
+#if SANITIZER_WINDOWS
+const uptr kTraceMemBegin = 0x056000000000ULL;
+#else
+const uptr kTraceMemBegin = 0x600000000000ULL;
+#endif
+const uptr kTraceMemSize = 0x020000000000ULL;
+
+// This has to be a macro to allow constant initialization of constants below.
+#ifndef TSAN_GO
+#define MemToShadow(addr) \
+ (((addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
+#else
+#define MemToShadow(addr) \
+ ((((addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
+#endif
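+// Worked example (editorial sketch, assuming the defaults kShadowCell == 8
+// and kShadowCnt == 4 from tsan_defs.h): for the C++ mapping,
+//   MemToShadow(0x7d0000001234)
+//     = (0x7d0000001234 & ~(0x7c0000000000 | 7)) * 4
+//     = 0x010000001230 * 4
+//     = 0x0400000048c0
+// which falls inside the shadow range 03c0 0000 0000 - 1000 0000 0000
+// shown in the layout comment above.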
+
+static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
+static const uptr kLinuxShadowEnd =
+ MemToShadow(kLinuxAppMemEnd) | 0xff;
+
+static inline bool IsAppMem(uptr mem) {
+ return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
+}
+
+static inline bool IsShadowMem(uptr mem) {
+ return mem >= kLinuxShadowBeg && mem <= kLinuxShadowEnd;
+}
+
+static inline uptr ShadowToMem(uptr shadow) {
+ CHECK(IsShadowMem(shadow));
+#ifdef TSAN_GO
+ return (shadow & ~kLinuxShadowMsk) / kShadowCnt;
+#else
+ return (shadow / kShadowCnt) | kLinuxAppMemMsk;
+#endif
+}
+
+// For COMPAT mapping returns an alternative address
+// that mapped to the same shadow address.
+// COMPAT mapping is not quite one-to-one.
+static inline uptr AlternativeAddress(uptr addr) {
+#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
+ return (addr & ~kLinuxAppMemMsk) | 0x280000000000ULL;
+#else
+ return 0;
+#endif
+}
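+// Worked example (editorial): with TSAN_COMPAT_SHADOW enabled, the addresses
+// 0x7d0000001000 and AlternativeAddress(0x7d0000001000) == 0x290000001000
+// differ only in kLinuxAppMemMsk bits that MemToShadow() masks out, so both
+// map to the same shadow word.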
+
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size);
+uptr GetRSS();
+
+const char *InitializePlatform();
+void FinalizePlatform();
+
+// The additional page is to catch shadow stack overflow as a page fault.
+// The expression below rounds the total up to a 4096-byte page boundary.
+const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace) + 4096
+    + 4095) & ~4095;
+
+uptr ALWAYS_INLINE GetThreadTrace(int tid) {
+ uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize;
+ DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ return p;
+}
+
+uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
+ uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize
+ + kTraceSize * sizeof(Event);
+ DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ return p;
+}
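+// Layout note (editorial): thread tid owns the byte range
+//   [kTraceMemBegin + tid*kTotalTraceSize,
+//    kTraceMemBegin + (tid+1)*kTotalTraceSize)
+// with the kTraceSize Event records first and the Trace header right after
+// them, which is what the two functions above compute.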
+
+void internal_start_thread(void(*func)(void*), void *arg);
+
+// Says whether the addr belongs to a global variable.
+// This is a guess with high probability; it may yield both false positives
+// and false negatives.
+bool IsGlobalVar(uptr addr);
+int ExtractResolvFDs(void *state, int *fds, int nfd);
+int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
+
+} // namespace __tsan
+
+#else // defined(__LP64__) || defined(_WIN64)
+# error "Only 64-bit is supported"
+#endif
+
+#endif // TSAN_PLATFORM_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_platform_linux.cc b/gcc-4.9/libsanitizer/tsan/tsan_platform_linux.cc
new file mode 100644
index 000000000..fe69430b7
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_platform_linux.cc
@@ -0,0 +1,386 @@
+//===-- tsan_platform_linux.cc --------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Linux-specific code.
+//===----------------------------------------------------------------------===//
+
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sched.h>
+#include <dlfcn.h>
+#define __need_res_state
+#include <resolv.h>
+#include <malloc.h>
+
+#ifdef sa_handler
+# undef sa_handler
+#endif
+
+#ifdef sa_sigaction
+# undef sa_sigaction
+#endif
+
+extern "C" struct mallinfo __libc_mallinfo();
+
+namespace __tsan {
+
+const uptr kPageSize = 4096;
+
+#ifndef TSAN_GO
+ScopedInRtl::ScopedInRtl()
+ : thr_(cur_thread()) {
+ in_rtl_ = thr_->in_rtl;
+ thr_->in_rtl++;
+ errno_ = errno;
+}
+
+ScopedInRtl::~ScopedInRtl() {
+ thr_->in_rtl--;
+ errno = errno_;
+ CHECK_EQ(in_rtl_, thr_->in_rtl);
+}
+#else
+ScopedInRtl::ScopedInRtl() {
+}
+
+ScopedInRtl::~ScopedInRtl() {
+}
+#endif
+
+void FillProfileCallback(uptr start, uptr rss, bool file,
+ uptr *mem, uptr stats_size) {
+ CHECK_EQ(7, stats_size);
+ mem[6] += rss; // total
+ start >>= 40;
+ if (start < 0x10) // shadow
+ mem[0] += rss;
+ else if (start >= 0x20 && start < 0x30) // compat modules
+ mem[file ? 1 : 2] += rss;
+ else if (start >= 0x7e) // modules
+ mem[file ? 1 : 2] += rss;
+ else if (start >= 0x60 && start < 0x62) // traces
+ mem[3] += rss;
+ else if (start >= 0x7d && start < 0x7e) // heap
+ mem[4] += rss;
+ else // other
+ mem[5] += rss;
+}
+
+void WriteMemoryProfile(char *buf, uptr buf_size) {
+ uptr mem[7] = {};
+ __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
+ char *buf_pos = buf;
+ char *buf_end = buf + buf_size;
+ buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
+ "RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n",
+ mem[6] >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20,
+ mem[3] >> 20, mem[4] >> 20, mem[5] >> 20);
+ struct mallinfo mi = __libc_mallinfo();
+ buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
+ "mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n",
+ mi.arena >> 20, mi.hblkhd >> 20, mi.fordblks >> 20, mi.keepcost >> 20);
+}
+
+uptr GetRSS() {
+ uptr mem[7] = {};
+ __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
+ return mem[6];
+}
+
+
+void FlushShadowMemoryCallback(
+ const SuspendedThreadsList &suspended_threads_list,
+ void *argument) {
+ FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
+}
+
+void FlushShadowMemory() {
+ StopTheWorld(FlushShadowMemoryCallback, 0);
+}
+
+#ifndef TSAN_GO
+static void ProtectRange(uptr beg, uptr end) {
+ ScopedInRtl in_rtl;
+ CHECK_LE(beg, end);
+ if (beg == end)
+ return;
+ if (beg != (uptr)Mprotect(beg, end - beg)) {
+ Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
+ Printf("FATAL: Make sure you are not using unlimited stack\n");
+ Die();
+ }
+}
+#endif
+
+#ifndef TSAN_GO
+// Mark shadow for .rodata sections with the special kShadowRodata marker.
+// Accesses to .rodata can't race, so this saves time, memory and trace space.
+static void MapRodata() {
+ // First create temp file.
+ const char *tmpdir = GetEnv("TMPDIR");
+ if (tmpdir == 0)
+ tmpdir = GetEnv("TEST_TMPDIR");
+#ifdef P_tmpdir
+ if (tmpdir == 0)
+ tmpdir = P_tmpdir;
+#endif
+ if (tmpdir == 0)
+ return;
+ char filename[256];
+ internal_snprintf(filename, sizeof(filename), "%s/tsan.rodata.%d",
+ tmpdir, (int)internal_getpid());
+ uptr openrv = internal_open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
+ if (internal_iserror(openrv))
+ return;
+ fd_t fd = openrv;
+ // Fill the file with kShadowRodata.
+ const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
+ InternalScopedBuffer<u64> marker(kMarkerSize);
+ for (u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
+ *p = kShadowRodata;
+ internal_write(fd, marker.data(), marker.size());
+ // Map the file into memory.
+ uptr page = internal_mmap(0, kPageSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
+ if (internal_iserror(page)) {
+ internal_close(fd);
+ internal_unlink(filename);
+ return;
+ }
+ // Map the file into shadow of .rodata sections.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr start, end, offset, prot;
+ char name[128];
+ while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
+ if (name[0] != 0 && name[0] != '['
+ && (prot & MemoryMappingLayout::kProtectionRead)
+ && (prot & MemoryMappingLayout::kProtectionExecute)
+ && !(prot & MemoryMappingLayout::kProtectionWrite)
+ && IsAppMem(start)) {
+ // Assume it's .rodata
+ char *shadow_start = (char*)MemToShadow(start);
+ char *shadow_end = (char*)MemToShadow(end);
+ for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
+ internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
+ PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
+ }
+ }
+ }
+ internal_close(fd);
+ internal_unlink(filename);
+}
+
+void InitializeShadowMemory() {
+ uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
+ kLinuxShadowEnd - kLinuxShadowBeg);
+ if (shadow != kLinuxShadowBeg) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and "
+ "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
+ Die();
+ }
+ const uptr kClosedLowBeg = 0x200000;
+ const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
+ const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
+ const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
+ ProtectRange(kClosedLowBeg, kClosedLowEnd);
+ ProtectRange(kClosedMidBeg, kClosedMidEnd);
+ DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
+ kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
+ DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
+ kLinuxShadowBeg, kLinuxShadowEnd,
+ (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
+ DPrintf("kClosedMid %zx-%zx (%zuGB)\n",
+ kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
+ DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
+ kLinuxAppMemBeg, kLinuxAppMemEnd,
+ (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
+ DPrintf("stack %zx\n", (uptr)&shadow);
+
+ MapRodata();
+}
+#endif
+
+static uptr g_data_start;
+static uptr g_data_end;
+
+#ifndef TSAN_GO
+static void CheckPIE() {
+ // Ensure that the binary is indeed compiled with -pie.
+ MemoryMappingLayout proc_maps(true);
+ uptr start, end;
+ if (proc_maps.Next(&start, &end,
+ /*offset*/0, /*filename*/0, /*filename_size*/0,
+ /*protection*/0)) {
+ if ((u64)start < kLinuxAppMemBeg) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
+ "something is mapped at 0x%zx < 0x%zx)\n",
+ start, kLinuxAppMemBeg);
+ Printf("FATAL: Make sure to compile with -fPIE"
+ " and to link with -pie.\n");
+ Die();
+ }
+ }
+}
+
+static void InitDataSeg() {
+ MemoryMappingLayout proc_maps(true);
+ uptr start, end, offset;
+ char name[128];
+ bool prev_is_data = false;
+ while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
+ /*protection*/ 0)) {
+ DPrintf("%p-%p %p %s\n", start, end, offset, name);
+ bool is_data = offset != 0 && name[0] != 0;
+ // BSS may get merged with [heap] in /proc/self/maps. This is not very
+ // reliable.
+ bool is_bss = offset == 0 &&
+ (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data;
+ if (g_data_start == 0 && is_data)
+ g_data_start = start;
+ if (is_bss)
+ g_data_end = end;
+ prev_is_data = is_data;
+ }
+ DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
+ CHECK_LT(g_data_start, g_data_end);
+ CHECK_GE((uptr)&g_data_start, g_data_start);
+ CHECK_LT((uptr)&g_data_start, g_data_end);
+}
+
+#endif // #ifndef TSAN_GO
+
+static rlim_t getlim(int res) {
+ rlimit rlim;
+ CHECK_EQ(0, getrlimit(res, &rlim));
+ return rlim.rlim_cur;
+}
+
+static void setlim(int res, rlim_t lim) {
+ // The following magic is to prevent clang from replacing it with memset.
+ volatile rlimit rlim;
+ rlim.rlim_cur = lim;
+ rlim.rlim_max = lim;
+ setrlimit(res, (rlimit*)&rlim);
+}
+
+const char *InitializePlatform() {
+ void *p = 0;
+ if (sizeof(p) == 8) {
+    // Disable core dumps: dumping 16TB of address space takes a while.
+ setlim(RLIMIT_CORE, 0);
+ }
+
+  // Go maps shadow memory lazily and works fine with a limited address
+  // space. An unlimited stack is not a problem either, because the
+  // executable is not compiled with -pie.
+ if (kCppMode) {
+ bool reexec = false;
+    // TSan doesn't play well with an unlimited stack size (the stack
+    // overlaps with shadow memory). If we detect an unlimited stack size,
+    // we re-exec the program with a limited stack size as a best effort.
+ if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
+ const uptr kMaxStackSize = 32 * 1024 * 1024;
+ Report("WARNING: Program is run with unlimited stack size, which "
+ "wouldn't work with ThreadSanitizer.\n");
+ Report("Re-execing with stack size limited to %zd bytes.\n",
+ kMaxStackSize);
+ SetStackSizeLimitInBytes(kMaxStackSize);
+ reexec = true;
+ }
+
+ if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+ Report("WARNING: Program is run with limited virtual address space,"
+ " which wouldn't work with ThreadSanitizer.\n");
+ Report("Re-execing with unlimited virtual address space.\n");
+ setlim(RLIMIT_AS, -1);
+ reexec = true;
+ }
+ if (reexec)
+ ReExec();
+ }
+
+#ifndef TSAN_GO
+ CheckPIE();
+ InitTlsSize();
+ InitDataSeg();
+#endif
+ return GetEnv(kTsanOptionsEnv);
+}
+
+bool IsGlobalVar(uptr addr) {
+ return g_data_start && addr >= g_data_start && addr < g_data_end;
+}
+
+#ifndef TSAN_GO
+// Extract file descriptors passed to glibc internal __res_iclose function.
+// This is required to properly "close" the fds, because we do not see internal
+// closes within glibc. The code is a pure hack.
+int ExtractResolvFDs(void *state, int *fds, int nfd) {
+ int cnt = 0;
+ __res_state *statp = (__res_state*)state;
+ for (int i = 0; i < MAXNS && cnt < nfd; i++) {
+ if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
+ fds[cnt++] = statp->_u._ext.nssocks[i];
+ }
+ return cnt;
+}
+
+// Extract file descriptors passed via UNIX domain sockets.
+// This is required to properly handle "open" of these fds.
+// See 'man recvmsg' and 'man 3 cmsg'.
+int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
+ int res = 0;
+ msghdr *msg = (msghdr*)msgp;
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
+ for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
+ continue;
+ int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
+ for (int i = 0; i < n; i++) {
+ fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
+ if (res == nfd)
+ return res;
+ }
+ }
+ return res;
+}
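+// Usage sketch (editorial; the callback name is hypothetical): a recvmsg
+// interceptor could harvest the passed descriptors like this:
+//   int fds[64];
+//   int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
+//   for (int i = 0; i < cnt; i++)
+//     OnFdCreated(thr, pc, fds[i]);  // hypothetical per-fd handler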
+#endif
+
+
+} // namespace __tsan
+
+#endif // SANITIZER_LINUX
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_platform_mac.cc b/gcc-4.9/libsanitizer/tsan/tsan_platform_mac.cc
new file mode 100644
index 000000000..3dca611dc
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_platform_mac.cc
@@ -0,0 +1,93 @@
+//===-- tsan_platform_mac.cc ----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sched.h>
+
+namespace __tsan {
+
+ScopedInRtl::ScopedInRtl() {
+}
+
+ScopedInRtl::~ScopedInRtl() {
+}
+
+uptr GetShadowMemoryConsumption() {
+ return 0;
+}
+
+void FlushShadowMemory() {
+}
+
+#ifndef TSAN_GO
+void InitializeShadowMemory() {
+ uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
+ kLinuxShadowEnd - kLinuxShadowBeg);
+ if (shadow != kLinuxShadowBeg) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and "
+ "to link with -pie.\n");
+ Die();
+ }
+ DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
+ kLinuxShadowBeg, kLinuxShadowEnd,
+ (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
+ DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
+ kLinuxAppMemBeg, kLinuxAppMemEnd,
+ (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
+}
+#endif
+
+const char *InitializePlatform() {
+ void *p = 0;
+ if (sizeof(p) == 8) {
+    // Disable core dumps: dumping 16TB of address space takes a while.
+ // The following magic is to prevent clang from replacing it with memset.
+ volatile rlimit lim;
+ lim.rlim_cur = 0;
+ lim.rlim_max = 0;
+ setrlimit(RLIMIT_CORE, (rlimit*)&lim);
+ }
+
+ return GetEnv(kTsanOptionsEnv);
+}
+
+void FinalizePlatform() {
+ fflush(0);
+}
+
+} // namespace __tsan
+
+#endif // SANITIZER_MAC
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_platform_windows.cc b/gcc-4.9/libsanitizer/tsan/tsan_platform_windows.cc
new file mode 100644
index 000000000..6e49ef42f
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_platform_windows.cc
@@ -0,0 +1,45 @@
+//===-- tsan_platform_windows.cc ------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Windows-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#include "tsan_platform.h"
+
+#include <stdlib.h>
+
+namespace __tsan {
+
+ScopedInRtl::ScopedInRtl() {
+}
+
+ScopedInRtl::~ScopedInRtl() {
+}
+
+uptr GetShadowMemoryConsumption() {
+ return 0;
+}
+
+void FlushShadowMemory() {
+}
+
+const char *InitializePlatform() {
+ return GetEnv(kTsanOptionsEnv);
+}
+
+void FinalizePlatform() {
+ fflush(0);
+}
+
+} // namespace __tsan
+
+#endif // SANITIZER_WINDOWS
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_report.cc b/gcc-4.9/libsanitizer/tsan/tsan_report.cc
new file mode 100644
index 000000000..f2484166e
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_report.cc
@@ -0,0 +1,302 @@
+//===-- tsan_report.cc ----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_report.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+
+namespace __tsan {
+
+class Decorator: private __sanitizer::AnsiColorDecorator {
+ public:
+ Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+ const char *Warning() { return Red(); }
+ const char *EndWarning() { return Default(); }
+ const char *Access() { return Blue(); }
+ const char *EndAccess() { return Default(); }
+ const char *ThreadDescription() { return Cyan(); }
+ const char *EndThreadDescription() { return Default(); }
+ const char *Location() { return Green(); }
+ const char *EndLocation() { return Default(); }
+ const char *Sleep() { return Yellow(); }
+ const char *EndSleep() { return Default(); }
+ const char *Mutex() { return Magenta(); }
+ const char *EndMutex() { return Default(); }
+};
+
+ReportDesc::ReportDesc()
+ : stacks(MBlockReportStack)
+ , mops(MBlockReportMop)
+ , locs(MBlockReportLoc)
+ , mutexes(MBlockReportMutex)
+ , threads(MBlockReportThread)
+ , sleep()
+ , count() {
+}
+
+ReportMop::ReportMop()
+ : mset(MBlockReportMutex) {
+}
+
+ReportDesc::~ReportDesc() {
+ // FIXME(dvyukov): it must be leaking a lot of memory.
+}
+
+#ifndef TSAN_GO
+
+const int kThreadBufSize = 32;
+const char *thread_name(char *buf, int tid) {
+ if (tid == 0)
+ return "main thread";
+ internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
+ return buf;
+}
+
+static const char *ReportTypeString(ReportType typ) {
+ if (typ == ReportTypeRace)
+ return "data race";
+ if (typ == ReportTypeVptrRace)
+ return "data race on vptr (ctor/dtor vs virtual call)";
+ if (typ == ReportTypeUseAfterFree)
+ return "heap-use-after-free";
+ if (typ == ReportTypeThreadLeak)
+ return "thread leak";
+ if (typ == ReportTypeMutexDestroyLocked)
+ return "destroy of a locked mutex";
+ if (typ == ReportTypeSignalUnsafe)
+ return "signal-unsafe call inside of a signal";
+ if (typ == ReportTypeErrnoInSignal)
+ return "signal handler spoils errno";
+ return "";
+}
+
+void PrintStack(const ReportStack *ent) {
+ if (ent == 0) {
+ Printf(" [failed to restore the stack]\n\n");
+ return;
+ }
+ for (int i = 0; ent; ent = ent->next, i++) {
+ Printf(" #%d %s %s:%d", i, ent->func, ent->file, ent->line);
+ if (ent->col)
+ Printf(":%d", ent->col);
+ if (ent->module && ent->offset)
+ Printf(" (%s+%p)\n", ent->module, (void*)ent->offset);
+ else
+ Printf(" (%p)\n", (void*)ent->pc);
+ }
+ Printf("\n");
+}
+
+static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
+ for (uptr i = 0; i < mset.Size(); i++) {
+ if (i == 0)
+ Printf(" (mutexes:");
+ const ReportMopMutex m = mset[i];
+ Printf(" %s M%llu", m.write ? "write" : "read", m.id);
+ Printf(i == mset.Size() - 1 ? ")" : ",");
+ }
+}
+
+static const char *MopDesc(bool first, bool write, bool atomic) {
+ return atomic ? (first ? (write ? "Atomic write" : "Atomic read")
+ : (write ? "Previous atomic write" : "Previous atomic read"))
+ : (first ? (write ? "Write" : "Read")
+ : (write ? "Previous write" : "Previous read"));
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ Decorator d;
+ char thrbuf[kThreadBufSize];
+ Printf("%s", d.Access());
+ Printf(" %s of size %d at %p by %s",
+ MopDesc(first, mop->write, mop->atomic),
+ mop->size, (void*)mop->addr,
+ thread_name(thrbuf, mop->tid));
+ PrintMutexSet(mop->mset);
+ Printf(":\n");
+ Printf("%s", d.EndAccess());
+ PrintStack(mop->stack);
+}
+
+static void PrintLocation(const ReportLocation *loc) {
+ Decorator d;
+ char thrbuf[kThreadBufSize];
+ bool print_stack = false;
+ Printf("%s", d.Location());
+ if (loc->type == ReportLocationGlobal) {
+ Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
+ loc->name, loc->size, loc->addr, loc->module, loc->offset);
+ } else if (loc->type == ReportLocationHeap) {
+ char thrbuf[kThreadBufSize];
+ Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
+ loc->size, loc->addr, thread_name(thrbuf, loc->tid));
+ print_stack = true;
+ } else if (loc->type == ReportLocationStack) {
+ Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
+ } else if (loc->type == ReportLocationTLS) {
+ Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid));
+ } else if (loc->type == ReportLocationFD) {
+ Printf(" Location is file descriptor %d created by %s at:\n",
+ loc->fd, thread_name(thrbuf, loc->tid));
+ print_stack = true;
+ }
+ Printf("%s", d.EndLocation());
+ if (print_stack)
+ PrintStack(loc->stack);
+}
+
+static void PrintMutex(const ReportMutex *rm) {
+ Decorator d;
+ if (rm->destroyed) {
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%llu is already destroyed.\n\n", rm->id);
+ Printf("%s", d.EndMutex());
+ } else {
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%llu created at:\n", rm->id);
+ Printf("%s", d.EndMutex());
+ PrintStack(rm->stack);
+ }
+}
+
+static void PrintThread(const ReportThread *rt) {
+ Decorator d;
+ if (rt->id == 0) // Little sense in describing the main thread.
+ return;
+ Printf("%s", d.ThreadDescription());
+ Printf(" Thread T%d", rt->id);
+ if (rt->name && rt->name[0] != '\0')
+ Printf(" '%s'", rt->name);
+ char thrbuf[kThreadBufSize];
+ Printf(" (tid=%zu, %s) created by %s",
+ rt->pid, rt->running ? "running" : "finished",
+ thread_name(thrbuf, rt->parent_tid));
+ if (rt->stack)
+ Printf(" at:");
+ Printf("\n");
+ Printf("%s", d.EndThreadDescription());
+ PrintStack(rt->stack);
+}
+
+static void PrintSleep(const ReportStack *s) {
+ Decorator d;
+ Printf("%s", d.Sleep());
+ Printf(" As if synchronized via sleep:\n");
+ Printf("%s", d.EndSleep());
+ PrintStack(s);
+}
+
+static ReportStack *ChooseSummaryStack(const ReportDesc *rep) {
+ if (rep->mops.Size())
+ return rep->mops[0]->stack;
+ if (rep->stacks.Size())
+ return rep->stacks[0];
+ if (rep->mutexes.Size())
+ return rep->mutexes[0]->stack;
+ if (rep->threads.Size())
+ return rep->threads[0]->stack;
+ return 0;
+}
+
+ReportStack *SkipTsanInternalFrames(ReportStack *ent) {
+ while (FrameIsInternal(ent) && ent->next)
+ ent = ent->next;
+ return ent;
+}
+
+void PrintReport(const ReportDesc *rep) {
+ Decorator d;
+ Printf("==================\n");
+ const char *rep_typ_str = ReportTypeString(rep->typ);
+ Printf("%s", d.Warning());
+ Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str,
+ (int)internal_getpid());
+ Printf("%s", d.EndWarning());
+
+ for (uptr i = 0; i < rep->stacks.Size(); i++) {
+ if (i)
+ Printf(" and:\n");
+ PrintStack(rep->stacks[i]);
+ }
+
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+
+ if (rep->sleep)
+ PrintSleep(rep->sleep);
+
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
+
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutex(rep->mutexes[i]);
+
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+
+ if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
+ Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
+
+ if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep)))
+ ReportErrorSummary(rep_typ_str, ent->file, ent->line, ent->func);
+
+ Printf("==================\n");
+}
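+// Illustrative report shape (editorial example, not verbatim output):
+//   ==================
+//   WARNING: ThreadSanitizer: data race (pid=1234)
+//     Write of size 4 at 0x7d0400001230 by thread T1 (mutexes: write M1):
+//       #0 foo src.cc:10 (exe+0x4b20)
+//     Previous read of size 4 at 0x7d0400001230 by main thread:
+//       #0 bar src.cc:20 (exe+0x4c10)
+//   ==================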
+
+#else // #ifndef TSAN_GO
+
+const int kMainThreadId = 1;
+
+void PrintStack(const ReportStack *ent) {
+ if (ent == 0) {
+ Printf(" [failed to restore the stack]\n");
+ return;
+ }
+ for (int i = 0; ent; ent = ent->next, i++) {
+ Printf(" %s()\n %s:%d +0x%zx\n",
+ ent->func, ent->file, ent->line, (void*)ent->offset);
+ }
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ Printf("\n");
+ Printf("%s by ",
+ (first ? (mop->write ? "Write" : "Read")
+ : (mop->write ? "Previous write" : "Previous read")));
+ if (mop->tid == kMainThreadId)
+ Printf("main goroutine:\n");
+ else
+ Printf("goroutine %d:\n", mop->tid);
+ PrintStack(mop->stack);
+}
+
+static void PrintThread(const ReportThread *rt) {
+ if (rt->id == kMainThreadId)
+ return;
+ Printf("\n");
+ Printf("Goroutine %d (%s) created at:\n",
+ rt->id, rt->running ? "running" : "finished");
+ PrintStack(rt->stack);
+}
+
+void PrintReport(const ReportDesc *rep) {
+ Printf("==================\n");
+ Printf("WARNING: DATA RACE");
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+ Printf("==================\n");
+}
+
+#endif
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_report.h b/gcc-4.9/libsanitizer/tsan/tsan_report.h
new file mode 100644
index 000000000..c0eef9eb0
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_report.h
@@ -0,0 +1,119 @@
+//===-- tsan_report.h -------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_REPORT_H
+#define TSAN_REPORT_H
+
+#include "tsan_defs.h"
+#include "tsan_vector.h"
+
+namespace __tsan {
+
+enum ReportType {
+ ReportTypeRace,
+ ReportTypeVptrRace,
+ ReportTypeUseAfterFree,
+ ReportTypeThreadLeak,
+ ReportTypeMutexDestroyLocked,
+ ReportTypeSignalUnsafe,
+ ReportTypeErrnoInSignal
+};
+
+struct ReportStack {
+ ReportStack *next;
+ char *module;
+ uptr offset;
+ uptr pc;
+ char *func;
+ char *file;
+ int line;
+ int col;
+};
+
+struct ReportMopMutex {
+ u64 id;
+ bool write;
+};
+
+struct ReportMop {
+ int tid;
+ uptr addr;
+ int size;
+ bool write;
+ bool atomic;
+ Vector<ReportMopMutex> mset;
+ ReportStack *stack;
+
+ ReportMop();
+};
+
+enum ReportLocationType {
+ ReportLocationGlobal,
+ ReportLocationHeap,
+ ReportLocationStack,
+ ReportLocationTLS,
+ ReportLocationFD
+};
+
+struct ReportLocation {
+ ReportLocationType type;
+ uptr addr;
+ uptr size;
+ char *module;
+ uptr offset;
+ int tid;
+ int fd;
+ char *name;
+ char *file;
+ int line;
+ ReportStack *stack;
+};
+
+struct ReportThread {
+ int id;
+ uptr pid;
+ bool running;
+ char *name;
+ int parent_tid;
+ ReportStack *stack;
+};
+
+struct ReportMutex {
+ u64 id;
+ bool destroyed;
+ ReportStack *stack;
+};
+
+class ReportDesc {
+ public:
+ ReportType typ;
+ Vector<ReportStack*> stacks;
+ Vector<ReportMop*> mops;
+ Vector<ReportLocation*> locs;
+ Vector<ReportMutex*> mutexes;
+ Vector<ReportThread*> threads;
+ ReportStack *sleep;
+ int count;
+
+ ReportDesc();
+ ~ReportDesc();
+
+ private:
+ ReportDesc(const ReportDesc&);
+ void operator = (const ReportDesc&);
+};
+
+// Format and output the report to the console/log. No additional logic.
+void PrintReport(const ReportDesc *rep);
+void PrintStack(const ReportStack *stack);
+
+} // namespace __tsan
+
+#endif // TSAN_REPORT_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_rtl.cc b/gcc-4.9/libsanitizer/tsan/tsan_rtl.cc
new file mode 100644
index 000000000..573eeb8a9
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_rtl.cc
@@ -0,0 +1,771 @@
+//===-- tsan_rtl.cc -------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main file (entry points) for the TSan run-time.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "tsan_defs.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+
+volatile int __tsan_resumed = 0;
+
+extern "C" void __tsan_resume() {
+ __tsan_resumed = 1;
+}
+
+namespace __tsan {
+
+#ifndef TSAN_GO
+THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
+#endif
+static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+
+// Can be overridden by a front-end.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnFinalize(bool failed);
+#else
+SANITIZER_INTERFACE_ATTRIBUTE
+bool WEAK OnFinalize(bool failed) {
+ return failed;
+}
+#endif
+
+static Context *ctx;
+Context *CTX() {
+ return ctx;
+}
+
+static char thread_registry_placeholder[sizeof(ThreadRegistry)];
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+ // Map thread trace when context is created.
+ MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
+ MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
+ new(ThreadTrace(tid)) Trace();
+ void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
+ return new(mem) ThreadContext(tid);
+}
+
+#ifndef TSAN_GO
+static const u32 kThreadQuarantineSize = 16;
+#else
+static const u32 kThreadQuarantineSize = 64;
+#endif
+
+Context::Context()
+ : initialized()
+ , report_mtx(MutexTypeReport, StatMtxReport)
+ , nreported()
+ , nmissed_expected()
+ , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize))
+ , racy_stacks(MBlockRacyStacks)
+ , racy_addresses(MBlockRacyAddresses)
+ , fired_suppressions(8) {
+}
+
+// The objects are allocated in TLS, so one may rely on zero-initialization.
+ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size)
+ : fast_state(tid, epoch)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // , ignore_reads_and_writes()
+ // , in_rtl()
+#ifndef TSAN_GO
+ , jmp_bufs(MBlockJmpBuf)
+#endif
+ , tid(tid)
+ , unique_id(unique_id)
+ , stk_addr(stk_addr)
+ , stk_size(stk_size)
+ , tls_addr(tls_addr)
+ , tls_size(tls_size) {
+}
+
+static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
+ uptr n_threads;
+ uptr n_running_threads;
+ ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
+ InternalScopedBuffer<char> buf(4096);
+ internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
+ i, n_threads, n_running_threads);
+ internal_write(fd, buf.data(), internal_strlen(buf.data()));
+ WriteMemoryProfile(buf.data(), buf.size());
+ internal_write(fd, buf.data(), internal_strlen(buf.data()));
+}
+
+static void BackgroundThread(void *arg) {
+ ScopedInRtl in_rtl;
+ Context *ctx = CTX();
+ const u64 kMs2Ns = 1000 * 1000;
+
+ fd_t mprof_fd = kInvalidFd;
+ if (flags()->profile_memory && flags()->profile_memory[0]) {
+ InternalScopedBuffer<char> filename(4096);
+ internal_snprintf(filename.data(), filename.size(), "%s.%d",
+ flags()->profile_memory, (int)internal_getpid());
+ uptr openrv = OpenFile(filename.data(), true);
+ if (internal_iserror(openrv)) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ &filename[0]);
+ } else {
+ mprof_fd = openrv;
+ }
+ }
+
+ u64 last_flush = NanoTime();
+ uptr last_rss = 0;
+ for (int i = 0; ; i++) {
+ SleepForSeconds(1);
+ u64 now = NanoTime();
+
+ // Flush memory if requested.
+ if (flags()->flush_memory_ms > 0) {
+ if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
+ if (flags()->verbosity > 0)
+ Printf("ThreadSanitizer: periodic memory flush\n");
+ FlushShadowMemory();
+ last_flush = NanoTime();
+ }
+ }
+ if (flags()->memory_limit_mb > 0) {
+ uptr rss = GetRSS();
+ uptr limit = uptr(flags()->memory_limit_mb) << 20;
+ if (flags()->verbosity > 0) {
+ Printf("ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
+ }
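+      // Editorial note: the condition below is equivalent to
+      // rss - last_rss > limit - rss, i.e. flush once RSS passes the
+      // midpoint between the previous RSS and the configured limit.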
+ if (2 * rss > limit + last_rss) {
+ if (flags()->verbosity > 0)
+ Printf("ThreadSanitizer: flushing memory due to RSS\n");
+ FlushShadowMemory();
+ rss = GetRSS();
+ if (flags()->verbosity > 0)
+ Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ }
+ last_rss = rss;
+ }
+
+ // Write memory profile if requested.
+ if (mprof_fd != kInvalidFd)
+ MemoryProfiler(ctx, mprof_fd, i);
+
+#ifndef TSAN_GO
+ // Flush symbolizer cache if requested.
+ if (flags()->flush_symbolizer_ms > 0) {
+ u64 last = atomic_load(&ctx->last_symbolize_time_ns,
+ memory_order_relaxed);
+ if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
+ Lock l(&ctx->report_mtx);
+ SpinMutexLock l2(&CommonSanitizerReportMutex);
+ SymbolizeFlush();
+ atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
+ }
+ }
+#endif
+ }
+}
+
+void DontNeedShadowFor(uptr addr, uptr size) {
+ uptr shadow_beg = MemToShadow(addr);
+ uptr shadow_end = MemToShadow(addr + size);
+ FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+}
+
+void MapShadow(uptr addr, uptr size) {
+ MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
+}
+
+void MapThreadTrace(uptr addr, uptr size) {
+ DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
+ CHECK_GE(addr, kTraceMemBegin);
+ CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+ uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
+ if (addr1 != addr) {
+ Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
+ addr, size, addr1);
+ Die();
+ }
+}
+
+void Initialize(ThreadState *thr) {
+ // Thread safe because done before all threads exist.
+ static bool is_initialized = false;
+ if (is_initialized)
+ return;
+ is_initialized = true;
+ SanitizerToolName = "ThreadSanitizer";
+ // Install tool-specific callbacks in sanitizer_common.
+ SetCheckFailedCallback(TsanCheckFailed);
+
+ ScopedInRtl in_rtl;
+#ifndef TSAN_GO
+ InitializeAllocator();
+#endif
+ InitializeInterceptors();
+ const char *env = InitializePlatform();
+ InitializeMutex();
+ InitializeDynamicAnnotations();
+ ctx = new(ctx_placeholder) Context;
+#ifndef TSAN_GO
+ InitializeShadowMemory();
+#endif
+ InitializeFlags(&ctx->flags, env);
+  // Set up the correct file descriptor for error reports.
+ __sanitizer_set_report_path(flags()->log_path);
+ InitializeSuppressions();
+#ifndef TSAN_GO
+ InitializeLibIgnore();
+ // Initialize external symbolizer before internal threads are started.
+ const char *external_symbolizer = flags()->external_symbolizer_path;
+ bool external_symbolizer_started =
+ Symbolizer::Init(external_symbolizer)->IsExternalAvailable();
+ if (external_symbolizer != 0 && external_symbolizer[0] != '\0' &&
+ !external_symbolizer_started) {
+ Printf("Failed to start external symbolizer: '%s'\n",
+ external_symbolizer);
+ Die();
+ }
+ Symbolizer::Get()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+#endif
+ internal_start_thread(&BackgroundThread, 0);
+
+ if (ctx->flags.verbosity)
+ Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ (int)internal_getpid());
+
+ // Initialize thread 0.
+ int tid = ThreadCreate(thr, 0, 0, true);
+ CHECK_EQ(tid, 0);
+ ThreadStart(thr, tid, internal_getpid());
+ CHECK_EQ(thr->in_rtl, 1);
+ ctx->initialized = true;
+
+ if (flags()->stop_on_start) {
+ Printf("ThreadSanitizer is suspended at startup (pid %d)."
+ " Call __tsan_resume().\n",
+ (int)internal_getpid());
+ while (__tsan_resumed == 0) {}
+ }
+}
+
+int Finalize(ThreadState *thr) {
+ ScopedInRtl in_rtl;
+ Context *ctx = __tsan::ctx;
+ bool failed = false;
+
+ if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
+ SleepForMillis(flags()->atexit_sleep_ms);
+
+ // Wait for pending reports.
+ ctx->report_mtx.Lock();
+ CommonSanitizerReportMutex.Lock();
+ CommonSanitizerReportMutex.Unlock();
+ ctx->report_mtx.Unlock();
+
+#ifndef TSAN_GO
+ if (ctx->flags.verbosity)
+ AllocatorPrintStats();
+#endif
+
+ ThreadFinalize(thr);
+
+ if (ctx->nreported) {
+ failed = true;
+#ifndef TSAN_GO
+ Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
+#else
+ Printf("Found %d data race(s)\n", ctx->nreported);
+#endif
+ }
+
+ if (ctx->nmissed_expected) {
+ failed = true;
+ Printf("ThreadSanitizer: missed %d expected races\n",
+ ctx->nmissed_expected);
+ }
+
+ if (flags()->print_suppressions)
+ PrintMatchedSuppressions();
+#ifndef TSAN_GO
+ if (flags()->print_benign)
+ PrintMatchedBenignRaces();
+#endif
+
+ failed = OnFinalize(failed);
+
+ StatAggregate(ctx->stat, thr->stat);
+ StatOutput(ctx->stat);
+ return failed ? flags()->exitcode : 0;
+}
+
+#ifndef TSAN_GO
+u32 CurrentStackId(ThreadState *thr, uptr pc) {
+ if (thr->shadow_stack_pos == 0) // May happen during bootstrap.
+ return 0;
+ if (pc) {
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+ }
+ u32 id = StackDepotPut(thr->shadow_stack,
+ thr->shadow_stack_pos - thr->shadow_stack);
+ if (pc)
+ thr->shadow_stack_pos--;
+ return id;
+}
+#endif
+
+void TraceSwitch(ThreadState *thr) {
+ thr->nomalloc++;
+ ScopedInRtl in_rtl;
+ Trace *thr_trace = ThreadTrace(thr->tid);
+ Lock l(&thr_trace->mtx);
+ unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
+ TraceHeader *hdr = &thr_trace->headers[trace];
+ hdr->epoch0 = thr->fast_state.epoch();
+ hdr->stack0.ObtainCurrent(thr, 0);
+ hdr->mset0 = thr->mset;
+ thr->nomalloc--;
+}
+
+Trace *ThreadTrace(int tid) {
+ return (Trace*)GetThreadTraceHeader(tid);
+}
+
+uptr TraceTopPC(ThreadState *thr) {
+ Event *events = (Event*)GetThreadTrace(thr->tid);
+ uptr pc = events[thr->fast_state.GetTracePos()];
+ return pc;
+}
+
+uptr TraceSize() {
+ return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
+}
+
+uptr TraceParts() {
+ return TraceSize() / kTracePartSize;
+}
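+// Sizing example (editorial, assuming kTracePartSizeBits == 14 as in
+// tsan_trace.h): with history_size == 2 (the C++ default),
+//   TraceSize()  == 1 << (14 + 2 + 1) == 128K events
+//   TraceParts() == 128K / 16K        == 8 parts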
+
+#ifndef TSAN_GO
+extern "C" void __tsan_trace_switch() {
+ TraceSwitch(cur_thread());
+}
+
+extern "C" void __tsan_report_race() {
+ ReportRace(cur_thread());
+}
+#endif
+
+ALWAYS_INLINE
+Shadow LoadShadow(u64 *p) {
+ u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
+ return Shadow(raw);
+}
+
+ALWAYS_INLINE
+void StoreShadow(u64 *sp, u64 s) {
+ atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
+}
+
+ALWAYS_INLINE
+void StoreIfNotYetStored(u64 *sp, u64 *s) {
+ StoreShadow(sp, *s);
+ *s = 0;
+}
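+// Editorial note: this helper both publishes the access and zeroes
+// *s (store_word); MemoryAccessImpl below uses store_word == 0 to mean
+// that the current access has already been written to the shadow
+// (see the LIKELY check after the slot scan).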
+
+static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
+ Shadow cur, Shadow old) {
+ thr->racy_state[0] = cur.raw();
+ thr->racy_state[1] = old.raw();
+ thr->racy_shadow_addr = shadow_mem;
+#ifndef TSAN_GO
+ HACKY_CALL(__tsan_report_race);
+#else
+ ReportRace(thr);
+#endif
+}
+
+static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
+ return old.epoch() >= thr->fast_synch_epoch;
+}
+
+static inline bool HappensBefore(Shadow old, ThreadState *thr) {
+ return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
+}
+
+ALWAYS_INLINE USED
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+
+ // This potentially can live in an MMX/SSE scratch register.
+ // The required intrinsics are:
+ // __m128i _mm_move_epi64(__m128i*);
+ // _mm_storel_epi64(u64*, __m128i);
+ u64 store_word = cur.raw();
+
+  // Scan all the shadow values and dispatch to 4 categories:
+  // same, replace, candidate and race (see comments below).
+  // We consider only 3 cases regarding access sizes:
+  // equal, intersect and not intersect. Initially, larger and smaller
+  // sizes were considered as well; that allowed replacing some
+  // 'candidates' with 'same' or 'replace', but it's just not worth it
+  // (performance- and complexity-wise).
+
+ Shadow old(0);
+ if (kShadowCnt == 1) {
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ } else if (kShadowCnt == 2) {
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 1;
+#include "tsan_update_shadow_word_inl.h"
+ } else if (kShadowCnt == 4) {
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 1;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 2;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 3;
+#include "tsan_update_shadow_word_inl.h"
+ } else if (kShadowCnt == 8) {
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 1;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 2;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 3;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 4;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 5;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 6;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 7;
+#include "tsan_update_shadow_word_inl.h"
+ } else {
+ CHECK(false);
+ }
+
+  // We did not find any races and have already stored
+  // the current access info, so we are done.
+ if (LIKELY(store_word == 0))
+ return;
+  // Choose a random candidate slot and replace it.
+ StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
+ StatInc(thr, StatShadowReplace);
+ return;
+ RACE:
+ HandleRace(thr, shadow_mem, cur, old);
+ return;
+}
+
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int size, bool kAccessIsWrite, bool kIsAtomic) {
+ while (size) {
+ int size1 = 1;
+ int kAccessSizeLog = kSizeLog1;
+ if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
+ size1 = 8;
+ kAccessSizeLog = kSizeLog8;
+ } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
+ size1 = 4;
+ kAccessSizeLog = kSizeLog4;
+ } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
+ size1 = 2;
+ kAccessSizeLog = kSizeLog2;
+ }
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
+ addr += size1;
+ size -= size1;
+ }
+}
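+// Worked example (editorial): a 7-byte access at an address with
+// addr % 8 == 3 is split into an aligned 4-byte access (cell offsets 3..6),
+// a 1-byte access (offset 7), and a 2-byte access (offsets 0..1 of the
+// next cell).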
+
+ALWAYS_INLINE USED
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
+ u64 *shadow_mem = (u64*)MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
+ " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
+ (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
+ (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
+ (uptr)shadow_mem[0], (uptr)shadow_mem[1],
+ (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
+#if TSAN_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsShadowMem((uptr)shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem((uptr)shadow_mem));
+ }
+#endif
+
+ if (*shadow_mem == kShadowRodata) {
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopRodata);
+ return;
+ }
+
+ FastState fast_state = thr->fast_state;
+ if (fast_state.GetIgnoreBit())
+ return;
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ Shadow cur(fast_state);
+ cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
+ cur.SetWrite(kAccessIsWrite);
+ cur.SetAtomic(kIsAtomic);
+
+ // We must not store to the trace if we do not store to the shadow.
+ // That is, this call must be moved somewhere below.
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ u64 val) {
+ (void)thr;
+ (void)pc;
+ if (size == 0)
+ return;
+ // FIXME: fix me.
+ uptr offset = addr % kShadowCell;
+ if (offset) {
+ offset = kShadowCell - offset;
+ if (size <= offset)
+ return;
+ addr += offset;
+ size -= offset;
+ }
+ DCHECK_EQ(addr % 8, 0);
+ // If a user passes some insane arguments (memset(0)),
+ // let it just crash as usual.
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return;
+  // Don't want to touch lots of shadow memory.
+  // If a program maps a 10MB stack, there is no need to reset the whole range.
+ size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
+  // UnmapOrDie/MmapFixedNoReserve do not work on Windows,
+  // so the unmap/remap path below is used only for C/C++.
+ if (kGoMode || size < 64*1024) {
+ u64 *p = (u64*)MemToShadow(addr);
+ CHECK(IsShadowMem((uptr)p));
+ CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
+ // FIXME: may overwrite a part outside the region
+ for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
+ p[i++] = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ p[i++] = 0;
+ }
+ } else {
+ // The region is big, reset only beginning and end.
+ const uptr kPageSize = 4096;
+ u64 *begin = (u64*)MemToShadow(addr);
+ u64 *end = begin + size / kShadowCell * kShadowCnt;
+ u64 *p = begin;
+    // Set at least the first kPageSize/2 and continue up to a page boundary.
+ while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ *p++ = 0;
+ }
+ // Reset middle part.
+ u64 *p1 = p;
+ p = RoundDown(end, kPageSize);
+ UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
+ MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
+ // Set the ending.
+ while (p < end) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ *p++ = 0;
+ }
+ }
+}
+
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ MemoryRangeSet(thr, pc, addr, size, 0);
+}
+
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+  // Processing more than 1k (4k of shadow) is expensive,
+  // can cause excessive memory consumption (the user does not necessarily
+  // touch the whole range), and is most likely unnecessary.
+ if (size > 1024)
+ size = 1024;
+ CHECK_EQ(thr->is_freeing, false);
+ thr->is_freeing = true;
+ MemoryAccessRange(thr, pc, addr, size, true);
+ thr->is_freeing = false;
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
+ s.MarkAsFreed();
+ s.SetWrite(true);
+ s.SetAddr0AndSizeLog(0, 3);
+ MemoryRangeSet(thr, pc, addr, size, s.raw());
+}
+
+void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
+ s.SetWrite(true);
+ s.SetAddr0AndSizeLog(0, 3);
+ MemoryRangeSet(thr, pc, addr, size, s.raw());
+}
+
+ALWAYS_INLINE USED
+void FuncEntry(ThreadState *thr, uptr pc) {
+ DCHECK_EQ(thr->in_rtl, 0);
+ StatInc(thr, StatFuncEnter);
+ DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+
+  // Shadow stack maintenance can be replaced with
+  // stack unwinding during trace switch (which would presumably be faster).
+ DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
+#ifndef TSAN_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end) {
+ const int sz = thr->shadow_stack_end - thr->shadow_stack;
+ const int newsz = 2 * sz;
+ uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
+ newsz * sizeof(uptr));
+ internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = newstack;
+ thr->shadow_stack_pos = newstack + sz;
+ thr->shadow_stack_end = newstack + newsz;
+ }
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+}
+
+ALWAYS_INLINE USED
+void FuncExit(ThreadState *thr) {
+ DCHECK_EQ(thr->in_rtl, 0);
+ StatInc(thr, StatFuncExit);
+ DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+
+ DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
+#ifndef TSAN_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#endif
+ thr->shadow_stack_pos--;
+}
+
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
+ thr->ignore_reads_and_writes++;
+ CHECK_GT(thr->ignore_reads_and_writes, 0);
+ thr->fast_state.SetIgnoreBit();
+#ifndef TSAN_GO
+ thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
+}
+
+void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
+ thr->ignore_reads_and_writes--;
+ CHECK_GE(thr->ignore_reads_and_writes, 0);
+ if (thr->ignore_reads_and_writes == 0) {
+ thr->fast_state.ClearIgnoreBit();
+#ifndef TSAN_GO
+ thr->mop_ignore_set.Reset();
+#endif
+ }
+}
+
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
+ thr->ignore_sync++;
+ CHECK_GT(thr->ignore_sync, 0);
+#ifndef TSAN_GO
+ thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
+}
+
+void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
+ thr->ignore_sync--;
+ CHECK_GE(thr->ignore_sync, 0);
+#ifndef TSAN_GO
+ if (thr->ignore_sync == 0)
+ thr->mop_ignore_set.Reset();
+#endif
+}
+
+bool MD5Hash::operator==(const MD5Hash &other) const {
+ return hash[0] == other.hash[0] && hash[1] == other.hash[1];
+}
+
+#if TSAN_DEBUG
+void build_consistency_debug() {}
+#else
+void build_consistency_release() {}
+#endif
+
+#if TSAN_COLLECT_STATS
+void build_consistency_stats() {}
+#else
+void build_consistency_nostats() {}
+#endif
+
+#if TSAN_SHADOW_COUNT == 1
+void build_consistency_shadow1() {}
+#elif TSAN_SHADOW_COUNT == 2
+void build_consistency_shadow2() {}
+#elif TSAN_SHADOW_COUNT == 4
+void build_consistency_shadow4() {}
+#else
+void build_consistency_shadow8() {}
+#endif
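+// Editorial note: the build_consistency_* functions above are deliberately
+// empty; each build configuration defines (and references) exactly one of
+// them, so objects compiled with mismatched TSAN_DEBUG / TSAN_COLLECT_STATS /
+// TSAN_SHADOW_COUNT settings fail to link together.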
+
+} // namespace __tsan
+
+#ifndef TSAN_GO
+// Must be included in this file to make sure everything is inlined.
+#include "tsan_interface_inl.h"
+#endif
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_rtl.h b/gcc-4.9/libsanitizer/tsan/tsan_rtl.h
new file mode 100644
index 000000000..20493ea4d
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_rtl.h
@@ -0,0 +1,777 @@
+//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main internal TSan header file.
+//
+// Ground rules:
+// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
+// function-scope locals)
+// - All functions/classes/etc reside in namespace __tsan, except for those
+// declared in tsan_interface.h.
+// - Platform-specific files should be used instead of ifdefs (*).
+// - No system headers should be included in header files (*).
+// - Platform-specific headers should be included only in platform-specific
+//   files (*).
+//
+// (*) Except when inlining is critical for performance.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_RTL_H
+#define TSAN_RTL_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_asm.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libignore.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "tsan_clock.h"
+#include "tsan_defs.h"
+#include "tsan_flags.h"
+#include "tsan_sync.h"
+#include "tsan_trace.h"
+#include "tsan_vector.h"
+#include "tsan_report.h"
+#include "tsan_platform.h"
+#include "tsan_mutexset.h"
+#include "tsan_ignoreset.h"
+
+#if SANITIZER_WORDSIZE != 64
+# error "ThreadSanitizer is supported only on 64-bit platforms"
+#endif
+
+namespace __tsan {
+
+// Descriptor of user's memory block.
+struct MBlock {
+ /*
+ u64 mtx : 1; // must be first
+ u64 lst : 44;
+ u64 stk : 31; // on word boundary
+ u64 tid : kTidBits;
+ u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39
+ */
+ u64 raw[2];
+
+ void Init(uptr siz, u32 tid, u32 stk) {
+ raw[0] = raw[1] = 0;
+ raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
+ raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
+ raw[0] |= (u64)stk << (1 + 44);
+ raw[1] |= (u64)stk >> (64 - 44 - 1);
+ DCHECK_EQ(Size(), siz);
+ DCHECK_EQ(Tid(), tid);
+ DCHECK_EQ(StackId(), stk);
+ }
+
+ u32 Tid() const {
+ return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
+ }
+
+ uptr Size() const {
+ return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
+ }
+
+ u32 StackId() const {
+ return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
+ }
+
+ SyncVar *ListHead() const {
+ return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
+ }
+
+ void ListPush(SyncVar *v) {
+ SyncVar *lst = ListHead();
+ v->next = lst;
+ u64 x = (u64)v ^ (u64)lst;
+ x = (x >> 3) << 1;
+ raw[0] ^= x;
+ DCHECK_EQ(ListHead(), v);
+ }
+
+ SyncVar *ListPop() {
+ SyncVar *lst = ListHead();
+ SyncVar *nxt = lst->next;
+ lst->next = 0;
+ u64 x = (u64)lst ^ (u64)nxt;
+ x = (x >> 3) << 1;
+ raw[0] ^= x;
+ DCHECK_EQ(ListHead(), nxt);
+ return lst;
+ }
+
+ void ListReset() {
+ SyncVar *lst = ListHead();
+ u64 x = (u64)lst;
+ x = (x >> 3) << 1;
+ raw[0] ^= x;
+ DCHECK_EQ(ListHead(), 0);
+ }
+
+ void Lock();
+ void Unlock();
+ typedef GenericScopedLock<MBlock> ScopedLock;
+};
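+// Packing sketch (editorial, assuming kTidBits == 13 from tsan_defs.h):
+// after Init(siz, tid, stk),
+//   raw[0] bit  0      : mtx
+//   raw[0] bits 1..44  : lst (SyncVar list head >> 3)
+//   raw[0] bits 45..63 : low 19 bits of stk
+//   raw[1] bits 0..11  : high 12 bits of stk
+//   raw[1] bits 12..24 : tid
+//   raw[1] bits 25..63 : siz (39 bits)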
+
+#ifndef TSAN_GO
+// The COMPAT and the default layouts use the same allocator space,
+// so no #ifdef is needed here.
+const uptr kAllocatorSpace = 0x7d0000000000ULL;
+const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
+
+struct MapUnmapCallback;
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
+ DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+Allocator *allocator();
+#endif
+
+void TsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2);
+
+const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
+
+// FastState (from most significant bit):
+// ignore : 1
+// tid : kTidBits
+// epoch : kClkBits
+// unused : -
+// history_size : 3
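+//
+// For example, assuming the default kTidBits == 13 and kClkBits == 42 from
+// tsan_defs.h, kTidShift == 50 and kClkShift == 8, i.e.:
+//   bit  63     : ignore
+//   bits 50..62 : tid
+//   bits  8..49 : epoch
+//   bits  3..7  : unused
+//   bits  0..2  : history_size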
+class FastState {
+ public:
+ FastState(u64 tid, u64 epoch) {
+ x_ = tid << kTidShift;
+ x_ |= epoch << kClkShift;
+ DCHECK_EQ(tid, this->tid());
+ DCHECK_EQ(epoch, this->epoch());
+ DCHECK_EQ(GetIgnoreBit(), false);
+ }
+
+ explicit FastState(u64 x)
+ : x_(x) {
+ }
+
+ u64 raw() const {
+ return x_;
+ }
+
+ u64 tid() const {
+ u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
+ return res;
+ }
+
+ u64 TidWithIgnore() const {
+ u64 res = x_ >> kTidShift;
+ return res;
+ }
+
+ u64 epoch() const {
+ u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
+ return res;
+ }
+
+ void IncrementEpoch() {
+ u64 old_epoch = epoch();
+ x_ += 1 << kClkShift;
+ DCHECK_EQ(old_epoch + 1, epoch());
+ (void)old_epoch;
+ }
+
+ void SetIgnoreBit() { x_ |= kIgnoreBit; }
+ void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
+ bool GetIgnoreBit() const { return (s64)x_ < 0; }
+
+ void SetHistorySize(int hs) {
+ CHECK_GE(hs, 0);
+ CHECK_LE(hs, 7);
+ x_ = (x_ & ~7) | hs;
+ }
+
+ int GetHistorySize() const {
+ return (int)(x_ & 7);
+ }
+
+ void ClearHistorySize() {
+ x_ &= ~7;
+ }
+
+ u64 GetTracePos() const {
+ const int hs = GetHistorySize();
+ // The trace consists of 2^(hs+1) parts of kTracePartSize events each;
+ // e.g. when hs == 0, the trace consists of 2 parts.
+ const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
+ return epoch() & mask;
+ }
+
+ private:
+ friend class Shadow;
+ static const int kTidShift = 64 - kTidBits - 1;
+ static const int kClkShift = kTidShift - kClkBits;
+ static const u64 kIgnoreBit = 1ull << 63;
+ static const u64 kFreedBit = 1ull << 63;
+ u64 x_;
+};
+
+// Shadow (from most significant bit):
+// freed : 1
+// tid : kTidBits
+// epoch : kClkBits
+// is_atomic : 1
+// is_read : 1
+// size_log : 2
+// addr0 : 3
+class Shadow : public FastState {
+ public:
+ explicit Shadow(u64 x)
+ : FastState(x) {
+ }
+
+ explicit Shadow(const FastState &s)
+ : FastState(s.x_) {
+ ClearHistorySize();
+ }
+
+ void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
+ DCHECK_EQ(x_ & 31, 0);
+ DCHECK_LE(addr0, 7);
+ DCHECK_LE(kAccessSizeLog, 3);
+ x_ |= (kAccessSizeLog << 3) | addr0;
+ DCHECK_EQ(kAccessSizeLog, size_log());
+ DCHECK_EQ(addr0, this->addr0());
+ }
+
+ void SetWrite(unsigned kAccessIsWrite) {
+ DCHECK_EQ(x_ & kReadBit, 0);
+ if (!kAccessIsWrite)
+ x_ |= kReadBit;
+ DCHECK_EQ(kAccessIsWrite, IsWrite());
+ }
+
+ void SetAtomic(bool kIsAtomic) {
+ DCHECK(!IsAtomic());
+ if (kIsAtomic)
+ x_ |= kAtomicBit;
+ DCHECK_EQ(IsAtomic(), kIsAtomic);
+ }
+
+ bool IsAtomic() const {
+ return x_ & kAtomicBit;
+ }
+
+ bool IsZero() const {
+ return x_ == 0;
+ }
+
+ static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
+ u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
+ DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
+ return shifted_xor == 0;
+ }
+
+ static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
+ u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
+ return masked_xor == 0;
+ }
+
+ static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ unsigned kS2AccessSize) {
+ bool res = false;
+ u64 diff = s1.addr0() - s2.addr0();
+ if ((s64)diff < 0) { // s1.addr0 < s2.addr0 // NOLINT
+ // if (s1.addr0() + s1.size() > s2.addr0()) return true;
+ if (s1.size() > -diff) res = true;
+ } else {
+ // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
+ if (kS2AccessSize > diff) res = true;
+ }
+ DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
+ return res;
+ }
+
+ // The idea behind the offset is as follows.
+ // Consider that we have 8 bools contained within a single 8-byte block
+ // (mapped to a single shadow "cell"). Now consider that we write to the
+ // bools from a single thread (which we consider the common case).
+ // Without offsetting, each access would have to scan 4 shadow values on
+ // average to find the shadow value corresponding to the bool.
+ // With offsetting we start scanning the shadow at the offset, so that
+ // each access hits the necessary shadow value straight away (at least in
+ // the expected optimistic case).
+ // This logic works seamlessly for any layout of user data. For example,
+ // if user data is {int, short, char, char}, then accesses to the int are
+ // offset by 0, to the short by 4, to the 1st char by 6 and to the 2nd char
+ // by 7. Hopefully, accesses from a single thread won't need to scan all
+ // 8 shadow values.
+ unsigned ComputeSearchOffset() {
+ return x_ & 7;
+ }
+ u64 addr0() const { return x_ & 7; }
+ u64 size() const { return 1ull << size_log(); }
+ bool IsWrite() const { return !IsRead(); }
+ bool IsRead() const { return x_ & kReadBit; }
+
+ // The idea behind the freed bit is as follows.
+ // When memory is freed (or otherwise becomes inaccessible) we write shadow
+ // values with the tid/epoch of the free and with the freed bit set.
+ // During memory access processing the freed bit is treated as the msb of
+ // the tid, so any access races with a shadow value that has the freed bit
+ // set (it is as if it were a write from a thread with which we have never
+ // synchronized). This allows us to detect accesses to freed memory without
+ // additional overhead in memory access processing, and at the same time to
+ // restore the tid/epoch of the free.
+ void MarkAsFreed() {
+ x_ |= kFreedBit;
+ }
+
+ bool IsFreed() const {
+ return x_ & kFreedBit;
+ }
+
+ bool GetFreedAndReset() {
+ bool res = x_ & kFreedBit;
+ x_ &= ~kFreedBit;
+ return res;
+ }
+
+ bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ // Analyzes the 5th bit (is_read) and the 6th bit (is_atomic).
+ bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
+ | (kIsAtomic << kAtomicShift));
+ DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
+ return v;
+ }
+
+ bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
+ return v;
+ }
+
+ bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
+ return v;
+ }
+
+ private:
+ static const u64 kReadShift = 5;
+ static const u64 kReadBit = 1ull << kReadShift;
+ static const u64 kAtomicShift = 6;
+ static const u64 kAtomicBit = 1ull << kAtomicShift;
+
+ u64 size_log() const { return (x_ >> 3) & 3; }
+
+ static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
+ if (s1.addr0() == s2.addr0()) return true;
+ if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
+ return true;
+ if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
+ return true;
+ return false;
+ }
+};
+
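+// A minimal sketch of how the runtime composes a shadow value for a memory
+// access (cf. MemoryAccess() in tsan_rtl.cc; the exact call site may differ):
+//   FastState fs = thr->fast_state;   // current tid + epoch
+//   Shadow cur(fs);                   // copies tid/epoch, drops history bits
+//   cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
+//   cur.SetWrite(kAccessIsWrite);
+//   cur.SetAtomic(kIsAtomic);
+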
+struct SignalContext;
+
+struct JmpBuf {
+ uptr sp;
+ uptr mangled_sp;
+ uptr *shadow_stack_pos;
+};
+
+// This struct is stored in TLS.
+struct ThreadState {
+ FastState fast_state;
+ // The synch epoch represents the thread's epoch before the last
+ // synchronization action. It allows us to reduce the number of shadow state
+ // updates. For example, if fast_synch_epoch=100 and the last write to addr X
+ // was at epoch=150, then when we process a write to X from the same thread
+ // at epoch=200 we do nothing, because both writes happen in the same
+ // 'synch epoch'. That is, if another memory access does not race with the
+ // former write, it does not race with the latter as well.
+ // QUESTION: can we squeeze this into ThreadState::Fast?
+ // E.g. ThreadState::Fast is 44 bits, 32 taken by synch_epoch and 12 by the
+ // epoch between synchs.
+ // This way we could save one load from TLS.
+ u64 fast_synch_epoch;
+ // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit()
+ // is read instead. We do not distinguish between ignoring reads and writes
+ // for better performance.
+ int ignore_reads_and_writes;
+ int ignore_sync;
+ // Go does not support ignores.
+#ifndef TSAN_GO
+ IgnoreSet mop_ignore_set;
+ IgnoreSet sync_ignore_set;
+#endif
+ // C/C++ uses a fixed-size shadow stack embedded into the Trace.
+ // Go uses a malloc-allocated shadow stack with dynamic size.
+ uptr *shadow_stack;
+ uptr *shadow_stack_end;
+ uptr *shadow_stack_pos;
+ u64 *racy_shadow_addr;
+ u64 racy_state[2];
+ MutexSet mset;
+ ThreadClock clock;
+#ifndef TSAN_GO
+ AllocatorCache alloc_cache;
+ InternalAllocatorCache internal_alloc_cache;
+ Vector<JmpBuf> jmp_bufs;
+#endif
+ u64 stat[StatCnt];
+ const int tid;
+ const int unique_id;
+ int in_rtl;
+ bool in_symbolizer;
+ bool in_ignored_lib;
+ bool is_alive;
+ bool is_freeing;
+ bool is_vptr_access;
+ const uptr stk_addr;
+ const uptr stk_size;
+ const uptr tls_addr;
+ const uptr tls_size;
+ ThreadContext *tctx;
+
+ DeadlockDetector deadlock_detector;
+
+ bool in_signal_handler;
+ SignalContext *signal_ctx;
+
+#ifndef TSAN_GO
+ u32 last_sleep_stack_id;
+ ThreadClock last_sleep_clock;
+#endif
+
+ // Set in regions of runtime that must be signal-safe and fork-safe.
+ // If set, malloc must not be called.
+ int nomalloc;
+
+ explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size);
+};
+
+Context *CTX();
+
+#ifndef TSAN_GO
+extern THREADLOCAL char cur_thread_placeholder[];
+INLINE ThreadState *cur_thread() {
+ return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
+}
+#endif
+
+class ThreadContext : public ThreadContextBase {
+ public:
+ explicit ThreadContext(int tid);
+ ~ThreadContext();
+ ThreadState *thr;
+#ifdef TSAN_GO
+ StackTrace creation_stack;
+#else
+ u32 creation_stack_id;
+#endif
+ SyncClock sync;
+ // Epoch at which the thread started.
+ // If we see an event from the thread stamped with an older epoch,
+ // the event is from a dead thread that shared its tid with this thread.
+ u64 epoch0;
+ u64 epoch1;
+
+ // Override superclass callbacks.
+ void OnDead();
+ void OnJoined(void *arg);
+ void OnFinished();
+ void OnStarted(void *arg);
+ void OnCreated(void *arg);
+ void OnReset();
+};
+
+struct RacyStacks {
+ MD5Hash hash[2];
+ bool operator==(const RacyStacks &other) const {
+ if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
+ return true;
+ if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
+ return true;
+ return false;
+ }
+};
+
+struct RacyAddress {
+ uptr addr_min;
+ uptr addr_max;
+};
+
+struct FiredSuppression {
+ ReportType type;
+ uptr pc;
+ Suppression *supp;
+};
+
+struct Context {
+ Context();
+
+ bool initialized;
+
+ SyncTab synctab;
+
+ Mutex report_mtx;
+ int nreported;
+ int nmissed_expected;
+ atomic_uint64_t last_symbolize_time_ns;
+
+ ThreadRegistry *thread_registry;
+
+ Vector<RacyStacks> racy_stacks;
+ Vector<RacyAddress> racy_addresses;
+ // The number of fired suppressions may grow large.
+ InternalMmapVector<FiredSuppression> fired_suppressions;
+
+ Flags flags;
+
+ u64 stat[StatCnt];
+ u64 int_alloc_cnt[MBlockTypeCount];
+ u64 int_alloc_siz[MBlockTypeCount];
+};
+
+class ScopedInRtl {
+ public:
+ ScopedInRtl();
+ ~ScopedInRtl();
+ private:
+ ThreadState *thr_;
+ int in_rtl_;
+ int errno_;
+};
+
+class ScopedReport {
+ public:
+ explicit ScopedReport(ReportType typ);
+ ~ScopedReport();
+
+ void AddStack(const StackTrace *stack);
+ void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
+ const MutexSet *mset);
+ void AddThread(const ThreadContext *tctx);
+ void AddMutex(const SyncVar *s);
+ void AddLocation(uptr addr, uptr size);
+ void AddSleep(u32 stack_id);
+ void SetCount(int count);
+
+ const ReportDesc *GetReport() const;
+
+ private:
+ Context *ctx_;
+ ReportDesc *rep_;
+
+ void AddMutex(u64 id);
+
+ ScopedReport(const ScopedReport&);
+ void operator = (const ScopedReport&);
+};
+
+void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
+
+void StatAggregate(u64 *dst, u64 *src);
+void StatOutput(u64 *stat);
+void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
+ if (kCollectStats)
+ thr->stat[typ] += n;
+}
+void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
+ if (kCollectStats)
+ thr->stat[typ] = n;
+}
+
+void MapShadow(uptr addr, uptr size);
+void MapThreadTrace(uptr addr, uptr size);
+void DontNeedShadowFor(uptr addr, uptr size);
+void InitializeShadowMemory();
+void InitializeInterceptors();
+void InitializeLibIgnore();
+void InitializeDynamicAnnotations();
+
+void ReportRace(ThreadState *thr);
+bool OutputReport(Context *ctx,
+ const ScopedReport &srep,
+ const ReportStack *suppress_stack1 = 0,
+ const ReportStack *suppress_stack2 = 0,
+ const ReportLocation *suppress_loc = 0);
+bool IsFiredSuppression(Context *ctx,
+ const ScopedReport &srep,
+ const StackTrace &trace);
+bool IsExpectedReport(uptr addr, uptr size);
+void PrintMatchedBenignRaces();
+bool FrameIsInternal(const ReportStack *frame);
+ReportStack *SkipTsanInternalFrames(ReportStack *ent);
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
+# define DPrintf Printf
+#else
+# define DPrintf(...)
+#endif
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
+# define DPrintf2 Printf
+#else
+# define DPrintf2(...)
+#endif
+
+u32 CurrentStackId(ThreadState *thr, uptr pc);
+ReportStack *SymbolizeStackId(u32 stack_id);
+void PrintCurrentStack(ThreadState *thr, uptr pc);
+void PrintCurrentStackSlow(); // uses libunwind
+
+void Initialize(ThreadState *thr);
+int Finalize(ThreadState *thr);
+
+SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
+ bool write_lock, bool create);
+SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
+
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur);
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, bool is_write);
+void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, uptr step, bool is_write);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int size, bool kAccessIsWrite, bool kIsAtomic);
+
+const int kSizeLog1 = 0;
+const int kSizeLog2 = 1;
+const int kSizeLog4 = 2;
+const int kSizeLog8 = 3;
+
+void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
+}
+
+void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
+}
+
+void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
+}
+
+void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+}
+
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
+
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
+
+void FuncEntry(ThreadState *thr, uptr pc);
+void FuncExit(ThreadState *thr);
+
+int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+void ThreadStart(ThreadState *thr, int tid, uptr os_id);
+void ThreadFinish(ThreadState *thr);
+int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
+void ThreadJoin(ThreadState *thr, uptr pc, int tid);
+void ThreadDetach(ThreadState *thr, uptr pc, int tid);
+void ThreadFinalize(ThreadState *thr);
+void ThreadSetName(ThreadState *thr, const char *name);
+int ThreadCount(ThreadState *thr);
+void ProcessPendingSignals(ThreadState *thr);
+
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
+ bool rw, bool recursive, bool linker_init);
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr);
+void AcquireGlobal(ThreadState *thr, uptr pc);
+void Release(ThreadState *thr, uptr pc, uptr addr);
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
+void AfterSleep(ThreadState *thr, uptr pc);
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+
+// The hacky call uses a custom calling convention and an assembly thunk.
+// It is considerably faster than a normal call for the caller
+// if it is not executed (it is intended for slow paths of hot functions).
+// The trick is that the call preserves all registers and the compiler
+// does not treat it as a call.
+// If it does not work for you, use a normal call.
+#if TSAN_DEBUG == 0
+// The caller may not create a stack frame for itself at all,
+// so we create a reserve stack frame for it (1024 bytes must be enough).
+#define HACKY_CALL(f) \
+ __asm__ __volatile__("sub $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(1024) \
+ ".hidden " #f "_thunk;" \
+ "call " #f "_thunk;" \
+ "add $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(-1024) \
+ ::: "memory", "cc");
+#else
+#define HACKY_CALL(f) f()
+#endif
+
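+// Usage sketch: HACKY_CALL(__tsan_trace_switch) emits an inline-asm call of
+// __tsan_trace_switch_thunk (defined in tsan_rtl_amd64.S), which saves all
+// scratch registers and aligns the stack before calling the real function.
+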
+void TraceSwitch(ThreadState *thr);
+uptr TraceTopPC(ThreadState *thr);
+uptr TraceSize();
+uptr TraceParts();
+Trace *ThreadTrace(int tid);
+
+extern "C" void __tsan_trace_switch();
+void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
+ EventType typ, u64 addr) {
+ DCHECK_GE((int)typ, 0);
+ DCHECK_LE((int)typ, 7);
+ DCHECK_EQ(GetLsb(addr, 61), addr);
+ StatInc(thr, StatEvents);
+ u64 pos = fs.GetTracePos();
+ if (UNLIKELY((pos % kTracePartSize) == 0)) {
+#ifndef TSAN_GO
+ HACKY_CALL(__tsan_trace_switch);
+#else
+ TraceSwitch(thr);
+#endif
+ }
+ Event *trace = (Event*)GetThreadTrace(fs.tid());
+ Event *evp = &trace[pos];
+ Event ev = (u64)addr | ((u64)typ << 61);
+ *evp = ev;
+}
+
+} // namespace __tsan
+
+#endif // TSAN_RTL_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_rtl_amd64.S b/gcc-4.9/libsanitizer/tsan/tsan_rtl_amd64.S
new file mode 100644
index 000000000..71a2ecda9
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_rtl_amd64.S
@@ -0,0 +1,302 @@
+#include "sanitizer_common/sanitizer_asm.h"
+.hidden __tsan_trace_switch
+.globl __tsan_trace_switch_thunk
+__tsan_trace_switch_thunk:
+ CFI_STARTPROC
+ # Save scratch registers.
+ push %rax
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rax, 0)
+ push %rcx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rcx, 0)
+ push %rdx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdx, 0)
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ push %r8
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r8, 0)
+ push %r9
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r9, 0)
+ push %r10
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r10, 0)
+ push %r11
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r11, 0)
+ # Align stack frame.
+ push %rbx # non-scratch
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rbx, 0)
+ mov %rsp, %rbx # save current rsp
+ CFI_DEF_CFA_REGISTER(%rbx)
+ shr $4, %rsp # clear 4 lsb, align to 16
+ shl $4, %rsp
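+ # (The shr/shl pair rounds %rsp down to a 16-byte boundary, as the ABI
+ # requires for the call below; the original value was saved in %rbx above.)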
+
+ call __tsan_trace_switch
+
+ # Unalign stack frame back.
+ mov %rbx, %rsp # restore the original rsp
+ CFI_DEF_CFA_REGISTER(%rsp)
+ pop %rbx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ # Restore scratch registers.
+ pop %r11
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r10
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r9
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r8
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rcx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rax
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rax)
+ CFI_RESTORE(%rbx)
+ CFI_RESTORE(%rcx)
+ CFI_RESTORE(%rdx)
+ CFI_RESTORE(%rsi)
+ CFI_RESTORE(%rdi)
+ CFI_RESTORE(%r8)
+ CFI_RESTORE(%r9)
+ CFI_RESTORE(%r10)
+ CFI_RESTORE(%r11)
+ ret
+ CFI_ENDPROC
+
+.hidden __tsan_report_race
+.globl __tsan_report_race_thunk
+__tsan_report_race_thunk:
+ CFI_STARTPROC
+ # Save scratch registers.
+ push %rax
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rax, 0)
+ push %rcx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rcx, 0)
+ push %rdx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdx, 0)
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ push %r8
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r8, 0)
+ push %r9
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r9, 0)
+ push %r10
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r10, 0)
+ push %r11
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r11, 0)
+ # Align stack frame.
+ push %rbx # non-scratch
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rbx, 0)
+ mov %rsp, %rbx # save current rsp
+ CFI_DEF_CFA_REGISTER(%rbx)
+ shr $4, %rsp # clear 4 lsb, align to 16
+ shl $4, %rsp
+
+ call __tsan_report_race
+
+ # Unalign stack frame back.
+ mov %rbx, %rsp # restore the original rsp
+ CFI_DEF_CFA_REGISTER(%rsp)
+ pop %rbx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ # Restore scratch registers.
+ pop %r11
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r10
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r9
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r8
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rcx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rax
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rax)
+ CFI_RESTORE(%rbx)
+ CFI_RESTORE(%rcx)
+ CFI_RESTORE(%rdx)
+ CFI_RESTORE(%rsi)
+ CFI_RESTORE(%rdi)
+ CFI_RESTORE(%r8)
+ CFI_RESTORE(%r9)
+ CFI_RESTORE(%r10)
+ CFI_RESTORE(%r11)
+ ret
+ CFI_ENDPROC
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl setjmp
+.type setjmp, @function
+setjmp:
+ CFI_STARTPROC
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // obtain %rsp
+ lea 16(%rsp), %rdi
+ mov %rdi, %rsi
+ xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
+ rol $0x11, %rsi
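+ // (this mirrors glibc's PTR_MANGLE: xor with the pointer guard stored at
+ // %fs:0x30, then rotate left by 0x11, so the interceptor receives the same
+ // mangled sp that libc will store into the jmp_buf)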
+ // call tsan interceptor
+ call __tsan_setjmp
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc setjmp
+ movl $0, %eax
+ movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ CFI_ENDPROC
+.size setjmp, .-setjmp
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl _setjmp
+.type _setjmp, @function
+_setjmp:
+ CFI_STARTPROC
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // obtain %rsp
+ lea 16(%rsp), %rdi
+ mov %rdi, %rsi
+ xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
+ rol $0x11, %rsi
+ // call tsan interceptor
+ call __tsan_setjmp
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc setjmp
+ movl $0, %eax
+ movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ CFI_ENDPROC
+.size _setjmp, .-_setjmp
+
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl sigsetjmp
+.type sigsetjmp, @function
+sigsetjmp:
+ CFI_STARTPROC
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // save savesigs parameter
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ // align stack frame
+ sub $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(8)
+ // obtain %rsp
+ lea 32(%rsp), %rdi
+ mov %rdi, %rsi
+ xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
+ rol $0x11, %rsi
+ // call tsan interceptor
+ call __tsan_setjmp
+ // unalign stack frame
+ add $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ // restore savesigs parameter
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rsi)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc sigsetjmp
+ movl $0, %eax
+ movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ CFI_ENDPROC
+.size sigsetjmp, .-sigsetjmp
+
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl __sigsetjmp
+.type __sigsetjmp, @function
+__sigsetjmp:
+ CFI_STARTPROC
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // save savesigs parameter
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ // align stack frame
+ sub $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(8)
+ // obtain %rsp
+ lea 32(%rsp), %rdi
+ mov %rdi, %rsi
+ xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
+ rol $0x11, %rsi
+ // call tsan interceptor
+ call __tsan_setjmp
+ // unalign stack frame
+ add $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ // restore savesigs parameter
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rsi)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc sigsetjmp
+ movl $0, %eax
+ movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ CFI_ENDPROC
+.size __sigsetjmp, .-__sigsetjmp
+
+#ifdef __linux__
+/* We do not need an executable stack. */
+.section .note.GNU-stack,"",@progbits
+#endif
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_rtl_mutex.cc b/gcc-4.9/libsanitizer/tsan/tsan_rtl_mutex.cc
new file mode 100644
index 000000000..d9a3a3baa
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_rtl_mutex.cc
@@ -0,0 +1,346 @@
+//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+#include "tsan_sync.h"
+#include "tsan_report.h"
+#include "tsan_symbolize.h"
+#include "tsan_platform.h"
+
+namespace __tsan {
+
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
+ bool rw, bool recursive, bool linker_init) {
+ Context *ctx = CTX();
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexCreate);
+ if (!linker_init && IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ s->is_rw = rw;
+ s->is_recursive = recursive;
+ s->is_linker_init = linker_init;
+ s->mtx.Unlock();
+}
+
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
+ Context *ctx = CTX();
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexDestroy);
+#ifndef TSAN_GO
+ // Global mutexes not marked as LINKER_INITIALIZED
+ // cause tons of uninteresting reports, so just ignore them.
+ if (IsGlobalVar(addr))
+ return;
+#endif
+ SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
+ if (s == 0)
+ return;
+ if (IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
+ if (flags()->report_destroy_locked
+ && s->owner_tid != SyncVar::kInvalidTid
+ && !s->is_broken) {
+ s->is_broken = true;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeMutexDestroyLocked);
+ rep.AddMutex(s);
+ StackTrace trace;
+ trace.ObtainCurrent(thr, pc);
+ rep.AddStack(&trace);
+ FastState last(s->last_lock);
+ RestoreStack(last.tid(), last.epoch(), &trace, 0);
+ rep.AddStack(&trace);
+ rep.AddLocation(s->addr, 1);
+ OutputReport(ctx, rep);
+ }
+ thr->mset.Remove(s->GetId());
+ DestroyAndFree(s);
+}
+
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
+ CHECK_GT(rec, 0);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ if (s->owner_tid == SyncVar::kInvalidTid) {
+ CHECK_EQ(s->recursion, 0);
+ s->owner_tid = thr->tid;
+ s->last_lock = thr->fast_state.raw();
+ } else if (s->owner_tid == thr->tid) {
+ CHECK_GT(s->recursion, 0);
+ } else {
+ Printf("ThreadSanitizer WARNING: double lock of mutex %p\n", addr);
+ PrintCurrentStack(thr, pc);
+ }
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexLock);
+ AcquireImpl(thr, pc, &s->clock);
+ AcquireImpl(thr, pc, &s->read_clock);
+ } else if (!s->is_recursive) {
+ StatInc(thr, StatMutexRecLock);
+ }
+ s->recursion += rec;
+ thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+ s->mtx.Unlock();
+}
+
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+ int rec = 0;
+ if (s->recursion == 0) {
+ if (!s->is_broken) {
+ s->is_broken = true;
+ Printf("ThreadSanitizer WARNING: unlock of unlocked mutex %p\n", addr);
+ PrintCurrentStack(thr, pc);
+ }
+ } else if (s->owner_tid != thr->tid) {
+ if (!s->is_broken) {
+ s->is_broken = true;
+ Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
+ addr);
+ PrintCurrentStack(thr, pc);
+ }
+ } else {
+ rec = all ? s->recursion : 1;
+ s->recursion -= rec;
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexUnlock);
+ s->owner_tid = SyncVar::kInvalidTid;
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ } else {
+ StatInc(thr, StatMutexRecUnlock);
+ }
+ }
+ thr->mset.Del(s->GetId(), true);
+ s->mtx.Unlock();
+ return rec;
+}
+
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexReadLock);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ if (s->owner_tid != SyncVar::kInvalidTid) {
+ Printf("ThreadSanitizer WARNING: read lock of a write locked mutex %p\n",
+ addr);
+ PrintCurrentStack(thr, pc);
+ }
+ AcquireImpl(thr, pc, &s->clock);
+ s->last_lock = thr->fast_state.raw();
+ thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+ s->mtx.ReadUnlock();
+}
+
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexReadUnlock);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ if (s->owner_tid != SyncVar::kInvalidTid) {
+ Printf("ThreadSanitizer WARNING: read unlock of a write locked mutex %p\n",
+ addr);
+ PrintCurrentStack(thr, pc);
+ }
+ ReleaseImpl(thr, pc, &s->read_clock);
+ s->mtx.Unlock();
+ thr->mset.Del(s->GetId(), false);
+}
+
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ bool write = true;
+ if (s->owner_tid == SyncVar::kInvalidTid) {
+ // Seems to be read unlock.
+ write = false;
+ StatInc(thr, StatMutexReadUnlock);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ ReleaseImpl(thr, pc, &s->read_clock);
+ } else if (s->owner_tid == thr->tid) {
+ // Seems to be write unlock.
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+ CHECK_GT(s->recursion, 0);
+ s->recursion--;
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexUnlock);
+ s->owner_tid = SyncVar::kInvalidTid;
+ ReleaseImpl(thr, pc, &s->clock);
+ } else {
+ StatInc(thr, StatMutexRecUnlock);
+ }
+ } else if (!s->is_broken) {
+ s->is_broken = true;
+ Printf("ThreadSanitizer WARNING: mutex %p is unlock by wrong thread\n",
+ addr);
+ PrintCurrentStack(thr, pc);
+ }
+ thr->mset.Del(s->GetId(), write);
+ s->mtx.Unlock();
+}
+
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
+ Context *ctx = CTX();
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ s->owner_tid = SyncVar::kInvalidTid;
+ s->recursion = 0;
+ s->mtx.Unlock();
+}
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ AcquireImpl(thr, pc, &s->clock);
+ s->mtx.ReadUnlock();
+}
+
+static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status == ThreadStatusRunning)
+ thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
+ else
+ thr->clock.set(tctx->tid, tctx->epoch1);
+}
+
+void AcquireGlobal(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: AcquireGlobal\n", thr->tid);
+ if (thr->ignore_sync)
+ return;
+ ThreadRegistryLock l(CTX()->thread_registry);
+ CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ UpdateClockCallback, thr);
+}
+
+void Release(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: Release %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, pc, &s->clock);
+ s->mtx.Unlock();
+}
+
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
+ CHECK_GT(thr->in_rtl, 0);
+ DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ s->mtx.Unlock();
+}
+
+#ifndef TSAN_GO
+static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status == ThreadStatusRunning)
+ thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
+ else
+ thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
+}
+
+void AfterSleep(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: AfterSleep %zx\n", thr->tid);
+ if (thr->ignore_sync)
+ return;
+ thr->last_sleep_stack_id = CurrentStackId(thr, pc);
+ ThreadRegistryLock l(CTX()->thread_registry);
+ CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ UpdateSleepClockCallback, thr);
+}
+#endif
+
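+// The *Impl functions below implement the happens-before machinery on top of
+// vector clocks (tsan_clock.h): acquire joins the sync object's clock into
+// the thread's clock, release publishes the thread's clock into the sync
+// object's clock.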
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.acquire(c);
+ StatInc(thr, StatSyncAcquire);
+}
+
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(c);
+ StatInc(thr, StatSyncRelease);
+}
+
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.ReleaseStore(c);
+ StatInc(thr, StatSyncRelease);
+}
+
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.acq_rel(c);
+ StatInc(thr, StatSyncAcquire);
+ StatInc(thr, StatSyncRelease);
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_rtl_report.cc b/gcc-4.9/libsanitizer/tsan/tsan_rtl_report.cc
new file mode 100644
index 000000000..f2248afea
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_rtl_report.cc
@@ -0,0 +1,722 @@
+//===-- tsan_rtl_report.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_fd.h"
+
+namespace __tsan {
+
+using namespace __sanitizer; // NOLINT
+
+static ReportStack *SymbolizeStack(const StackTrace& trace);
+
+void TsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ ScopedInRtl in_rtl;
+ Printf("FATAL: ThreadSanitizer CHECK failed: "
+ "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
+ file, line, cond, (uptr)v1, (uptr)v2);
+ PrintCurrentStackSlow();
+ Die();
+}
+
+// Can be overridden by an application/test to intercept reports.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnReport(const ReportDesc *rep, bool suppressed);
+#else
+SANITIZER_INTERFACE_ATTRIBUTE
+bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
+ (void)rep;
+ return suppressed;
+}
+#endif
+
+static void StackStripMain(ReportStack *stack) {
+ ReportStack *last_frame = 0;
+ ReportStack *last_frame2 = 0;
+ const char *prefix = "__interceptor_";
+ uptr prefix_len = internal_strlen(prefix);
+ const char *path_prefix = flags()->strip_path_prefix;
+ uptr path_prefix_len = internal_strlen(path_prefix);
+ char *pos;
+ for (ReportStack *ent = stack; ent; ent = ent->next) {
+ if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
+ ent->func += prefix_len;
+ if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
+ ent->file = pos + path_prefix_len;
+ if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
+ ent->file += 2;
+ last_frame2 = last_frame;
+ last_frame = ent;
+ }
+
+ if (last_frame2 == 0)
+ return;
+ const char *last = last_frame->func;
+#ifndef TSAN_GO
+ const char *last2 = last_frame2->func;
+ // Strip frame above 'main'
+ if (last2 && 0 == internal_strcmp(last2, "main")) {
+ last_frame2->next = 0;
+ // Strip our internal thread start routine.
+ } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
+ last_frame2->next = 0;
+ // Strip global ctors init.
+ } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
+ last_frame2->next = 0;
+ // If both are 0, then we probably just failed to symbolize.
+ } else if (last || last2) {
+ // Ensure that we recovered the stack completely. A trimmed stack
+ // can actually happen if we do not instrument some code,
+ // so it's only a debug print. However, we must try hard not to miss it
+ // due to our own fault.
+ DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
+ }
+#else
+ // The last frame always points into the runtime (gosched0, goexit0, runtime.main).
+ last_frame2->next = 0;
+ (void)last;
+#endif
+}
+
+#ifndef TSAN_GO
+ReportStack *SymbolizeStackId(u32 stack_id) {
+ uptr ssz = 0;
+ const uptr *stack = StackDepotGet(stack_id, &ssz);
+ if (stack == 0)
+ return 0;
+ StackTrace trace;
+ trace.Init(stack, ssz);
+ return SymbolizeStack(trace);
+}
+#endif
+
+static ReportStack *SymbolizeStack(const StackTrace& trace) {
+ if (trace.IsEmpty())
+ return 0;
+ ReportStack *stack = 0;
+ for (uptr si = 0; si < trace.Size(); si++) {
+ const uptr pc = trace.Get(si);
+#ifndef TSAN_GO
+ // We obtain the return address, that is, the address of the next
+ // instruction, so offset it by 1 byte.
+ const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
+#else
+ // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
+ uptr pc1 = pc;
+ if (si != trace.Size() - 1)
+ pc1 -= 1;
+#endif
+ ReportStack *ent = SymbolizeCode(pc1);
+ CHECK_NE(ent, 0);
+ ReportStack *last = ent;
+ while (last->next) {
+ last->pc = pc; // restore original pc for report
+ last = last->next;
+ }
+ last->pc = pc; // restore original pc for report
+ last->next = stack;
+ stack = ent;
+ }
+ StackStripMain(stack);
+ return stack;
+}
+
+ScopedReport::ScopedReport(ReportType typ) {
+ ctx_ = CTX();
+ ctx_->thread_registry->CheckLocked();
+ void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
+ rep_ = new(mem) ReportDesc;
+ rep_->typ = typ;
+ ctx_->report_mtx.Lock();
+ CommonSanitizerReportMutex.Lock();
+}
+
+ScopedReport::~ScopedReport() {
+ CommonSanitizerReportMutex.Unlock();
+ ctx_->report_mtx.Unlock();
+ DestroyAndFree(rep_);
+}
+
+void ScopedReport::AddStack(const StackTrace *stack) {
+ ReportStack **rs = rep_->stacks.PushBack();
+ *rs = SymbolizeStack(*stack);
+}
+
+void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
+ const StackTrace *stack, const MutexSet *mset) {
+ void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
+ ReportMop *mop = new(mem) ReportMop;
+ rep_->mops.PushBack(mop);
+ mop->tid = s.tid();
+ mop->addr = addr + s.addr0();
+ mop->size = s.size();
+ mop->write = s.IsWrite();
+ mop->atomic = s.IsAtomic();
+ mop->stack = SymbolizeStack(*stack);
+ for (uptr i = 0; i < mset->Size(); i++) {
+ MutexSet::Desc d = mset->Get(i);
+ u64 uid = 0;
+ uptr addr = SyncVar::SplitId(d.id, &uid);
+ SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
+ // Check that the mutex is still alive.
+ // Another mutex can be created at the same address,
+ // so check uid as well.
+ if (s && s->CheckId(uid)) {
+ ReportMopMutex mtx = {s->uid, d.write};
+ mop->mset.PushBack(mtx);
+ AddMutex(s);
+ } else {
+ ReportMopMutex mtx = {d.id, d.write};
+ mop->mset.PushBack(mtx);
+ AddMutex(d.id);
+ }
+ if (s)
+ s->mtx.ReadUnlock();
+ }
+}
+
+void ScopedReport::AddThread(const ThreadContext *tctx) {
+ for (uptr i = 0; i < rep_->threads.Size(); i++) {
+ if ((u32)rep_->threads[i]->id == tctx->tid)
+ return;
+ }
+ void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
+ ReportThread *rt = new(mem) ReportThread();
+ rep_->threads.PushBack(rt);
+ rt->id = tctx->tid;
+ rt->pid = tctx->os_id;
+ rt->running = (tctx->status == ThreadStatusRunning);
+ rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
+ rt->parent_tid = tctx->parent_tid;
+ rt->stack = 0;
+#ifdef TSAN_GO
+ rt->stack = SymbolizeStack(tctx->creation_stack);
+#else
+ rt->stack = SymbolizeStackId(tctx->creation_stack_id);
+#endif
+}
+
+#ifndef TSAN_GO
+static ThreadContext *FindThreadByUidLocked(int unique_id) {
+ Context *ctx = CTX();
+ ctx->thread_registry->CheckLocked();
+ for (unsigned i = 0; i < kMaxTid; i++) {
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(i));
+ if (tctx && tctx->unique_id == (u32)unique_id) {
+ return tctx;
+ }
+ }
+ return 0;
+}
+
+static ThreadContext *FindThreadByTidLocked(int tid) {
+ Context *ctx = CTX();
+ ctx->thread_registry->CheckLocked();
+ return static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(tid));
+}
+
+static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
+ uptr addr = (uptr)arg;
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status != ThreadStatusRunning)
+ return false;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
+ (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
+}
+
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
+ Context *ctx = CTX();
+ ctx->thread_registry->CheckLocked();
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
+ (void*)addr));
+ if (!tctx)
+ return 0;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
+ return tctx;
+}
+#endif
+
+void ScopedReport::AddMutex(const SyncVar *s) {
+ for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+ if (rep_->mutexes[i]->id == s->uid)
+ return;
+ }
+ void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
+ ReportMutex *rm = new(mem) ReportMutex();
+ rep_->mutexes.PushBack(rm);
+ rm->id = s->uid;
+ rm->destroyed = false;
+ rm->stack = 0;
+#ifndef TSAN_GO
+ rm->stack = SymbolizeStackId(s->creation_stack_id);
+#endif
+}
+
+void ScopedReport::AddMutex(u64 id) {
+ for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+ if (rep_->mutexes[i]->id == id)
+ return;
+ }
+ void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
+ ReportMutex *rm = new(mem) ReportMutex();
+ rep_->mutexes.PushBack(rm);
+ rm->id = id;
+ rm->destroyed = true;
+ rm->stack = 0;
+}
+
+void ScopedReport::AddLocation(uptr addr, uptr size) {
+ if (addr == 0)
+ return;
+#ifndef TSAN_GO
+ int fd = -1;
+ int creat_tid = -1;
+ u32 creat_stack = 0;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
+ || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
+ void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
+ ReportLocation *loc = new(mem) ReportLocation();
+ rep_->locs.PushBack(loc);
+ loc->type = ReportLocationFD;
+ loc->fd = fd;
+ loc->tid = creat_tid;
+ loc->stack = SymbolizeStackId(creat_stack);
+ ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
+ if (tctx)
+ AddThread(tctx);
+ return;
+ }
+ MBlock *b = 0;
+ if (allocator()->PointerIsMine((void*)addr)
+ && (b = user_mblock(0, (void*)addr))) {
+ ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
+ void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
+ ReportLocation *loc = new(mem) ReportLocation();
+ rep_->locs.PushBack(loc);
+ loc->type = ReportLocationHeap;
+ loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
+ loc->size = b->Size();
+ loc->tid = tctx ? tctx->tid : b->Tid();
+ loc->name = 0;
+ loc->file = 0;
+ loc->line = 0;
+ loc->stack = 0;
+ loc->stack = SymbolizeStackId(b->StackId());
+ if (tctx)
+ AddThread(tctx);
+ return;
+ }
+ bool is_stack = false;
+ if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
+ void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
+ ReportLocation *loc = new(mem) ReportLocation();
+ rep_->locs.PushBack(loc);
+ loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
+ loc->tid = tctx->tid;
+ AddThread(tctx);
+ }
+ ReportLocation *loc = SymbolizeData(addr);
+ if (loc) {
+ rep_->locs.PushBack(loc);
+ return;
+ }
+#endif
+}
+
+#ifndef TSAN_GO
+void ScopedReport::AddSleep(u32 stack_id) {
+ rep_->sleep = SymbolizeStackId(stack_id);
+}
+#endif
+
+void ScopedReport::SetCount(int count) {
+ rep_->count = count;
+}
+
+const ReportDesc *ScopedReport::GetReport() const {
+ return rep_;
+}
+
+void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
+ // This function restores the stack trace and mutex set for the thread/epoch.
+ // It does so by getting the stack trace and mutex set at the beginning of
+ // the trace part, and then replaying the trace up to the given epoch.
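+ // Roughly: locate the trace part containing the epoch, seed the shadow
+ // stack and mutex set from the snapshot taken at the part boundary
+ // (hdr->stack0 / hdr->mset0), then apply the Mop/FuncEnter/FuncExit and
+ // Lock/Unlock events up to the target epoch.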
+ Context *ctx = CTX();
+ ctx->thread_registry->CheckLocked();
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(tid));
+ if (tctx == 0)
+ return;
+ if (tctx->status != ThreadStatusRunning
+ && tctx->status != ThreadStatusFinished
+ && tctx->status != ThreadStatusDead)
+ return;
+ Trace* trace = ThreadTrace(tctx->tid);
+ Lock l(&trace->mtx);
+ const int partidx = (epoch / kTracePartSize) % TraceParts();
+ TraceHeader* hdr = &trace->headers[partidx];
+ if (epoch < hdr->epoch0)
+ return;
+ const u64 epoch0 = RoundDown(epoch, TraceSize());
+ const u64 eend = epoch % TraceSize();
+ const u64 ebegin = RoundDown(eend, kTracePartSize);
+ DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
+ tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
+ InternalScopedBuffer<uptr> stack(kShadowStackSize);
+ for (uptr i = 0; i < hdr->stack0.Size(); i++) {
+ stack[i] = hdr->stack0.Get(i);
+ DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
+ }
+ if (mset)
+ *mset = hdr->mset0;
+ uptr pos = hdr->stack0.Size();
+ Event *events = (Event*)GetThreadTrace(tid);
+ for (uptr i = ebegin; i <= eend; i++) {
+ Event ev = events[i];
+ EventType typ = (EventType)(ev >> 61);
+ uptr pc = (uptr)(ev & ((1ull << 61) - 1));
+ DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
+ if (typ == EventTypeMop) {
+ stack[pos] = pc;
+ } else if (typ == EventTypeFuncEnter) {
+ stack[pos++] = pc;
+ } else if (typ == EventTypeFuncExit) {
+ if (pos > 0)
+ pos--;
+ }
+ if (mset) {
+ if (typ == EventTypeLock) {
+ mset->Add(pc, true, epoch0 + i);
+ } else if (typ == EventTypeUnlock) {
+ mset->Del(pc, true);
+ } else if (typ == EventTypeRLock) {
+ mset->Add(pc, false, epoch0 + i);
+ } else if (typ == EventTypeRUnlock) {
+ mset->Del(pc, false);
+ }
+ }
+ for (uptr j = 0; j <= pos; j++)
+ DPrintf2(" #%zu: %zx\n", j, stack[j]);
+ }
+ if (pos == 0 && stack[0] == 0)
+ return;
+ pos++;
+ stk->Init(stack.data(), pos);
+}
+
+static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
+ uptr addr_min, uptr addr_max) {
+ Context *ctx = CTX();
+ bool equal_stack = false;
+ RacyStacks hash;
+ if (flags()->suppress_equal_stacks) {
+ hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+ if (hash == ctx->racy_stacks[i]) {
+ DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
+ equal_stack = true;
+ break;
+ }
+ }
+ }
+ bool equal_address = false;
+ RacyAddress ra0 = {addr_min, addr_max};
+ if (flags()->suppress_equal_addresses) {
+ for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+ RacyAddress ra2 = ctx->racy_addresses[i];
+ uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+ uptr minend = min(ra0.addr_max, ra2.addr_max);
+ if (maxbeg < minend) {
+ DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
+ equal_address = true;
+ break;
+ }
+ }
+ }
+ if (equal_stack || equal_address) {
+ if (!equal_stack)
+ ctx->racy_stacks.PushBack(hash);
+ if (!equal_address)
+ ctx->racy_addresses.PushBack(ra0);
+ return true;
+ }
+ return false;
+}
+
+static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
+ uptr addr_min, uptr addr_max) {
+ Context *ctx = CTX();
+ if (flags()->suppress_equal_stacks) {
+ RacyStacks hash;
+ hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ ctx->racy_stacks.PushBack(hash);
+ }
+ if (flags()->suppress_equal_addresses) {
+ RacyAddress ra0 = {addr_min, addr_max};
+ ctx->racy_addresses.PushBack(ra0);
+ }
+}
+
+bool OutputReport(Context *ctx,
+ const ScopedReport &srep,
+ const ReportStack *suppress_stack1,
+ const ReportStack *suppress_stack2,
+ const ReportLocation *suppress_loc) {
+ atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
+ const ReportDesc *rep = srep.GetReport();
+ Suppression *supp = 0;
+ uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
+ if (suppress_pc == 0)
+ suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
+ if (suppress_pc == 0)
+ suppress_pc = IsSuppressed(rep->typ, suppress_loc, &supp);
+ if (suppress_pc != 0) {
+ FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
+ ctx->fired_suppressions.push_back(s);
+ }
+ if (OnReport(rep, suppress_pc != 0))
+ return false;
+ PrintReport(rep);
+ ctx->nreported++;
+ if (flags()->halt_on_error)
+ internal__exit(flags()->exitcode);
+ return true;
+}
+
+bool IsFiredSuppression(Context *ctx,
+ const ScopedReport &srep,
+ const StackTrace &trace) {
+ for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
+ if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
+ continue;
+ for (uptr j = 0; j < trace.Size(); j++) {
+ FiredSuppression *s = &ctx->fired_suppressions[k];
+ if (trace.Get(j) == s->pc) {
+ if (s->supp)
+ s->supp->hit_count++;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static bool IsFiredSuppression(Context *ctx,
+ const ScopedReport &srep,
+ uptr addr) {
+ for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
+ if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
+ continue;
+ FiredSuppression *s = &ctx->fired_suppressions[k];
+ if (addr == s->pc) {
+ if (s->supp)
+ s->supp->hit_count++;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool FrameIsInternal(const ReportStack *frame) {
+ return frame != 0 && frame->file != 0
+ && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
+ internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(frame->file, "tsan_interface_"));
+}
+
+// On programs that use Java we see weird reports like:
+// WARNING: ThreadSanitizer: data race (pid=22512)
+// Read of size 8 at 0x7d2b00084318 by thread 100:
+// #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
+// #1 <null> <null>:0 (0x7f7ad9b40193)
+// Previous write of size 8 at 0x7d2b00084318 by thread 105:
+// #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
+// #1 <null> <null>:0 (0x7f7ad9b42707)
+static bool IsJavaNonsense(const ReportDesc *rep) {
+#ifndef TSAN_GO
+ for (uptr i = 0; i < rep->mops.Size(); i++) {
+ ReportMop *mop = rep->mops[i];
+ ReportStack *frame = mop->stack;
+ if (frame == 0
+ || (frame->func == 0 && frame->file == 0 && frame->line == 0
+ && frame->module == 0)) {
+ return true;
+ }
+ if (FrameIsInternal(frame)) {
+ frame = frame->next;
+ if (frame == 0
+ || (frame->func == 0 && frame->file == 0 && frame->line == 0
+ && frame->module == 0)) {
+ if (frame) {
+ FiredSuppression supp = {rep->typ, frame->pc, 0};
+ CTX()->fired_suppressions.push_back(supp);
+ }
+ return true;
+ }
+ }
+ }
+#endif
+ return false;
+}
+
+static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
+ Shadow s0(thr->racy_state[0]);
+ Shadow s1(thr->racy_state[1]);
+ CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
+ if (!s0.IsAtomic() && !s1.IsAtomic())
+ return true;
+ if (s0.IsAtomic() && s1.IsFreed())
+ return true;
+ if (s1.IsAtomic() && thr->is_freeing)
+ return true;
+ return false;
+}
+
+void ReportRace(ThreadState *thr) {
+ if (!flags()->report_bugs)
+ return;
+ ScopedInRtl in_rtl;
+
+ if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+ return;
+
+ bool freed = false;
+ {
+ Shadow s(thr->racy_state[1]);
+ freed = s.GetFreedAndReset();
+ thr->racy_state[1] = s.raw();
+ }
+
+ uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
+ uptr addr_min = 0;
+ uptr addr_max = 0;
+ {
+ uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
+ uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
+ uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
+ uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
+ addr_min = min(a0, a1);
+ addr_max = max(e0, e1);
+ if (IsExpectedReport(addr_min, addr_max - addr_min))
+ return;
+ }
+
+ Context *ctx = CTX();
+ ThreadRegistryLock l0(ctx->thread_registry);
+
+ ReportType typ = ReportTypeRace;
+ if (thr->is_vptr_access)
+ typ = ReportTypeVptrRace;
+ else if (freed)
+ typ = ReportTypeUseAfterFree;
+ ScopedReport rep(typ);
+ if (IsFiredSuppression(ctx, rep, addr))
+ return;
+ const uptr kMop = 2;
+ StackTrace traces[kMop];
+ const uptr toppc = TraceTopPC(thr);
+ traces[0].ObtainCurrent(thr, toppc);
+ if (IsFiredSuppression(ctx, rep, traces[0]))
+ return;
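+  // MutexSet is too large to live on the stack, so construct it in a
+  // heap-backed scoped buffer via placement new.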
+ InternalScopedBuffer<MutexSet> mset2(1);
+ new(mset2.data()) MutexSet();
+ Shadow s2(thr->racy_state[1]);
+ RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
+ if (IsFiredSuppression(ctx, rep, traces[1]))
+ return;
+
+ if (HandleRacyStacks(thr, traces, addr_min, addr_max))
+ return;
+
+ for (uptr i = 0; i < kMop; i++) {
+ Shadow s(thr->racy_state[i]);
+ rep.AddMemoryAccess(addr, s, &traces[i],
+ i == 0 ? &thr->mset : mset2.data());
+ }
+
+ if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
+ return;
+
+ for (uptr i = 0; i < kMop; i++) {
+ FastState s(thr->racy_state[i]);
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(s.tid()));
+ if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
+ continue;
+ rep.AddThread(tctx);
+ }
+
+ rep.AddLocation(addr_min, addr_max - addr_min);
+
+#ifndef TSAN_GO
+ { // NOLINT
+ Shadow s(thr->racy_state[1]);
+ if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
+ rep.AddSleep(thr->last_sleep_stack_id);
+ }
+#endif
+
+ ReportLocation *suppress_loc = rep.GetReport()->locs.Size() ?
+ rep.GetReport()->locs[0] : 0;
+ if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
+ rep.GetReport()->mops[1]->stack,
+ suppress_loc))
+ return;
+
+ AddRacyStacks(thr, traces, addr_min, addr_max);
+}
+
+void PrintCurrentStack(ThreadState *thr, uptr pc) {
+ StackTrace trace;
+ trace.ObtainCurrent(thr, pc);
+ PrintStack(SymbolizeStack(trace));
+}
+
+void PrintCurrentStackSlow() {
+#ifndef TSAN_GO
+ __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
+ sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
+ ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(),
+ 0, 0, 0, false);
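+  // Unwind() fills the buffer innermost frame first, while tsan stack traces
+  // are stored outermost-first, so reverse the frames in place before handing
+  // them to StackTrace::Init().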
+ for (uptr i = 0; i < ptrace->size / 2; i++) {
+ uptr tmp = ptrace->trace[i];
+ ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
+ ptrace->trace[ptrace->size - i - 1] = tmp;
+ }
+ StackTrace trace;
+ trace.Init(ptrace->trace, ptrace->size);
+ PrintStack(SymbolizeStack(trace));
+#endif
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_rtl_thread.cc b/gcc-4.9/libsanitizer/tsan/tsan_rtl_thread.cc
new file mode 100644
index 000000000..dea669835
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_rtl_thread.cc
@@ -0,0 +1,397 @@
+//===-- tsan_rtl_thread.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+
+namespace __tsan {
+
+// ThreadContext implementation.
+
+ThreadContext::ThreadContext(int tid)
+ : ThreadContextBase(tid)
+ , thr()
+ , sync()
+ , epoch0()
+ , epoch1() {
+}
+
+#ifndef TSAN_GO
+ThreadContext::~ThreadContext() {
+}
+#endif
+
+void ThreadContext::OnDead() {
+ sync.Reset();
+}
+
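+// Runs in the joining thread on pthread_join(): acquiring `sync` pairs with
+// the release done in OnFinished() by the finishing thread, establishing the
+// join happens-before edge.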
+void ThreadContext::OnJoined(void *arg) {
+ ThreadState *caller_thr = static_cast<ThreadState *>(arg);
+ AcquireImpl(caller_thr, 0, &sync);
+ sync.Reset();
+}
+
+struct OnCreatedArgs {
+ ThreadState *thr;
+ uptr pc;
+};
+
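+// Runs in the creator thread on thread creation: releasing `sync` here pairs
+// with the acquire in OnStarted() in the child, so the child starts with the
+// creator's vector clock (the pthread_create happens-before edge).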
+void ThreadContext::OnCreated(void *arg) {
+ thr = 0;
+ if (tid == 0)
+ return;
+ OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+ args->thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(args->thr, 0, &sync);
+#ifdef TSAN_GO
+ creation_stack.ObtainCurrent(args->thr, args->pc);
+#else
+ creation_stack_id = CurrentStackId(args->thr, args->pc);
+#endif
+ if (reuse_count == 0)
+ StatInc(args->thr, StatThreadMaxTid);
+}
+
+void ThreadContext::OnReset() {
+ sync.Reset();
+ FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
+  // TODO: FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
+}
+
+struct OnStartedArgs {
+ ThreadState *thr;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+ OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
+ thr = args->thr;
+ // RoundUp so that one trace part does not contain events
+ // from different threads.
+ epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
+ epoch1 = (u64)-1;
+ new(thr) ThreadState(CTX(), tid, unique_id,
+ epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
+#ifndef TSAN_GO
+ thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
+ thr->shadow_stack_pos = thr->shadow_stack;
+ thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
+#else
+ // Setup dynamic shadow stack.
+ const int kInitStackSize = 8;
+ thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
+ kInitStackSize * sizeof(uptr));
+ thr->shadow_stack_pos = thr->shadow_stack;
+ thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
+#endif
+#ifndef TSAN_GO
+ AllocatorThreadStart(thr);
+#endif
+ thr->fast_synch_epoch = epoch0;
+ AcquireImpl(thr, 0, &sync);
+ thr->fast_state.SetHistorySize(flags()->history_size);
+ const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
+ Trace *thr_trace = ThreadTrace(thr->tid);
+ thr_trace->headers[trace].epoch0 = epoch0;
+ StatInc(thr, StatSyncAcquire);
+ sync.Reset();
+ DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
+ "tls_addr=%zx tls_size=%zx\n",
+ tid, (uptr)epoch0, args->stk_addr, args->stk_size,
+ args->tls_addr, args->tls_size);
+ thr->is_alive = true;
+}
+
+void ThreadContext::OnFinished() {
+ if (!detached) {
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, 0, &sync);
+ }
+ epoch1 = thr->fast_state.epoch();
+
+#ifndef TSAN_GO
+ AllocatorThreadFinish(thr);
+#endif
+ thr->~ThreadState();
+ StatAggregate(CTX()->stat, thr->stat);
+ thr = 0;
+}
+
+#ifndef TSAN_GO
+struct ThreadLeak {
+ ThreadContext *tctx;
+ int count;
+};
+
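+// Groups finished-but-not-joined threads by creation stack, so that N threads
+// leaked from the same call site produce a single report with count N.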
+static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
+ Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->detached || tctx->status != ThreadStatusFinished)
+ return;
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
+ leaks[i].count++;
+ return;
+ }
+ }
+ ThreadLeak leak = {tctx, 1};
+ leaks.PushBack(leak);
+}
+#endif
+
+#ifndef TSAN_GO
+static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
+ if (tctx->tid == 0) {
+ Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
+ } else {
+ Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
+ " created at:\n", tctx->tid, tctx->name);
+ PrintStack(SymbolizeStackId(tctx->creation_stack_id));
+ }
+ Printf(" One of the following ignores was not ended"
+ " (in order of probability)\n");
+ for (uptr i = 0; i < set->Size(); i++) {
+ Printf(" Ignore was enabled at:\n");
+ PrintStack(SymbolizeStackId(set->At(i)));
+ }
+ Die();
+}
+
+static void ThreadCheckIgnore(ThreadState *thr) {
+ if (thr->ignore_reads_and_writes)
+ ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
+ if (thr->ignore_sync)
+ ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
+}
+#else
+static void ThreadCheckIgnore(ThreadState *thr) {}
+#endif
+
+void ThreadFinalize(ThreadState *thr) {
+ CHECK_GT(thr->in_rtl, 0);
+ ThreadCheckIgnore(thr);
+#ifndef TSAN_GO
+ if (!flags()->report_thread_leaks)
+ return;
+ ThreadRegistryLock l(CTX()->thread_registry);
+ Vector<ThreadLeak> leaks(MBlockScopedBuf);
+ CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ MaybeReportThreadLeak, &leaks);
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ ScopedReport rep(ReportTypeThreadLeak);
+ rep.AddThread(leaks[i].tctx);
+ rep.SetCount(leaks[i].count);
+ OutputReport(CTX(), rep);
+ }
+#endif
+}
+
+int ThreadCount(ThreadState *thr) {
+ CHECK_GT(thr->in_rtl, 0);
+ Context *ctx = CTX();
+ uptr result;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
+ return (int)result;
+}
+
+int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
+ CHECK_GT(thr->in_rtl, 0);
+ StatInc(thr, StatThreadCreate);
+ Context *ctx = CTX();
+ OnCreatedArgs args = { thr, pc };
+ int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
+ DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
+ StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
+ return tid;
+}
+
+void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
+ Context *ctx = CTX();
+ CHECK_GT(thr->in_rtl, 0);
+ uptr stk_addr = 0;
+ uptr stk_size = 0;
+ uptr tls_addr = 0;
+ uptr tls_size = 0;
+ GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
+
+ if (tid) {
+ if (stk_addr && stk_size)
+ MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
+
+ if (tls_addr && tls_size) {
+      // Check that the thr object is in tls.
+ const uptr thr_beg = (uptr)thr;
+ const uptr thr_end = (uptr)thr + sizeof(*thr);
+ CHECK_GE(thr_beg, tls_addr);
+ CHECK_LE(thr_beg, tls_addr + tls_size);
+ CHECK_GE(thr_end, tls_addr);
+ CHECK_LE(thr_end, tls_addr + tls_size);
+ // Since the thr object is huge, skip it.
+ MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
+ MemoryRangeImitateWrite(thr, /*pc=*/ 2,
+ thr_end, tls_addr + tls_size - thr_end);
+ }
+ }
+
+ ThreadRegistry *tr = ctx->thread_registry;
+ OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
+ tr->StartThread(tid, os_id, &args);
+
+ tr->Lock();
+ thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
+ tr->Unlock();
+}
+
+void ThreadFinish(ThreadState *thr) {
+ CHECK_GT(thr->in_rtl, 0);
+ ThreadCheckIgnore(thr);
+ StatInc(thr, StatThreadFinish);
+ if (thr->stk_addr && thr->stk_size)
+ DontNeedShadowFor(thr->stk_addr, thr->stk_size);
+ if (thr->tls_addr && thr->tls_size)
+ DontNeedShadowFor(thr->tls_addr, thr->tls_size);
+ thr->is_alive = false;
+ Context *ctx = CTX();
+ ctx->thread_registry->FinishThread(thr->tid);
+}
+
+static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+ uptr uid = (uptr)arg;
+ if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
+ tctx->user_id = 0;
+ return true;
+ }
+ return false;
+}
+
+int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
+ CHECK_GT(thr->in_rtl, 0);
+ Context *ctx = CTX();
+ int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
+ DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
+ return res;
+}
+
+void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
+ CHECK_GT(thr->in_rtl, 0);
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
+ Context *ctx = CTX();
+ ctx->thread_registry->JoinThread(tid, thr);
+}
+
+void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
+ CHECK_GT(thr->in_rtl, 0);
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ Context *ctx = CTX();
+ ctx->thread_registry->DetachThread(tid);
+}
+
+void ThreadSetName(ThreadState *thr, const char *name) {
+ CHECK_GT(thr->in_rtl, 0);
+ CTX()->thread_registry->SetThreadName(thr->tid, name);
+}
+
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, bool is_write) {
+ if (size == 0)
+ return;
+
+ u64 *shadow_mem = (u64*)MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
+ thr->tid, (void*)pc, (void*)addr,
+ (int)size, is_write);
+
+#if TSAN_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsAppMem(addr + size - 1)) {
+ Printf("Access to non app mem %zx\n", addr + size - 1);
+ DCHECK(IsAppMem(addr + size - 1));
+ }
+ if (!IsShadowMem((uptr)shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem((uptr)shadow_mem));
+ }
+ if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
+ Printf("Bad shadow addr %p (%zx)\n",
+ shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
+ DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
+ }
+#endif
+
+ StatInc(thr, StatMopRange);
+
+ if (*shadow_mem == kShadowRodata) {
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ StatInc(thr, StatMopRangeRodata);
+ return;
+ }
+
+ FastState fast_state = thr->fast_state;
+ if (fast_state.GetIgnoreBit())
+ return;
+
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+
+ bool unaligned = (addr % kShadowCell) != 0;
+
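+  // The range is processed in three phases relative to 8-byte shadow cells
+  // (kShadowCell): a byte-granular head up to the first cell boundary, whole
+  // cells in the middle, and a byte-granular tail. For example (illustrative
+  // only), addr=0x7005, size=20 splits into 3 head bytes (0x7005-0x7007),
+  // two full cells (0x7008-0x7017), and 1 tail byte (0x7018).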
+ // Handle unaligned beginning, if any.
+ for (; addr % kShadowCell && size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
+ shadow_mem, cur);
+ }
+ if (unaligned)
+ shadow_mem += kShadowCnt;
+ // Handle middle part, if any.
+ for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
+ int const kAccessSizeLog = 3;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
+ shadow_mem, cur);
+ shadow_mem += kShadowCnt;
+ }
+ // Handle ending, if any.
+ for (; size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
+ shadow_mem, cur);
+ }
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_stat.cc b/gcc-4.9/libsanitizer/tsan/tsan_stat.cc
new file mode 100644
index 000000000..6bc345397
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_stat.cc
@@ -0,0 +1,485 @@
+//===-- tsan_stat.cc ------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stat.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void StatAggregate(u64 *dst, u64 *src) {
+ if (!kCollectStats)
+ return;
+ for (int i = 0; i < StatCnt; i++)
+ dst[i] += src[i];
+}
+
+void StatOutput(u64 *stat) {
+ if (!kCollectStats)
+ return;
+
+ stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero];
+
+ static const char *name[StatCnt] = {};
+ name[StatMop] = "Memory accesses ";
+ name[StatMopRead] = " Including reads ";
+ name[StatMopWrite] = " writes ";
+ name[StatMop1] = " Including size 1 ";
+ name[StatMop2] = " size 2 ";
+ name[StatMop4] = " size 4 ";
+ name[StatMop8] = " size 8 ";
+ name[StatMopSame] = " Including same ";
+ name[StatMopRange] = " Including range ";
+ name[StatMopRodata] = " Including .rodata ";
+ name[StatMopRangeRodata] = " Including .rodata range ";
+ name[StatShadowProcessed] = "Shadow processed ";
+ name[StatShadowZero] = " Including empty ";
+ name[StatShadowNonZero] = " Including non empty ";
+ name[StatShadowSameSize] = " Including same size ";
+ name[StatShadowIntersect] = " intersect ";
+ name[StatShadowNotIntersect] = " not intersect ";
+ name[StatShadowSameThread] = " Including same thread ";
+ name[StatShadowAnotherThread] = " another thread ";
+ name[StatShadowReplace] = " Including evicted ";
+
+ name[StatFuncEnter] = "Function entries ";
+ name[StatFuncExit] = "Function exits ";
+ name[StatEvents] = "Events collected ";
+
+ name[StatThreadCreate] = "Total threads created ";
+ name[StatThreadFinish] = " threads finished ";
+ name[StatThreadReuse] = " threads reused ";
+ name[StatThreadMaxTid] = " max tid ";
+ name[StatThreadMaxAlive] = " max alive threads ";
+
+ name[StatMutexCreate] = "Mutexes created ";
+ name[StatMutexDestroy] = " destroyed ";
+ name[StatMutexLock] = " lock ";
+ name[StatMutexUnlock] = " unlock ";
+ name[StatMutexRecLock] = " recursive lock ";
+ name[StatMutexRecUnlock] = " recursive unlock ";
+ name[StatMutexReadLock] = " read lock ";
+ name[StatMutexReadUnlock] = " read unlock ";
+
+ name[StatSyncCreated] = "Sync objects created ";
+ name[StatSyncDestroyed] = " destroyed ";
+ name[StatSyncAcquire] = " acquired ";
+ name[StatSyncRelease] = " released ";
+
+ name[StatAtomic] = "Atomic operations ";
+ name[StatAtomicLoad] = " Including load ";
+ name[StatAtomicStore] = " store ";
+ name[StatAtomicExchange] = " exchange ";
+ name[StatAtomicFetchAdd] = " fetch_add ";
+ name[StatAtomicFetchSub] = " fetch_sub ";
+ name[StatAtomicFetchAnd] = " fetch_and ";
+ name[StatAtomicFetchOr] = " fetch_or ";
+ name[StatAtomicFetchXor] = " fetch_xor ";
+ name[StatAtomicFetchNand] = " fetch_nand ";
+ name[StatAtomicCAS] = " compare_exchange ";
+ name[StatAtomicFence] = " fence ";
+ name[StatAtomicRelaxed] = " Including relaxed ";
+ name[StatAtomicConsume] = " consume ";
+ name[StatAtomicAcquire] = " acquire ";
+ name[StatAtomicRelease] = " release ";
+ name[StatAtomicAcq_Rel] = " acq_rel ";
+ name[StatAtomicSeq_Cst] = " seq_cst ";
+ name[StatAtomic1] = " Including size 1 ";
+ name[StatAtomic2] = " size 2 ";
+ name[StatAtomic4] = " size 4 ";
+ name[StatAtomic8] = " size 8 ";
+ name[StatAtomic16] = " size 16 ";
+
+ name[StatInterceptor] = "Interceptors ";
+ name[StatInt_longjmp] = " longjmp ";
+ name[StatInt_siglongjmp] = " siglongjmp ";
+ name[StatInt_malloc] = " malloc ";
+ name[StatInt___libc_memalign] = " __libc_memalign ";
+ name[StatInt_calloc] = " calloc ";
+ name[StatInt_realloc] = " realloc ";
+ name[StatInt_free] = " free ";
+ name[StatInt_cfree] = " cfree ";
+ name[StatInt_malloc_usable_size] = " malloc_usable_size ";
+ name[StatInt_mmap] = " mmap ";
+ name[StatInt_mmap64] = " mmap64 ";
+ name[StatInt_munmap] = " munmap ";
+ name[StatInt_memalign] = " memalign ";
+ name[StatInt_valloc] = " valloc ";
+ name[StatInt_pvalloc] = " pvalloc ";
+ name[StatInt_posix_memalign] = " posix_memalign ";
+ name[StatInt__Znwm] = " _Znwm ";
+ name[StatInt__ZnwmRKSt9nothrow_t] = " _ZnwmRKSt9nothrow_t ";
+ name[StatInt__Znam] = " _Znam ";
+ name[StatInt__ZnamRKSt9nothrow_t] = " _ZnamRKSt9nothrow_t ";
+ name[StatInt__ZdlPv] = " _ZdlPv ";
+ name[StatInt__ZdlPvRKSt9nothrow_t] = " _ZdlPvRKSt9nothrow_t ";
+ name[StatInt__ZdaPv] = " _ZdaPv ";
+ name[StatInt__ZdaPvRKSt9nothrow_t] = " _ZdaPvRKSt9nothrow_t ";
+ name[StatInt_strlen] = " strlen ";
+ name[StatInt_memset] = " memset ";
+ name[StatInt_memcpy] = " memcpy ";
+ name[StatInt_textdomain] = " textdomain ";
+ name[StatInt_strcmp] = " strcmp ";
+ name[StatInt_memchr] = " memchr ";
+ name[StatInt_memrchr] = " memrchr ";
+ name[StatInt_memmove] = " memmove ";
+ name[StatInt_memcmp] = " memcmp ";
+ name[StatInt_strchr] = " strchr ";
+ name[StatInt_strchrnul] = " strchrnul ";
+ name[StatInt_strrchr] = " strrchr ";
+ name[StatInt_strncmp] = " strncmp ";
+ name[StatInt_strcpy] = " strcpy ";
+ name[StatInt_strncpy] = " strncpy ";
+ name[StatInt_strstr] = " strstr ";
+ name[StatInt_strdup] = " strdup ";
+ name[StatInt_strcasecmp] = " strcasecmp ";
+ name[StatInt_strncasecmp] = " strncasecmp ";
+ name[StatInt_atexit] = " atexit ";
+ name[StatInt__exit] = " _exit ";
+ name[StatInt___cxa_guard_acquire] = " __cxa_guard_acquire ";
+ name[StatInt___cxa_guard_release] = " __cxa_guard_release ";
+ name[StatInt___cxa_guard_abort] = " __cxa_guard_abort ";
+ name[StatInt_pthread_create] = " pthread_create ";
+ name[StatInt_pthread_join] = " pthread_join ";
+ name[StatInt_pthread_detach] = " pthread_detach ";
+ name[StatInt_pthread_mutex_init] = " pthread_mutex_init ";
+ name[StatInt_pthread_mutex_destroy] = " pthread_mutex_destroy ";
+ name[StatInt_pthread_mutex_lock] = " pthread_mutex_lock ";
+ name[StatInt_pthread_mutex_trylock] = " pthread_mutex_trylock ";
+ name[StatInt_pthread_mutex_timedlock] = " pthread_mutex_timedlock ";
+ name[StatInt_pthread_mutex_unlock] = " pthread_mutex_unlock ";
+ name[StatInt_pthread_spin_init] = " pthread_spin_init ";
+ name[StatInt_pthread_spin_destroy] = " pthread_spin_destroy ";
+ name[StatInt_pthread_spin_lock] = " pthread_spin_lock ";
+ name[StatInt_pthread_spin_trylock] = " pthread_spin_trylock ";
+ name[StatInt_pthread_spin_unlock] = " pthread_spin_unlock ";
+ name[StatInt_pthread_rwlock_init] = " pthread_rwlock_init ";
+ name[StatInt_pthread_rwlock_destroy] = " pthread_rwlock_destroy ";
+ name[StatInt_pthread_rwlock_rdlock] = " pthread_rwlock_rdlock ";
+ name[StatInt_pthread_rwlock_tryrdlock] = " pthread_rwlock_tryrdlock ";
+ name[StatInt_pthread_rwlock_timedrdlock]
+ = " pthread_rwlock_timedrdlock ";
+ name[StatInt_pthread_rwlock_wrlock] = " pthread_rwlock_wrlock ";
+ name[StatInt_pthread_rwlock_trywrlock] = " pthread_rwlock_trywrlock ";
+ name[StatInt_pthread_rwlock_timedwrlock]
+ = " pthread_rwlock_timedwrlock ";
+ name[StatInt_pthread_rwlock_unlock] = " pthread_rwlock_unlock ";
+ name[StatInt_pthread_cond_init] = " pthread_cond_init ";
+ name[StatInt_pthread_cond_destroy] = " pthread_cond_destroy ";
+ name[StatInt_pthread_cond_signal] = " pthread_cond_signal ";
+ name[StatInt_pthread_cond_broadcast] = " pthread_cond_broadcast ";
+ name[StatInt_pthread_cond_wait] = " pthread_cond_wait ";
+ name[StatInt_pthread_cond_timedwait] = " pthread_cond_timedwait ";
+ name[StatInt_pthread_barrier_init] = " pthread_barrier_init ";
+ name[StatInt_pthread_barrier_destroy] = " pthread_barrier_destroy ";
+ name[StatInt_pthread_barrier_wait] = " pthread_barrier_wait ";
+ name[StatInt_pthread_once] = " pthread_once ";
+ name[StatInt_pthread_getschedparam] = " pthread_getschedparam ";
+ name[StatInt_pthread_setname_np] = " pthread_setname_np ";
+ name[StatInt_sem_init] = " sem_init ";
+ name[StatInt_sem_destroy] = " sem_destroy ";
+ name[StatInt_sem_wait] = " sem_wait ";
+ name[StatInt_sem_trywait] = " sem_trywait ";
+ name[StatInt_sem_timedwait] = " sem_timedwait ";
+ name[StatInt_sem_post] = " sem_post ";
+ name[StatInt_sem_getvalue] = " sem_getvalue ";
+ name[StatInt_stat] = " stat ";
+ name[StatInt___xstat] = " __xstat ";
+ name[StatInt_stat64] = " stat64 ";
+ name[StatInt___xstat64] = " __xstat64 ";
+ name[StatInt_lstat] = " lstat ";
+ name[StatInt___lxstat] = " __lxstat ";
+ name[StatInt_lstat64] = " lstat64 ";
+ name[StatInt___lxstat64] = " __lxstat64 ";
+ name[StatInt_fstat] = " fstat ";
+ name[StatInt___fxstat] = " __fxstat ";
+ name[StatInt_fstat64] = " fstat64 ";
+ name[StatInt___fxstat64] = " __fxstat64 ";
+ name[StatInt_open] = " open ";
+ name[StatInt_open64] = " open64 ";
+ name[StatInt_creat] = " creat ";
+ name[StatInt_creat64] = " creat64 ";
+ name[StatInt_dup] = " dup ";
+ name[StatInt_dup2] = " dup2 ";
+ name[StatInt_dup3] = " dup3 ";
+ name[StatInt_eventfd] = " eventfd ";
+ name[StatInt_signalfd] = " signalfd ";
+ name[StatInt_inotify_init] = " inotify_init ";
+ name[StatInt_inotify_init1] = " inotify_init1 ";
+ name[StatInt_socket] = " socket ";
+ name[StatInt_socketpair] = " socketpair ";
+ name[StatInt_connect] = " connect ";
+ name[StatInt_bind] = " bind ";
+ name[StatInt_listen] = " listen ";
+ name[StatInt_accept] = " accept ";
+ name[StatInt_accept4] = " accept4 ";
+ name[StatInt_epoll_create] = " epoll_create ";
+ name[StatInt_epoll_create1] = " epoll_create1 ";
+ name[StatInt_close] = " close ";
+ name[StatInt___close] = " __close ";
+ name[StatInt___res_iclose] = " __res_iclose ";
+ name[StatInt_pipe] = " pipe ";
+ name[StatInt_pipe2] = " pipe2 ";
+ name[StatInt_read] = " read ";
+ name[StatInt_prctl] = " prctl ";
+ name[StatInt_pread] = " pread ";
+ name[StatInt_pread64] = " pread64 ";
+ name[StatInt_readv] = " readv ";
+ name[StatInt_preadv] = " preadv ";
+ name[StatInt_preadv64] = " preadv64 ";
+ name[StatInt_write] = " write ";
+ name[StatInt_pwrite] = " pwrite ";
+ name[StatInt_pwrite64] = " pwrite64 ";
+ name[StatInt_writev] = " writev ";
+ name[StatInt_pwritev] = " pwritev ";
+ name[StatInt_pwritev64] = " pwritev64 ";
+ name[StatInt_send] = " send ";
+ name[StatInt_sendmsg] = " sendmsg ";
+ name[StatInt_recv] = " recv ";
+ name[StatInt_recvmsg] = " recvmsg ";
+ name[StatInt_unlink] = " unlink ";
+ name[StatInt_fopen] = " fopen ";
+ name[StatInt_freopen] = " freopen ";
+ name[StatInt_fclose] = " fclose ";
+ name[StatInt_fread] = " fread ";
+ name[StatInt_fwrite] = " fwrite ";
+ name[StatInt_fflush] = " fflush ";
+ name[StatInt_abort] = " abort ";
+ name[StatInt_puts] = " puts ";
+ name[StatInt_rmdir] = " rmdir ";
+ name[StatInt_opendir] = " opendir ";
+ name[StatInt_epoll_ctl] = " epoll_ctl ";
+ name[StatInt_epoll_wait] = " epoll_wait ";
+ name[StatInt_poll] = " poll ";
+ name[StatInt_ppoll] = " ppoll ";
+ name[StatInt_sigaction] = " sigaction ";
+ name[StatInt_signal] = " signal ";
+ name[StatInt_sigsuspend] = " sigsuspend ";
+ name[StatInt_raise] = " raise ";
+ name[StatInt_kill] = " kill ";
+ name[StatInt_pthread_kill] = " pthread_kill ";
+ name[StatInt_sleep] = " sleep ";
+ name[StatInt_usleep] = " usleep ";
+ name[StatInt_nanosleep] = " nanosleep ";
+ name[StatInt_gettimeofday] = " gettimeofday ";
+ name[StatInt_fork] = " fork ";
+ name[StatInt_vscanf] = " vscanf ";
+ name[StatInt_vsscanf] = " vsscanf ";
+ name[StatInt_vfscanf] = " vfscanf ";
+ name[StatInt_scanf] = " scanf ";
+ name[StatInt_sscanf] = " sscanf ";
+ name[StatInt_fscanf] = " fscanf ";
+ name[StatInt___isoc99_vscanf] = " vscanf ";
+ name[StatInt___isoc99_vsscanf] = " vsscanf ";
+ name[StatInt___isoc99_vfscanf] = " vfscanf ";
+ name[StatInt___isoc99_scanf] = " scanf ";
+ name[StatInt___isoc99_sscanf] = " sscanf ";
+ name[StatInt___isoc99_fscanf] = " fscanf ";
+ name[StatInt_on_exit] = " on_exit ";
+ name[StatInt___cxa_atexit] = " __cxa_atexit ";
+ name[StatInt_localtime] = " localtime ";
+ name[StatInt_localtime_r] = " localtime_r ";
+ name[StatInt_gmtime] = " gmtime ";
+ name[StatInt_gmtime_r] = " gmtime_r ";
+ name[StatInt_ctime] = " ctime ";
+ name[StatInt_ctime_r] = " ctime_r ";
+ name[StatInt_asctime] = " asctime ";
+ name[StatInt_asctime_r] = " asctime_r ";
+ name[StatInt_strptime] = " strptime ";
+ name[StatInt_frexp] = " frexp ";
+ name[StatInt_frexpf] = " frexpf ";
+ name[StatInt_frexpl] = " frexpl ";
+ name[StatInt_getpwnam] = " getpwnam ";
+ name[StatInt_getpwuid] = " getpwuid ";
+ name[StatInt_getgrnam] = " getgrnam ";
+ name[StatInt_getgrgid] = " getgrgid ";
+ name[StatInt_getpwnam_r] = " getpwnam_r ";
+ name[StatInt_getpwuid_r] = " getpwuid_r ";
+ name[StatInt_getgrnam_r] = " getgrnam_r ";
+ name[StatInt_getgrgid_r] = " getgrgid_r ";
+ name[StatInt_clock_getres] = " clock_getres ";
+ name[StatInt_clock_gettime] = " clock_gettime ";
+ name[StatInt_clock_settime] = " clock_settime ";
+ name[StatInt_getitimer] = " getitimer ";
+ name[StatInt_setitimer] = " setitimer ";
+ name[StatInt_time] = " time ";
+ name[StatInt_glob] = " glob ";
+ name[StatInt_glob64] = " glob64 ";
+ name[StatInt_wait] = " wait ";
+ name[StatInt_waitid] = " waitid ";
+ name[StatInt_waitpid] = " waitpid ";
+ name[StatInt_wait3] = " wait3 ";
+ name[StatInt_wait4] = " wait4 ";
+ name[StatInt_inet_ntop] = " inet_ntop ";
+ name[StatInt_inet_pton] = " inet_pton ";
+ name[StatInt_inet_aton] = " inet_aton ";
+ name[StatInt_getaddrinfo] = " getaddrinfo ";
+ name[StatInt_getnameinfo] = " getnameinfo ";
+ name[StatInt_getsockname] = " getsockname ";
+ name[StatInt_gethostent] = " gethostent ";
+ name[StatInt_gethostbyname] = " gethostbyname ";
+ name[StatInt_gethostbyname2] = " gethostbyname2 ";
+ name[StatInt_gethostbyaddr] = " gethostbyaddr ";
+ name[StatInt_gethostent_r] = " gethostent_r ";
+ name[StatInt_gethostbyname_r] = " gethostbyname_r ";
+ name[StatInt_gethostbyname2_r] = " gethostbyname2_r ";
+ name[StatInt_gethostbyaddr_r] = " gethostbyaddr_r ";
+ name[StatInt_getsockopt] = " getsockopt ";
+ name[StatInt_modf] = " modf ";
+ name[StatInt_modff] = " modff ";
+ name[StatInt_modfl] = " modfl ";
+ name[StatInt_getpeername] = " getpeername ";
+ name[StatInt_ioctl] = " ioctl ";
+ name[StatInt_sysinfo] = " sysinfo ";
+ name[StatInt_readdir] = " readdir ";
+ name[StatInt_readdir64] = " readdir64 ";
+ name[StatInt_readdir_r] = " readdir_r ";
+ name[StatInt_readdir64_r] = " readdir64_r ";
+ name[StatInt_ptrace] = " ptrace ";
+ name[StatInt_setlocale] = " setlocale ";
+ name[StatInt_getcwd] = " getcwd ";
+ name[StatInt_get_current_dir_name] = " get_current_dir_name ";
+ name[StatInt_strtoimax] = " strtoimax ";
+ name[StatInt_strtoumax] = " strtoumax ";
+ name[StatInt_mbstowcs] = " mbstowcs ";
+ name[StatInt_mbsrtowcs] = " mbsrtowcs ";
+ name[StatInt_mbsnrtowcs] = " mbsnrtowcs ";
+ name[StatInt_wcstombs] = " wcstombs ";
+ name[StatInt_wcsrtombs] = " wcsrtombs ";
+ name[StatInt_wcsnrtombs] = " wcsnrtombs ";
+ name[StatInt_tcgetattr] = " tcgetattr ";
+ name[StatInt_realpath] = " realpath ";
+ name[StatInt_canonicalize_file_name] = " canonicalize_file_name ";
+ name[StatInt_confstr] = " confstr ";
+ name[StatInt_sched_getaffinity] = " sched_getaffinity ";
+ name[StatInt_strerror] = " strerror ";
+ name[StatInt_strerror_r] = " strerror_r ";
+ name[StatInt___xpg_strerror_r] = " __xpg_strerror_r ";
+ name[StatInt_scandir] = " scandir ";
+ name[StatInt_scandir64] = " scandir64 ";
+ name[StatInt_getgroups] = " getgroups ";
+ name[StatInt_wordexp] = " wordexp ";
+ name[StatInt_sigwait] = " sigwait ";
+ name[StatInt_sigwaitinfo] = " sigwaitinfo ";
+ name[StatInt_sigtimedwait] = " sigtimedwait ";
+ name[StatInt_sigemptyset] = " sigemptyset ";
+ name[StatInt_sigfillset] = " sigfillset ";
+ name[StatInt_sigpending] = " sigpending ";
+ name[StatInt_sigprocmask] = " sigprocmask ";
+ name[StatInt_backtrace] = " backtrace ";
+ name[StatInt_backtrace_symbols] = " backtrace_symbols ";
+ name[StatInt_dlopen] = " dlopen ";
+ name[StatInt_dlclose] = " dlclose ";
+ name[StatInt_getmntent] = " getmntent ";
+ name[StatInt_getmntent_r] = " getmntent_r ";
+ name[StatInt_statfs] = " statfs ";
+ name[StatInt_statfs64] = " statfs64 ";
+ name[StatInt_fstatfs] = " fstatfs ";
+ name[StatInt_fstatfs64] = " fstatfs64 ";
+ name[StatInt_statvfs] = " statvfs ";
+ name[StatInt_statvfs64] = " statvfs64 ";
+ name[StatInt_fstatvfs] = " fstatvfs ";
+ name[StatInt_fstatvfs64] = " fstatvfs64 ";
+ name[StatInt_initgroups] = " initgroups ";
+ name[StatInt_ether_ntoa] = " ether_ntoa ";
+ name[StatInt_ether_aton] = " ether_aton ";
+ name[StatInt_ether_ntoa_r] = " ether_ntoa_r ";
+ name[StatInt_ether_aton_r] = " ether_aton_r ";
+ name[StatInt_ether_ntohost] = " ether_ntohost ";
+ name[StatInt_ether_hostton] = " ether_hostton ";
+ name[StatInt_ether_line] = " ether_line ";
+ name[StatInt_shmctl] = " shmctl ";
+ name[StatInt_random_r] = " random_r ";
+ name[StatInt_tmpnam] = " tmpnam ";
+ name[StatInt_tmpnam_r] = " tmpnam_r ";
+ name[StatInt_tempnam] = " tempnam ";
+ name[StatInt_sincos] = " sincos ";
+ name[StatInt_sincosf] = " sincosf ";
+ name[StatInt_sincosl] = " sincosl ";
+ name[StatInt_remquo] = " remquo ";
+ name[StatInt_remquof] = " remquof ";
+ name[StatInt_remquol] = " remquol ";
+ name[StatInt_lgamma] = " lgamma ";
+ name[StatInt_lgammaf] = " lgammaf ";
+ name[StatInt_lgammal] = " lgammal ";
+ name[StatInt_lgamma_r] = " lgamma_r ";
+ name[StatInt_lgammaf_r] = " lgammaf_r ";
+ name[StatInt_lgammal_r] = " lgammal_r ";
+ name[StatInt_drand48_r] = " drand48_r ";
+ name[StatInt_lrand48_r] = " lrand48_r ";
+ name[StatInt_getline] = " getline ";
+ name[StatInt_getdelim] = " getdelim ";
+ name[StatInt_iconv] = " iconv ";
+ name[StatInt_times] = " times ";
+
+  name[StatInt_pthread_attr_getdetachstate] = "  pthread_attr_getdetachstate "; // NOLINT
+  name[StatInt_pthread_attr_getguardsize] = "  pthread_attr_getguardsize "; // NOLINT
+  name[StatInt_pthread_attr_getschedparam] = "  pthread_attr_getschedparam "; // NOLINT
+  name[StatInt_pthread_attr_getschedpolicy] = "  pthread_attr_getschedpolicy "; // NOLINT
+  name[StatInt_pthread_attr_getinheritsched] = "  pthread_attr_getinheritsched "; // NOLINT
+  name[StatInt_pthread_attr_getscope] = "  pthread_attr_getscope "; // NOLINT
+  name[StatInt_pthread_attr_getstacksize] = "  pthread_attr_getstacksize "; // NOLINT
+  name[StatInt_pthread_attr_getstack] = "  pthread_attr_getstack "; // NOLINT
+  name[StatInt_pthread_attr_getaffinity_np] = "  pthread_attr_getaffinity_np "; // NOLINT
+
+ name[StatAnnotation] = "Dynamic annotations ";
+ name[StatAnnotateHappensBefore] = " HappensBefore ";
+ name[StatAnnotateHappensAfter] = " HappensAfter ";
+ name[StatAnnotateCondVarSignal] = " CondVarSignal ";
+ name[StatAnnotateCondVarSignalAll] = " CondVarSignalAll ";
+ name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB ";
+ name[StatAnnotateCondVarWait] = " CondVarWait ";
+ name[StatAnnotateRWLockCreate] = " RWLockCreate ";
+  name[StatAnnotateRWLockCreateStatic] = "  RWLockCreateStatic ";
+ name[StatAnnotateRWLockDestroy] = " RWLockDestroy ";
+ name[StatAnnotateRWLockAcquired] = " RWLockAcquired ";
+ name[StatAnnotateRWLockReleased] = " RWLockReleased ";
+ name[StatAnnotateTraceMemory] = " TraceMemory ";
+ name[StatAnnotateFlushState] = " FlushState ";
+ name[StatAnnotateNewMemory] = " NewMemory ";
+ name[StatAnnotateNoOp] = " NoOp ";
+ name[StatAnnotateFlushExpectedRaces] = " FlushExpectedRaces ";
+ name[StatAnnotateEnableRaceDetection] = " EnableRaceDetection ";
+ name[StatAnnotateMutexIsUsedAsCondVar] = " MutexIsUsedAsCondVar ";
+ name[StatAnnotatePCQGet] = " PCQGet ";
+ name[StatAnnotatePCQPut] = " PCQPut ";
+ name[StatAnnotatePCQDestroy] = " PCQDestroy ";
+ name[StatAnnotatePCQCreate] = " PCQCreate ";
+ name[StatAnnotateExpectRace] = " ExpectRace ";
+ name[StatAnnotateBenignRaceSized] = " BenignRaceSized ";
+ name[StatAnnotateBenignRace] = " BenignRace ";
+ name[StatAnnotateIgnoreReadsBegin] = " IgnoreReadsBegin ";
+ name[StatAnnotateIgnoreReadsEnd] = " IgnoreReadsEnd ";
+ name[StatAnnotateIgnoreWritesBegin] = " IgnoreWritesBegin ";
+ name[StatAnnotateIgnoreWritesEnd] = " IgnoreWritesEnd ";
+ name[StatAnnotateIgnoreSyncBegin] = " IgnoreSyncBegin ";
+ name[StatAnnotateIgnoreSyncEnd] = " IgnoreSyncEnd ";
+ name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange ";
+ name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange ";
+ name[StatAnnotateThreadName] = " ThreadName ";
+
+ name[StatMtxTotal] = "Contentionz ";
+ name[StatMtxTrace] = " Trace ";
+ name[StatMtxThreads] = " Threads ";
+ name[StatMtxReport] = " Report ";
+ name[StatMtxSyncVar] = " SyncVar ";
+ name[StatMtxSyncTab] = " SyncTab ";
+ name[StatMtxSlab] = " Slab ";
+ name[StatMtxAtExit] = " Atexit ";
+ name[StatMtxAnnotations] = " Annotations ";
+ name[StatMtxMBlock] = " MBlock ";
+ name[StatMtxJavaMBlock] = " JavaMBlock ";
+ name[StatMtxFD] = " FD ";
+
+ Printf("Statistics:\n");
+ for (int i = 0; i < StatCnt; i++)
+ Printf("%s: %zu\n", name[i], (uptr)stat[i]);
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_stat.h b/gcc-4.9/libsanitizer/tsan/tsan_stat.h
new file mode 100644
index 000000000..3e08313d1
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_stat.h
@@ -0,0 +1,483 @@
+//===-- tsan_stat.h ---------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_STAT_H
+#define TSAN_STAT_H
+
+namespace __tsan {
+
+enum StatType {
+ // Memory access processing related stuff.
+ StatMop,
+ StatMopRead,
+ StatMopWrite,
+  StatMop1, // These must be consecutive.
+ StatMop2,
+ StatMop4,
+ StatMop8,
+ StatMopSame,
+ StatMopRange,
+ StatMopRodata,
+ StatMopRangeRodata,
+ StatShadowProcessed,
+ StatShadowZero,
+ StatShadowNonZero, // Derived.
+ StatShadowSameSize,
+ StatShadowIntersect,
+ StatShadowNotIntersect,
+ StatShadowSameThread,
+ StatShadowAnotherThread,
+ StatShadowReplace,
+
+ // Func processing.
+ StatFuncEnter,
+ StatFuncExit,
+
+ // Trace processing.
+ StatEvents,
+
+ // Threads.
+ StatThreadCreate,
+ StatThreadFinish,
+ StatThreadReuse,
+ StatThreadMaxTid,
+ StatThreadMaxAlive,
+
+ // Mutexes.
+ StatMutexCreate,
+ StatMutexDestroy,
+ StatMutexLock,
+ StatMutexUnlock,
+ StatMutexRecLock,
+ StatMutexRecUnlock,
+ StatMutexReadLock,
+ StatMutexReadUnlock,
+
+ // Synchronization.
+ StatSyncCreated,
+ StatSyncDestroyed,
+ StatSyncAcquire,
+ StatSyncRelease,
+
+ // Atomics.
+ StatAtomic,
+ StatAtomicLoad,
+ StatAtomicStore,
+ StatAtomicExchange,
+ StatAtomicFetchAdd,
+ StatAtomicFetchSub,
+ StatAtomicFetchAnd,
+ StatAtomicFetchOr,
+ StatAtomicFetchXor,
+ StatAtomicFetchNand,
+ StatAtomicCAS,
+ StatAtomicFence,
+ StatAtomicRelaxed,
+ StatAtomicConsume,
+ StatAtomicAcquire,
+ StatAtomicRelease,
+ StatAtomicAcq_Rel,
+ StatAtomicSeq_Cst,
+ StatAtomic1,
+ StatAtomic2,
+ StatAtomic4,
+ StatAtomic8,
+ StatAtomic16,
+
+ // Interceptors.
+ StatInterceptor,
+ StatInt_longjmp,
+ StatInt_siglongjmp,
+ StatInt_malloc,
+ StatInt___libc_memalign,
+ StatInt_calloc,
+ StatInt_realloc,
+ StatInt_free,
+ StatInt_cfree,
+ StatInt_malloc_usable_size,
+ StatInt_mmap,
+ StatInt_mmap64,
+ StatInt_munmap,
+ StatInt_memalign,
+ StatInt_valloc,
+ StatInt_pvalloc,
+ StatInt_posix_memalign,
+ StatInt__Znwm,
+ StatInt__ZnwmRKSt9nothrow_t,
+ StatInt__Znam,
+ StatInt__ZnamRKSt9nothrow_t,
+ StatInt__ZdlPv,
+ StatInt__ZdlPvRKSt9nothrow_t,
+ StatInt__ZdaPv,
+ StatInt__ZdaPvRKSt9nothrow_t,
+ StatInt_strlen,
+ StatInt_memset,
+ StatInt_memcpy,
+ StatInt_textdomain,
+ StatInt_strcmp,
+ StatInt_memchr,
+ StatInt_memrchr,
+ StatInt_memmove,
+ StatInt_memcmp,
+ StatInt_strchr,
+ StatInt_strchrnul,
+ StatInt_strrchr,
+ StatInt_strncmp,
+ StatInt_strcpy,
+ StatInt_strncpy,
+ StatInt_strcasecmp,
+ StatInt_strncasecmp,
+ StatInt_strstr,
+ StatInt_strdup,
+ StatInt_atexit,
+ StatInt__exit,
+ StatInt___cxa_guard_acquire,
+ StatInt___cxa_guard_release,
+ StatInt___cxa_guard_abort,
+ StatInt_pthread_create,
+ StatInt_pthread_join,
+ StatInt_pthread_detach,
+ StatInt_pthread_mutex_init,
+ StatInt_pthread_mutex_destroy,
+ StatInt_pthread_mutex_lock,
+ StatInt_pthread_mutex_trylock,
+ StatInt_pthread_mutex_timedlock,
+ StatInt_pthread_mutex_unlock,
+ StatInt_pthread_spin_init,
+ StatInt_pthread_spin_destroy,
+ StatInt_pthread_spin_lock,
+ StatInt_pthread_spin_trylock,
+ StatInt_pthread_spin_unlock,
+ StatInt_pthread_rwlock_init,
+ StatInt_pthread_rwlock_destroy,
+ StatInt_pthread_rwlock_rdlock,
+ StatInt_pthread_rwlock_tryrdlock,
+ StatInt_pthread_rwlock_timedrdlock,
+ StatInt_pthread_rwlock_wrlock,
+ StatInt_pthread_rwlock_trywrlock,
+ StatInt_pthread_rwlock_timedwrlock,
+ StatInt_pthread_rwlock_unlock,
+ StatInt_pthread_cond_init,
+ StatInt_pthread_cond_destroy,
+ StatInt_pthread_cond_signal,
+ StatInt_pthread_cond_broadcast,
+ StatInt_pthread_cond_wait,
+ StatInt_pthread_cond_timedwait,
+ StatInt_pthread_barrier_init,
+ StatInt_pthread_barrier_destroy,
+ StatInt_pthread_barrier_wait,
+ StatInt_pthread_once,
+ StatInt_pthread_getschedparam,
+ StatInt_pthread_setname_np,
+ StatInt_sem_init,
+ StatInt_sem_destroy,
+ StatInt_sem_wait,
+ StatInt_sem_trywait,
+ StatInt_sem_timedwait,
+ StatInt_sem_post,
+ StatInt_sem_getvalue,
+ StatInt_stat,
+ StatInt___xstat,
+ StatInt_stat64,
+ StatInt___xstat64,
+ StatInt_lstat,
+ StatInt___lxstat,
+ StatInt_lstat64,
+ StatInt___lxstat64,
+ StatInt_fstat,
+ StatInt___fxstat,
+ StatInt_fstat64,
+ StatInt___fxstat64,
+ StatInt_open,
+ StatInt_open64,
+ StatInt_creat,
+ StatInt_creat64,
+ StatInt_dup,
+ StatInt_dup2,
+ StatInt_dup3,
+ StatInt_eventfd,
+ StatInt_signalfd,
+ StatInt_inotify_init,
+ StatInt_inotify_init1,
+ StatInt_socket,
+ StatInt_socketpair,
+ StatInt_connect,
+ StatInt_bind,
+ StatInt_listen,
+ StatInt_accept,
+ StatInt_accept4,
+ StatInt_epoll_create,
+ StatInt_epoll_create1,
+ StatInt_close,
+ StatInt___close,
+ StatInt___res_iclose,
+ StatInt_pipe,
+ StatInt_pipe2,
+ StatInt_read,
+ StatInt_prctl,
+ StatInt_pread,
+ StatInt_pread64,
+ StatInt_readv,
+ StatInt_preadv,
+ StatInt_preadv64,
+ StatInt_write,
+ StatInt_pwrite,
+ StatInt_pwrite64,
+ StatInt_writev,
+ StatInt_pwritev,
+ StatInt_pwritev64,
+ StatInt_send,
+ StatInt_sendmsg,
+ StatInt_recv,
+ StatInt_recvmsg,
+ StatInt_unlink,
+ StatInt_fopen,
+ StatInt_freopen,
+ StatInt_fclose,
+ StatInt_fread,
+ StatInt_fwrite,
+ StatInt_fflush,
+ StatInt_abort,
+ StatInt_puts,
+ StatInt_rmdir,
+ StatInt_opendir,
+ StatInt_epoll_ctl,
+ StatInt_epoll_wait,
+ StatInt_poll,
+ StatInt_ppoll,
+ StatInt_sigaction,
+ StatInt_signal,
+ StatInt_sigsuspend,
+ StatInt_raise,
+ StatInt_kill,
+ StatInt_pthread_kill,
+ StatInt_sleep,
+ StatInt_usleep,
+ StatInt_nanosleep,
+ StatInt_gettimeofday,
+ StatInt_fork,
+ StatInt_vscanf,
+ StatInt_vsscanf,
+ StatInt_vfscanf,
+ StatInt_scanf,
+ StatInt_sscanf,
+ StatInt_fscanf,
+ StatInt___isoc99_vscanf,
+ StatInt___isoc99_vsscanf,
+ StatInt___isoc99_vfscanf,
+ StatInt___isoc99_scanf,
+ StatInt___isoc99_sscanf,
+ StatInt___isoc99_fscanf,
+ StatInt_on_exit,
+ StatInt___cxa_atexit,
+ StatInt_localtime,
+ StatInt_localtime_r,
+ StatInt_gmtime,
+ StatInt_gmtime_r,
+ StatInt_ctime,
+ StatInt_ctime_r,
+ StatInt_asctime,
+ StatInt_asctime_r,
+ StatInt_strptime,
+ StatInt_frexp,
+ StatInt_frexpf,
+ StatInt_frexpl,
+ StatInt_getpwnam,
+ StatInt_getpwuid,
+ StatInt_getgrnam,
+ StatInt_getgrgid,
+ StatInt_getpwnam_r,
+ StatInt_getpwuid_r,
+ StatInt_getgrnam_r,
+ StatInt_getgrgid_r,
+ StatInt_clock_getres,
+ StatInt_clock_gettime,
+ StatInt_clock_settime,
+ StatInt_getitimer,
+ StatInt_setitimer,
+ StatInt_time,
+ StatInt_glob,
+ StatInt_glob64,
+ StatInt_wait,
+ StatInt_waitid,
+ StatInt_waitpid,
+ StatInt_wait3,
+ StatInt_wait4,
+ StatInt_inet_ntop,
+ StatInt_inet_pton,
+ StatInt_inet_aton,
+ StatInt_getaddrinfo,
+ StatInt_getnameinfo,
+ StatInt_getsockname,
+ StatInt_gethostent,
+ StatInt_gethostbyname,
+ StatInt_gethostbyname2,
+ StatInt_gethostbyaddr,
+ StatInt_gethostent_r,
+ StatInt_gethostbyname_r,
+ StatInt_gethostbyname2_r,
+ StatInt_gethostbyaddr_r,
+ StatInt_getsockopt,
+ StatInt_modf,
+ StatInt_modff,
+ StatInt_modfl,
+ StatInt_getpeername,
+ StatInt_ioctl,
+ StatInt_sysinfo,
+ StatInt_readdir,
+ StatInt_readdir64,
+ StatInt_readdir_r,
+ StatInt_readdir64_r,
+ StatInt_ptrace,
+ StatInt_setlocale,
+ StatInt_getcwd,
+ StatInt_get_current_dir_name,
+ StatInt_strtoimax,
+ StatInt_strtoumax,
+ StatInt_mbstowcs,
+ StatInt_mbsrtowcs,
+ StatInt_mbsnrtowcs,
+ StatInt_wcstombs,
+ StatInt_wcsrtombs,
+ StatInt_wcsnrtombs,
+ StatInt_tcgetattr,
+ StatInt_realpath,
+ StatInt_canonicalize_file_name,
+ StatInt_confstr,
+ StatInt_sched_getaffinity,
+ StatInt_strerror,
+ StatInt_strerror_r,
+ StatInt___xpg_strerror_r,
+ StatInt_scandir,
+ StatInt_scandir64,
+ StatInt_getgroups,
+ StatInt_wordexp,
+ StatInt_sigwait,
+ StatInt_sigwaitinfo,
+ StatInt_sigtimedwait,
+ StatInt_sigemptyset,
+ StatInt_sigfillset,
+ StatInt_sigpending,
+ StatInt_sigprocmask,
+ StatInt_backtrace,
+ StatInt_backtrace_symbols,
+ StatInt_dlopen,
+ StatInt_dlclose,
+ StatInt_getmntent,
+ StatInt_getmntent_r,
+ StatInt_statfs,
+ StatInt_statfs64,
+ StatInt_fstatfs,
+ StatInt_fstatfs64,
+ StatInt_statvfs,
+ StatInt_statvfs64,
+ StatInt_fstatvfs,
+ StatInt_fstatvfs64,
+ StatInt_initgroups,
+ StatInt_ether_ntoa,
+ StatInt_ether_aton,
+ StatInt_ether_ntoa_r,
+ StatInt_ether_aton_r,
+ StatInt_ether_ntohost,
+ StatInt_ether_hostton,
+ StatInt_ether_line,
+ StatInt_shmctl,
+ StatInt_random_r,
+ StatInt_tmpnam,
+ StatInt_tmpnam_r,
+ StatInt_tempnam,
+ StatInt_sincos,
+ StatInt_sincosf,
+ StatInt_sincosl,
+ StatInt_remquo,
+ StatInt_remquof,
+ StatInt_remquol,
+ StatInt_lgamma,
+ StatInt_lgammaf,
+ StatInt_lgammal,
+ StatInt_lgamma_r,
+ StatInt_lgammaf_r,
+ StatInt_lgammal_r,
+ StatInt_drand48_r,
+ StatInt_lrand48_r,
+ StatInt_getline,
+ StatInt_getdelim,
+ StatInt_iconv,
+ StatInt_times,
+
+ StatInt_pthread_attr_getdetachstate,
+ StatInt_pthread_attr_getguardsize,
+ StatInt_pthread_attr_getschedparam,
+ StatInt_pthread_attr_getschedpolicy,
+ StatInt_pthread_attr_getinheritsched,
+ StatInt_pthread_attr_getscope,
+ StatInt_pthread_attr_getstacksize,
+ StatInt_pthread_attr_getstack,
+ StatInt_pthread_attr_getaffinity_np,
+
+ // Dynamic annotations.
+ StatAnnotation,
+ StatAnnotateHappensBefore,
+ StatAnnotateHappensAfter,
+ StatAnnotateCondVarSignal,
+ StatAnnotateCondVarSignalAll,
+ StatAnnotateMutexIsNotPHB,
+ StatAnnotateCondVarWait,
+ StatAnnotateRWLockCreate,
+ StatAnnotateRWLockCreateStatic,
+ StatAnnotateRWLockDestroy,
+ StatAnnotateRWLockAcquired,
+ StatAnnotateRWLockReleased,
+ StatAnnotateTraceMemory,
+ StatAnnotateFlushState,
+ StatAnnotateNewMemory,
+ StatAnnotateNoOp,
+ StatAnnotateFlushExpectedRaces,
+ StatAnnotateEnableRaceDetection,
+ StatAnnotateMutexIsUsedAsCondVar,
+ StatAnnotatePCQGet,
+ StatAnnotatePCQPut,
+ StatAnnotatePCQDestroy,
+ StatAnnotatePCQCreate,
+ StatAnnotateExpectRace,
+ StatAnnotateBenignRaceSized,
+ StatAnnotateBenignRace,
+ StatAnnotateIgnoreReadsBegin,
+ StatAnnotateIgnoreReadsEnd,
+ StatAnnotateIgnoreWritesBegin,
+ StatAnnotateIgnoreWritesEnd,
+ StatAnnotateIgnoreSyncBegin,
+ StatAnnotateIgnoreSyncEnd,
+ StatAnnotatePublishMemoryRange,
+ StatAnnotateUnpublishMemoryRange,
+ StatAnnotateThreadName,
+
+ // Internal mutex contentionz.
+ StatMtxTotal,
+ StatMtxTrace,
+ StatMtxThreads,
+ StatMtxReport,
+ StatMtxSyncVar,
+ StatMtxSyncTab,
+ StatMtxSlab,
+ StatMtxAnnotations,
+ StatMtxAtExit,
+ StatMtxMBlock,
+ StatMtxJavaMBlock,
+ StatMtxFD,
+
+ // This must be the last.
+ StatCnt
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STAT_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_suppressions.cc b/gcc-4.9/libsanitizer/tsan/tsan_suppressions.cc
new file mode 100644
index 000000000..fa0c30dc2
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_suppressions.cc
@@ -0,0 +1,168 @@
+//===-- tsan_suppressions.cc ----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+
+// Suppressions for true/false positives in standard libraries.
+static const char *const std_suppressions =
+// Libstdc++ 4.4 has data races in std::string.
+// See http://crbug.com/181502 for an example.
+"race:^_M_rep$\n"
+"race:^_M_is_leaked$\n"
+// False positive when using std <thread>.
+// Happens because we miss atomic synchronization in libstdc++.
+// See http://llvm.org/bugs/show_bug.cgi?id=17066 for details.
+"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n";
+
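+// User suppressions are loaded from the file named by the `suppressions`
+// flag (typically set via TSAN_OPTIONS=suppressions=/path/to/file) and are
+// parsed with the same one-pattern-per-line format as the list above.
+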
+// Can be overridden in the frontend.
+#ifndef TSAN_GO
+extern "C" const char *WEAK __tsan_default_suppressions() {
+ return 0;
+}
+#endif
+
+namespace __tsan {
+
+static SuppressionContext* g_ctx;
+
+static char *ReadFile(const char *filename) {
+ if (filename == 0 || filename[0] == 0)
+ return 0;
+ InternalScopedBuffer<char> tmp(4*1024);
+ if (filename[0] == '/' || GetPwd() == 0)
+ internal_snprintf(tmp.data(), tmp.size(), "%s", filename);
+ else
+ internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename);
+ uptr openrv = OpenFile(tmp.data(), false);
+ if (internal_iserror(openrv)) {
+ Printf("ThreadSanitizer: failed to open suppressions file '%s'\n",
+ tmp.data());
+ Die();
+ }
+ fd_t fd = openrv;
+ const uptr fsize = internal_filesize(fd);
+ if (fsize == (uptr)-1) {
+ Printf("ThreadSanitizer: failed to stat suppressions file '%s'\n",
+ tmp.data());
+ Die();
+ }
+ char *buf = (char*)internal_alloc(MBlockSuppression, fsize + 1);
+ if (fsize != internal_read(fd, buf, fsize)) {
+ Printf("ThreadSanitizer: failed to read suppressions file '%s'\n",
+ tmp.data());
+ Die();
+ }
+ internal_close(fd);
+ buf[fsize] = 0;
+ return buf;
+}
+
+void InitializeSuppressions() {
+ ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
+ g_ctx = new(placeholder_) SuppressionContext;
+ const char *supp = ReadFile(flags()->suppressions);
+ g_ctx->Parse(supp);
+#ifndef TSAN_GO
+ supp = __tsan_default_suppressions();
+ g_ctx->Parse(supp);
+ g_ctx->Parse(std_suppressions);
+#endif
+}
+
+SuppressionContext *GetSuppressionContext() {
+ CHECK_NE(g_ctx, 0);
+ return g_ctx;
+}
+
+SuppressionType conv(ReportType typ) {
+ if (typ == ReportTypeRace)
+ return SuppressionRace;
+ else if (typ == ReportTypeVptrRace)
+ return SuppressionRace;
+ else if (typ == ReportTypeUseAfterFree)
+ return SuppressionRace;
+ else if (typ == ReportTypeThreadLeak)
+ return SuppressionThread;
+ else if (typ == ReportTypeMutexDestroyLocked)
+ return SuppressionMutex;
+ else if (typ == ReportTypeSignalUnsafe)
+ return SuppressionSignal;
+ else if (typ == ReportTypeErrnoInSignal)
+ return SuppressionNone;
+ Printf("ThreadSanitizer: unknown report type %d\n", typ),
+ Die();
+}
+
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
+ CHECK(g_ctx);
+ if (!g_ctx->SuppressionCount() || stack == 0) return 0;
+ SuppressionType stype = conv(typ);
+ if (stype == SuppressionNone)
+ return 0;
+ Suppression *s;
+ for (const ReportStack *frame = stack; frame; frame = frame->next) {
+ if (g_ctx->Match(frame->func, stype, &s) ||
+ g_ctx->Match(frame->file, stype, &s) ||
+ g_ctx->Match(frame->module, stype, &s)) {
+ DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
+ s->hit_count++;
+ *sp = s;
+ return frame->pc;
+ }
+ }
+ return 0;
+}
+
+uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
+ CHECK(g_ctx);
+ if (!g_ctx->SuppressionCount() || loc == 0 ||
+ loc->type != ReportLocationGlobal)
+ return 0;
+ SuppressionType stype = conv(typ);
+ if (stype == SuppressionNone)
+ return 0;
+ Suppression *s;
+ if (g_ctx->Match(loc->name, stype, &s) ||
+ g_ctx->Match(loc->file, stype, &s) ||
+ g_ctx->Match(loc->module, stype, &s)) {
+ DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
+ s->hit_count++;
+ *sp = s;
+ return loc->addr;
+ }
+ return 0;
+}
+
+void PrintMatchedSuppressions() {
+ CHECK(g_ctx);
+ InternalMmapVector<Suppression *> matched(1);
+ g_ctx->GetMatched(&matched);
+ if (!matched.size())
+ return;
+ int hit_count = 0;
+ for (uptr i = 0; i < matched.size(); i++)
+ hit_count += matched[i]->hit_count;
+ Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count,
+ (int)internal_getpid());
+ for (uptr i = 0; i < matched.size(); i++) {
+ Printf("%d %s:%s\n", matched[i]->hit_count,
+ SuppressionTypeString(matched[i]->type), matched[i]->templ);
+ }
+}
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_suppressions.h b/gcc-4.9/libsanitizer/tsan/tsan_suppressions.h
new file mode 100644
index 000000000..2939e9a8b
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_suppressions.h
@@ -0,0 +1,27 @@
+//===-- tsan_suppressions.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SUPPRESSIONS_H
+#define TSAN_SUPPRESSIONS_H
+
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+void InitializeSuppressions();
+void PrintMatchedSuppressions();
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
+uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp);
+SuppressionContext *GetSuppressionContext();
+
+} // namespace __tsan
+
+#endif // TSAN_SUPPRESSIONS_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_symbolize.cc b/gcc-4.9/libsanitizer/tsan/tsan_symbolize.cc
new file mode 100644
index 000000000..c0e794be2
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_symbolize.cc
@@ -0,0 +1,156 @@
+//===-- tsan_symbolize.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_symbolize.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "tsan_flags.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void EnterSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(!thr->in_symbolizer);
+ thr->in_symbolizer = true;
+}
+
+void ExitSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(thr->in_symbolizer);
+ thr->in_symbolizer = false;
+}
+
+ReportStack *NewReportStackEntry(uptr addr) {
+ ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack,
+ sizeof(ReportStack));
+ internal_memset(ent, 0, sizeof(*ent));
+ ent->pc = addr;
+ return ent;
+}
+
+static ReportStack *NewReportStackEntry(const AddressInfo &info) {
+ ReportStack *ent = NewReportStackEntry(info.address);
+ ent->module = StripModuleName(info.module);
+ ent->offset = info.module_offset;
+ if (info.function)
+ ent->func = internal_strdup(info.function);
+ if (info.file)
+ ent->file = internal_strdup(info.file);
+ ent->line = info.line;
+ ent->col = info.column;
+ return ent;
+}
+
+// Denotes fake PC values that come from JIT/JAVA/etc.
+// For such PC values __tsan_symbolize_external() will be called.
+const uptr kExternalPCBit = 1ULL << 60;
+
+// May be overridden by JIT/JAVA/etc,
+// whatever produces PCs marked with kExternalPCBit.
+extern "C" bool __tsan_symbolize_external(uptr pc,
+ char *func_buf, uptr func_siz,
+ char *file_buf, uptr file_siz,
+ int *line, int *col)
+ SANITIZER_WEAK_ATTRIBUTE;
+
+bool __tsan_symbolize_external(uptr pc,
+ char *func_buf, uptr func_siz,
+ char *file_buf, uptr file_siz,
+ int *line, int *col) {
+ return false;
+}
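+
+// Illustrative only: a JIT runtime could provide a strong definition that
+// consults its own code map (my_jit_lookup is a hypothetical helper; note
+// that pc still has kExternalPCBit set when this hook is called):
+//   bool __tsan_symbolize_external(uptr pc, char *func_buf, uptr func_siz,
+//                                  char *file_buf, uptr file_siz,
+//                                  int *line, int *col) {
+//     return my_jit_lookup(pc, func_buf, func_siz,
+//                          file_buf, file_siz, line, col);
+//   }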
+
+ReportStack *SymbolizeCode(uptr addr) {
+ // Check if PC comes from non-native land.
+ if (addr & kExternalPCBit) {
+ // Declare static to not consume too much stack space.
+ // We symbolize reports in a single thread, so this is fine.
+ static char func_buf[1024];
+ static char file_buf[1024];
+ int line, col;
+ if (!__tsan_symbolize_external(addr, func_buf, sizeof(func_buf),
+ file_buf, sizeof(file_buf), &line, &col))
+ return NewReportStackEntry(addr);
+ ReportStack *ent = NewReportStackEntry(addr);
+ ent->module = 0;
+ ent->offset = 0;
+ ent->func = internal_strdup(func_buf);
+ ent->file = internal_strdup(file_buf);
+ ent->line = line;
+ ent->col = col;
+ return ent;
+ }
+ if (!Symbolizer::Get()->IsAvailable())
+ return SymbolizeCodeAddr2Line(addr);
+ static const uptr kMaxAddrFrames = 16;
+ InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
+ for (uptr i = 0; i < kMaxAddrFrames; i++)
+ new(&addr_frames[i]) AddressInfo();
+ uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
+ addr, addr_frames.data(), kMaxAddrFrames);
+ if (addr_frames_num == 0)
+ return NewReportStackEntry(addr);
+ ReportStack *top = 0;
+ ReportStack *bottom = 0;
+ for (uptr i = 0; i < addr_frames_num; i++) {
+ ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]);
+ CHECK(cur_entry);
+ addr_frames[i].Clear();
+ if (i == 0)
+ top = cur_entry;
+ else
+ bottom->next = cur_entry;
+ bottom = cur_entry;
+ }
+ return top;
+}
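+
+// Note: the list built above is ordered from the innermost (top) frame to
+// the outermost (bottom) frame, so inlined frames resolved from a single PC
+// appear as consecutive ReportStack entries linked via next.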
+
+ReportLocation *SymbolizeData(uptr addr) {
+ if (!Symbolizer::Get()->IsAvailable())
+ return 0;
+ DataInfo info;
+ if (!Symbolizer::Get()->SymbolizeData(addr, &info))
+ return 0;
+ ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack,
+ sizeof(ReportLocation));
+ internal_memset(ent, 0, sizeof(*ent));
+ ent->type = ReportLocationGlobal;
+ ent->module = StripModuleName(info.module);
+ ent->offset = info.module_offset;
+ if (info.name)
+ ent->name = internal_strdup(info.name);
+ ent->addr = info.start;
+ ent->size = info.size;
+ return ent;
+}
+
+void SymbolizeFlush() {
+ if (!Symbolizer::Get()->IsAvailable())
+ return;
+ Symbolizer::Get()->Flush();
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_symbolize.h b/gcc-4.9/libsanitizer/tsan/tsan_symbolize.h
new file mode 100644
index 000000000..892c11c06
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_symbolize.h
@@ -0,0 +1,31 @@
+//===-- tsan_symbolize.h ----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYMBOLIZE_H
+#define TSAN_SYMBOLIZE_H
+
+#include "tsan_defs.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+void EnterSymbolizer();
+void ExitSymbolizer();
+ReportStack *SymbolizeCode(uptr addr);
+ReportLocation *SymbolizeData(uptr addr);
+void SymbolizeFlush();
+
+ReportStack *SymbolizeCodeAddr2Line(uptr addr);
+
+ReportStack *NewReportStackEntry(uptr addr);
+
+} // namespace __tsan
+
+#endif // TSAN_SYMBOLIZE_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc b/gcc-4.9/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc
new file mode 100644
index 000000000..c278a42f3
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc
@@ -0,0 +1,191 @@
+//===-- tsan_symbolize_addr2line.cc ---------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_symbolize.h"
+#include "tsan_mman.h"
+#include "tsan_rtl.h"
+#include "tsan_platform.h"
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <link.h>
+#include <linux/limits.h>
+#include <sys/types.h>
+
+namespace __tsan {
+
+struct ModuleDesc {
+ const char *fullname;
+ const char *name;
+ uptr base;
+ int inp_fd;
+ int out_fd;
+};
+
+struct SectionDesc {
+ SectionDesc *next;
+ ModuleDesc *module;
+ uptr base;
+ uptr end;
+};
+
+struct DlIteratePhdrCtx {
+ SectionDesc *sections;
+ bool is_first;
+};
+
+static void NOINLINE InitModule(ModuleDesc *m) {
+ int outfd[2] = {};
+ if (pipe(&outfd[0])) {
+ Printf("ThreadSanitizer: outfd pipe() failed (%d)\n", errno);
+ Die();
+ }
+ int infd[2] = {};
+ if (pipe(&infd[0])) {
+ Printf("ThreadSanitizer: infd pipe() failed (%d)\n", errno);
+ Die();
+ }
+ int pid = fork();
+ if (pid == 0) {
+ internal_close(STDOUT_FILENO);
+ internal_close(STDIN_FILENO);
+ internal_dup2(outfd[0], STDIN_FILENO);
+ internal_dup2(infd[1], STDOUT_FILENO);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ for (int fd = getdtablesize(); fd > 2; fd--)
+ internal_close(fd);
+ execl("/usr/bin/addr2line", "/usr/bin/addr2line", "-Cfe", m->fullname, 0);
+ _exit(0);
+ } else if (pid < 0) {
+ Printf("ThreadSanitizer: failed to fork symbolizer\n");
+ Die();
+ }
+ internal_close(outfd[0]);
+ internal_close(infd[1]);
+ m->inp_fd = infd[0];
+ m->out_fd = outfd[1];
+}
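+
+// The child process set up above speaks a simple line protocol: the parent
+// writes a hex code offset terminated by '\n' to m->out_fd and reads the
+// two-line answer ("<function>\n<file>:<line>\n") from m->inp_fd, which is
+// exactly what SymbolizeCodeAddr2Line below relies on.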
+
+static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
+ DlIteratePhdrCtx *ctx = (DlIteratePhdrCtx*)arg;
+ InternalScopedBuffer<char> tmp(128);
+ if (ctx->is_first) {
+ internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe",
+ (int)internal_getpid());
+ info->dlpi_name = tmp.data();
+ }
+ ctx->is_first = false;
+ if (info->dlpi_name == 0 || info->dlpi_name[0] == 0)
+ return 0;
+ ModuleDesc *m = (ModuleDesc*)internal_alloc(MBlockReportStack,
+ sizeof(ModuleDesc));
+ m->fullname = internal_strdup(info->dlpi_name);
+ m->name = internal_strrchr(m->fullname, '/');
+ if (m->name)
+ m->name += 1;
+ else
+ m->name = m->fullname;
+ m->base = (uptr)info->dlpi_addr;
+ m->inp_fd = -1;
+ m->out_fd = -1;
+ DPrintf2("Module %s %zx\n", m->name, m->base);
+ for (int i = 0; i < info->dlpi_phnum; i++) {
+ const Elf64_Phdr *s = &info->dlpi_phdr[i];
+ DPrintf2(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx"
+ " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n",
+ (uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr,
+ (uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz,
+ (uptr)s->p_flags, (uptr)s->p_align);
+ if (s->p_type != PT_LOAD)
+ continue;
+ SectionDesc *sec = (SectionDesc*)internal_alloc(MBlockReportStack,
+ sizeof(SectionDesc));
+ sec->module = m;
+ sec->base = info->dlpi_addr + s->p_vaddr;
+ sec->end = sec->base + s->p_memsz;
+ sec->next = ctx->sections;
+ ctx->sections = sec;
+ DPrintf2(" Section %zx-%zx\n", sec->base, sec->end);
+ }
+ return 0;
+}
+
+static SectionDesc *InitSections() {
+ DlIteratePhdrCtx ctx = {0, true};
+ dl_iterate_phdr(dl_iterate_phdr_cb, &ctx);
+ return ctx.sections;
+}
+
+static SectionDesc *GetSectionDesc(uptr addr) {
+ static SectionDesc *sections = 0;
+ if (sections == 0)
+ sections = InitSections();
+ for (SectionDesc *s = sections; s; s = s->next) {
+ if (addr >= s->base && addr < s->end) {
+ if (s->module->inp_fd == -1)
+ InitModule(s->module);
+ return s;
+ }
+ }
+ return 0;
+}
+
+ReportStack *SymbolizeCodeAddr2Line(uptr addr) {
+ SectionDesc *s = GetSectionDesc(addr);
+ if (s == 0)
+ return NewReportStackEntry(addr);
+ ModuleDesc *m = s->module;
+ uptr offset = addr - m->base;
+ char addrstr[32];
+ internal_snprintf(addrstr, sizeof(addrstr), "%p\n", (void*)offset);
+ if (0 >= internal_write(m->out_fd, addrstr, internal_strlen(addrstr))) {
+ Printf("ThreadSanitizer: can't write from symbolizer (%d, %d)\n",
+ m->out_fd, errno);
+ Die();
+ }
+ InternalScopedBuffer<char> func(1024);
+ ssize_t len = internal_read(m->inp_fd, func.data(), func.size() - 1);
+ if (len <= 0) {
+ Printf("ThreadSanitizer: can't read from symbolizer (%d, %d)\n",
+ m->inp_fd, errno);
+ Die();
+ }
+ func.data()[len] = 0;
+ ReportStack *res = NewReportStackEntry(addr);
+ res->module = internal_strdup(m->name);
+ res->offset = offset;
+ char *pos = (char*)internal_strchr(func.data(), '\n');
+ if (pos && func[0] != '?') {
+ res->func = (char*)internal_alloc(MBlockReportStack, pos - func.data() + 1);
+ internal_memcpy(res->func, func.data(), pos - func.data());
+ res->func[pos - func.data()] = 0;
+ char *pos2 = (char*)internal_strchr(pos, ':');
+ if (pos2) {
+ res->file = (char*)internal_alloc(MBlockReportStack, pos2 - pos - 1 + 1);
+ internal_memcpy(res->file, pos + 1, pos2 - pos - 1);
+ res->file[pos2 - pos - 1] = 0;
+ res->line = atoi(pos2 + 1);
+ }
+ }
+ return res;
+}
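+
+// Example of the addr2line exchange parsed above (illustrative values):
+//   request:  "0x4a21f0\n"
+//   response: "main\n/src/app.cc:42\n"
+// For an unknown address addr2line prints "??\n??:0\n", which the
+// func[0] != '?' check rejects, leaving only module+offset in the entry.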
+
+ReportStack *SymbolizeDataAddr2Line(uptr addr) {
+ return 0;
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_sync.cc b/gcc-4.9/libsanitizer/tsan/tsan_sync.cc
new file mode 100644
index 000000000..0c5be105f
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_sync.cc
@@ -0,0 +1,304 @@
+//===-- tsan_sync.cc ------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_sync.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+SyncVar::SyncVar(uptr addr, u64 uid)
+ : mtx(MutexTypeSyncVar, StatMtxSyncVar)
+ , addr(addr)
+ , uid(uid)
+ , owner_tid(kInvalidTid)
+ , last_lock()
+ , recursion()
+ , is_rw()
+ , is_recursive()
+ , is_broken()
+ , is_linker_init() {
+}
+
+SyncTab::Part::Part()
+ : mtx(MutexTypeSyncTab, StatMtxSyncTab)
+ , val() {
+}
+
+SyncTab::SyncTab() {
+}
+
+SyncTab::~SyncTab() {
+ for (int i = 0; i < kPartCount; i++) {
+ while (tab_[i].val) {
+ SyncVar *tmp = tab_[i].val;
+ tab_[i].val = tmp->next;
+ DestroyAndFree(tmp);
+ }
+ }
+}
+
+SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock) {
+ return GetAndLock(thr, pc, addr, write_lock, true);
+}
+
+SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
+ return GetAndLock(0, 0, addr, write_lock, false);
+}
+
+SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
+ StatInc(thr, StatSyncCreated);
+ void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
+ const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ SyncVar *res = new(mem) SyncVar(addr, uid);
+#ifndef TSAN_GO
+ res->creation_stack_id = CurrentStackId(thr, pc);
+#endif
+ return res;
+}
+
+SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock, bool create) {
+#ifndef TSAN_GO
+ { // NOLINT
+ SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
+ if (res)
+ return res;
+ }
+
+ // Here we ask only PrimaryAllocator, because
+ // SecondaryAllocator::PointerIsMine() is slow and we fall back to
+ // the hashmap anyway.
+ if (PrimaryAllocator::PointerIsMine((void*)addr)) {
+ MBlock *b = user_mblock(thr, (void*)addr);
+ CHECK_NE(b, 0);
+ MBlock::ScopedLock l(b);
+ SyncVar *res = 0;
+ for (res = b->ListHead(); res; res = res->next) {
+ if (res->addr == addr)
+ break;
+ }
+ if (res == 0) {
+ if (!create)
+ return 0;
+ res = Create(thr, pc, addr);
+ b->ListPush(res);
+ }
+ if (write_lock)
+ res->mtx.Lock();
+ else
+ res->mtx.ReadLock();
+ return res;
+ }
+#endif
+
+ Part *p = &tab_[PartIdx(addr)];
+ {
+ ReadLock l(&p->mtx);
+ for (SyncVar *res = p->val; res; res = res->next) {
+ if (res->addr == addr) {
+ if (write_lock)
+ res->mtx.Lock();
+ else
+ res->mtx.ReadLock();
+ return res;
+ }
+ }
+ }
+ if (!create)
+ return 0;
+ {
+ Lock l(&p->mtx);
+ SyncVar *res = p->val;
+ for (; res; res = res->next) {
+ if (res->addr == addr)
+ break;
+ }
+ if (res == 0) {
+ res = Create(thr, pc, addr);
+ res->next = p->val;
+ p->val = res;
+ }
+ if (write_lock)
+ res->mtx.Lock();
+ else
+ res->mtx.ReadLock();
+ return res;
+ }
+}
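+
+// GetAndLock above uses a double-checked pattern on the hashmap part: an
+// optimistic pass under ReadLock for the common "already exists" case, then,
+// only if creation may be needed, a second pass under the exclusive Lock
+// that re-scans the list before inserting, so concurrent creators cannot
+// insert duplicates for the same addr.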
+
+SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
+#ifndef TSAN_GO
+ { // NOLINT
+ SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
+ if (res)
+ return res;
+ }
+ if (PrimaryAllocator::PointerIsMine((void*)addr)) {
+ MBlock *b = user_mblock(thr, (void*)addr);
+ CHECK_NE(b, 0);
+ SyncVar *res = 0;
+ {
+ MBlock::ScopedLock l(b);
+ res = b->ListHead();
+ if (res) {
+ if (res->addr == addr) {
+ if (res->is_linker_init)
+ return 0;
+ b->ListPop();
+ } else {
+ SyncVar **prev = &res->next;
+ res = *prev;
+ while (res) {
+ if (res->addr == addr) {
+ if (res->is_linker_init)
+ return 0;
+ *prev = res->next;
+ break;
+ }
+ prev = &res->next;
+ res = *prev;
+ }
+ }
+ if (res) {
+ StatInc(thr, StatSyncDestroyed);
+ res->mtx.Lock();
+ res->mtx.Unlock();
+ }
+ }
+ }
+ return res;
+ }
+#endif
+
+ Part *p = &tab_[PartIdx(addr)];
+ SyncVar *res = 0;
+ {
+ Lock l(&p->mtx);
+ SyncVar **prev = &p->val;
+ res = *prev;
+ while (res) {
+ if (res->addr == addr) {
+ if (res->is_linker_init)
+ return 0;
+ *prev = res->next;
+ break;
+ }
+ prev = &res->next;
+ res = *prev;
+ }
+ }
+ if (res) {
+ StatInc(thr, StatSyncDestroyed);
+ res->mtx.Lock();
+ res->mtx.Unlock();
+ }
+ return res;
+}
+
+int SyncTab::PartIdx(uptr addr) {
+ return (addr >> 3) % kPartCount;
+}
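+
+// PartIdx discards the low 3 bits (sync addresses are in practice at least
+// 8-byte aligned) and reduces modulo the prime kPartCount (1009) to spread
+// addresses evenly across the lock-striped parts.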
+
+StackTrace::StackTrace()
+ : n_()
+ , s_()
+ , c_() {
+}
+
+StackTrace::StackTrace(uptr *buf, uptr cnt)
+ : n_()
+ , s_(buf)
+ , c_(cnt) {
+ CHECK_NE(buf, 0);
+ CHECK_NE(cnt, 0);
+}
+
+StackTrace::~StackTrace() {
+ Reset();
+}
+
+void StackTrace::Reset() {
+ if (s_ && !c_) {
+ CHECK_NE(n_, 0);
+ internal_free(s_);
+ s_ = 0;
+ }
+ n_ = 0;
+}
+
+void StackTrace::Init(const uptr *pcs, uptr cnt) {
+ Reset();
+ if (cnt == 0)
+ return;
+ if (c_) {
+ CHECK_NE(s_, 0);
+ CHECK_LE(cnt, c_);
+ } else {
+ s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+ }
+ n_ = cnt;
+ internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
+}
+
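+// In ObtainCurrent below, !!toppc is 0 or 1 and accounts for the optional
+// extra frame appended at the end: when the combined frame count exceeds the
+// capacity (c_ in static mode, kTraceStackSize otherwise), the oldest frames
+// are dropped so that the newest ones, plus toppc if present, still fit.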
+void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
+ Reset();
+ n_ = thr->shadow_stack_pos - thr->shadow_stack;
+ if (n_ + !!toppc == 0)
+ return;
+ uptr start = 0;
+ if (c_) {
+ CHECK_NE(s_, 0);
+ if (n_ + !!toppc > c_) {
+ start = n_ - c_ + !!toppc;
+ n_ = c_ - !!toppc;
+ }
+ } else {
+ // Cap potentially huge stacks.
+ if (n_ + !!toppc > kTraceStackSize) {
+ start = n_ - kTraceStackSize + !!toppc;
+ n_ = kTraceStackSize - !!toppc;
+ }
+ s_ = (uptr*)internal_alloc(MBlockStackTrace,
+ (n_ + !!toppc) * sizeof(s_[0]));
+ }
+ for (uptr i = 0; i < n_; i++)
+ s_[i] = thr->shadow_stack[start + i];
+ if (toppc) {
+ s_[n_] = toppc;
+ n_++;
+ }
+}
+
+void StackTrace::CopyFrom(const StackTrace& other) {
+ Reset();
+ Init(other.Begin(), other.Size());
+}
+
+bool StackTrace::IsEmpty() const {
+ return n_ == 0;
+}
+
+uptr StackTrace::Size() const {
+ return n_;
+}
+
+uptr StackTrace::Get(uptr i) const {
+ CHECK_LT(i, n_);
+ return s_[i];
+}
+
+const uptr *StackTrace::Begin() const {
+ return s_;
+}
+
+} // namespace __tsan
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_sync.h b/gcc-4.9/libsanitizer/tsan/tsan_sync.h
new file mode 100644
index 000000000..2867a8ac7
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_sync.h
@@ -0,0 +1,125 @@
+//===-- tsan_sync.h ---------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYNC_H
+#define TSAN_SYNC_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_clock.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class SlabCache;
+
+class StackTrace {
+ public:
+ StackTrace();
+ // Initializes the object in "static mode":
+ // in this mode it never calls malloc/free but uses the provided buffer.
+ StackTrace(uptr *buf, uptr cnt);
+ ~StackTrace();
+ void Reset();
+
+ void Init(const uptr *pcs, uptr cnt);
+ void ObtainCurrent(ThreadState *thr, uptr toppc);
+ bool IsEmpty() const;
+ uptr Size() const;
+ uptr Get(uptr i) const;
+ const uptr *Begin() const;
+ void CopyFrom(const StackTrace& other);
+
+ private:
+ uptr n_;
+ uptr *s_;
+ const uptr c_;
+
+ StackTrace(const StackTrace&);
+ void operator = (const StackTrace&);
+};
+
+struct SyncVar {
+ explicit SyncVar(uptr addr, u64 uid);
+
+ static const int kInvalidTid = -1;
+
+ Mutex mtx;
+ uptr addr;
+ const u64 uid; // Globally unique id.
+ SyncClock clock;
+ SyncClock read_clock; // Used for rw mutexes only.
+ u32 creation_stack_id;
+ int owner_tid; // Set only by exclusive owners.
+ u64 last_lock;
+ int recursion;
+ bool is_rw;
+ bool is_recursive;
+ bool is_broken;
+ bool is_linker_init;
+ SyncVar *next; // In SyncTab hashtable.
+
+ uptr GetMemoryConsumption();
+ u64 GetId() const {
+ // The low 47 bits are the addr, the next 14 bits are the low part of
+ // the uid, and the top 3 bits are zero.
+ return GetLsb((u64)addr | (uid << 47), 61);
+ }
+ bool CheckId(u64 uid) const {
+ CHECK_EQ(uid, GetLsb(uid, 14));
+ return GetLsb(this->uid, 14) == uid;
+ }
+ static uptr SplitId(u64 id, u64 *uid) {
+ *uid = id >> 47;
+ return (uptr)GetLsb(id, 47);
+ }
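+ // Worked example of the packing (illustrative numbers): for
+ // addr = 0x7f0000001000 and uid = 5, GetId() returns
+ // (5 << 47) | 0x7f0000001000; SplitId() recovers uid = 5 and the original
+ // addr, and CheckId() compares only the low 14 bits of the uid.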
+};
+
+class SyncTab {
+ public:
+ SyncTab();
+ ~SyncTab();
+
+ SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock);
+ SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
+
+ // If the SyncVar does not exist, returns 0.
+ SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);
+
+ SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
+
+ uptr GetMemoryConsumption(uptr *nsync);
+
+ private:
+ struct Part {
+ Mutex mtx;
+ SyncVar *val;
+ char pad[kCacheLineSize - sizeof(Mutex) - sizeof(SyncVar*)]; // NOLINT
+ Part();
+ };
+
+ // FIXME: Implement something more sane.
+ static const int kPartCount = 1009;
+ Part tab_[kPartCount];
+ atomic_uint64_t uid_gen_;
+
+ int PartIdx(uptr addr);
+
+ SyncVar* GetAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock, bool create);
+
+ SyncTab(const SyncTab&); // Not implemented.
+ void operator = (const SyncTab&); // Not implemented.
+};
+
+} // namespace __tsan
+
+#endif // TSAN_SYNC_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_trace.h b/gcc-4.9/libsanitizer/tsan/tsan_trace.h
new file mode 100644
index 000000000..93ed8d907
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_trace.h
@@ -0,0 +1,76 @@
+//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_TRACE_H
+#define TSAN_TRACE_H
+
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+#include "tsan_sync.h"
+#include "tsan_mutexset.h"
+
+namespace __tsan {
+
+const int kTracePartSizeBits = 14;
+const int kTracePartSize = 1 << kTracePartSizeBits;
+const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize;
+const int kTraceSize = kTracePartSize * kTraceParts;
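+// With these constants: kTracePartSize = 16K events per part,
+// kTraceParts = 256 parts, and kTraceSize = 4M events per thread trace.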
+
+// Must fit into 3 bits.
+enum EventType {
+ EventTypeMop,
+ EventTypeFuncEnter,
+ EventTypeFuncExit,
+ EventTypeLock,
+ EventTypeUnlock,
+ EventTypeRLock,
+ EventTypeRUnlock
+};
+
+// Represents a thread event (from most significant bit):
+// u64 typ : 3; // EventType.
+// u64 addr : 61; // Associated pc.
+typedef u64 Event;
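+// Example encoding (illustrative): a FuncEnter event for pc 0x4005d0 is
+// ((u64)EventTypeFuncEnter << 61) | 0x4005d0, i.e. the type in the top
+// 3 bits and the pc in the low 61 bits.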
+
+struct TraceHeader {
+ StackTrace stack0; // Start stack for the trace.
+ u64 epoch0; // Start epoch for the trace.
+ MutexSet mset0;
+#ifndef TSAN_GO
+ uptr stack0buf[kTraceStackSize];
+#endif
+
+ TraceHeader()
+#ifndef TSAN_GO
+ : stack0(stack0buf, kTraceStackSize)
+#else
+ : stack0()
+#endif
+ , epoch0() {
+ }
+};
+
+struct Trace {
+ TraceHeader headers[kTraceParts];
+ Mutex mtx;
+#ifndef TSAN_GO
+ // Must be last to catch overflow as a paging fault.
+ // Go shadow stack is dynamically allocated.
+ uptr shadow_stack[kShadowStackSize];
+#endif
+
+ Trace()
+ : mtx(MutexTypeTrace, StatMtxTrace) {
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_TRACE_H
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_update_shadow_word_inl.h b/gcc-4.9/libsanitizer/tsan/tsan_update_shadow_word_inl.h
new file mode 100644
index 000000000..42caf80d3
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_update_shadow_word_inl.h
@@ -0,0 +1,74 @@
+//===-- tsan_update_shadow_word_inl.h ---------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Body of the hottest inner loop.
+// If we wrap this body into a function, compilers (both gcc and clang)
+// produce slightly less efficient code.
+//===----------------------------------------------------------------------===//
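+// This fragment is #included into the memory access routine and expects the
+// following names in scope: thr (ThreadState*), cur and old (Shadow),
+// shadow_mem (u64*), idx, store_word, kShadowCnt, kAccessSizeLog,
+// kAccessIsWrite, kIsAtomic, and a RACE label to jump to on a detected race.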
+do {
+ StatInc(thr, StatShadowProcessed);
+ const unsigned kAccessSize = 1 << kAccessSizeLog;
+ unsigned off = cur.ComputeSearchOffset();
+ u64 *sp = &shadow_mem[(idx + off) % kShadowCnt];
+ old = LoadShadow(sp);
+ if (old.IsZero()) {
+ StatInc(thr, StatShadowZero);
+ if (store_word)
+ StoreIfNotYetStored(sp, &store_word);
+ // The above StoreIfNotYetStored could be done unconditionally
+ // and it even shows a 4% gain on synthetic benchmarks (r4307).
+ break;
+ }
+ // Is the memory access equal to the previous one?
+ if (Shadow::Addr0AndSizeAreEqual(cur, old)) {
+ StatInc(thr, StatShadowSameSize);
+ // same thread?
+ if (Shadow::TidsAreEqual(old, cur)) {
+ StatInc(thr, StatShadowSameThread);
+ if (OldIsInSameSynchEpoch(old, thr)) {
+ if (old.IsRWNotWeaker(kAccessIsWrite, kIsAtomic)) {
+ // found a slot that holds effectively the same info
+ // (that is, same tid, same sync epoch and same size)
+ StatInc(thr, StatMopSame);
+ return;
+ }
+ StoreIfNotYetStored(sp, &store_word);
+ break;
+ }
+ if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))
+ StoreIfNotYetStored(sp, &store_word);
+ break;
+ }
+ StatInc(thr, StatShadowAnotherThread);
+ if (HappensBefore(old, thr)) {
+ StoreIfNotYetStored(sp, &store_word);
+ break;
+ }
+ if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
+ break;
+ goto RACE;
+ }
+ // Do the memory accesses intersect?
+ if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
+ StatInc(thr, StatShadowIntersect);
+ if (Shadow::TidsAreEqual(old, cur)) {
+ StatInc(thr, StatShadowSameThread);
+ break;
+ }
+ StatInc(thr, StatShadowAnotherThread);
+ if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
+ break;
+ if (HappensBefore(old, thr))
+ break;
+ goto RACE;
+ }
+ // The accesses do not intersect.
+ StatInc(thr, StatShadowNotIntersect);
+ break;
+} while (0);
diff --git a/gcc-4.9/libsanitizer/tsan/tsan_vector.h b/gcc-4.9/libsanitizer/tsan/tsan_vector.h
new file mode 100644
index 000000000..4da8b83d5
--- /dev/null
+++ b/gcc-4.9/libsanitizer/tsan/tsan_vector.h
@@ -0,0 +1,113 @@
+//===-- tsan_vector.h -------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+// Low-fat STL-like vector container.
+
+#ifndef TSAN_VECTOR_H
+#define TSAN_VECTOR_H
+
+#include "tsan_defs.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+template<typename T>
+class Vector {
+ public:
+ explicit Vector(MBlockType typ)
+ : typ_(typ)
+ , begin_()
+ , end_()
+ , last_() {
+ }
+
+ ~Vector() {
+ if (begin_)
+ internal_free(begin_);
+ }
+
+ void Reset() {
+ if (begin_)
+ internal_free(begin_);
+ begin_ = 0;
+ end_ = 0;
+ last_ = 0;
+ }
+
+ uptr Size() const {
+ return end_ - begin_;
+ }
+
+ T &operator[](uptr i) {
+ DCHECK_LT(i, end_ - begin_);
+ return begin_[i];
+ }
+
+ const T &operator[](uptr i) const {
+ DCHECK_LT(i, end_ - begin_);
+ return begin_[i];
+ }
+
+ T *PushBack(T v = T()) {
+ EnsureSize(Size() + 1);
+ end_[-1] = v;
+ return &end_[-1];
+ }
+
+ void PopBack() {
+ DCHECK_GT(end_, begin_);
+ end_--;
+ }
+
+ void Resize(uptr size) {
+ uptr old_size = Size();
+ EnsureSize(size);
+ if (old_size < size) {
+ for (uptr i = old_size; i < size; i++)
+ begin_[i] = T();
+ }
+ }
+
+ private:
+ const MBlockType typ_;
+ T *begin_;
+ T *end_;
+ T *last_;
+
+ void EnsureSize(uptr size) {
+ if (size <= Size())
+ return;
+ if (size <= (uptr)(last_ - begin_)) {
+ end_ = begin_ + size;
+ return;
+ }
+ uptr cap0 = last_ - begin_;
+ uptr cap = 2 * cap0;
+ if (cap == 0)
+ cap = 16;
+ if (cap < size)
+ cap = size;
+ T *p = (T*)internal_alloc(typ_, cap * sizeof(T));
+ if (cap0) {
+ internal_memcpy(p, begin_, cap0 * sizeof(T));
+ internal_free(begin_);
+ }
+ begin_ = p;
+ end_ = begin_ + size;
+ last_ = begin_ + cap;
+ }
+
+ Vector(const Vector&);
+ void operator=(const Vector&);
+};
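+
+// Usage sketch (illustrative): the container supports only what the runtime
+// needs and is backed by internal_alloc/internal_free rather than libc.
+//
+//   Vector<uptr> v(MBlockReportStack);
+//   v.PushBack(42);   // First growth allocates capacity for 16 elements.
+//   v.Resize(10);     // New elements [1, 10) are value-initialized.
+//   uptr x = v[0];    // Bounds are DCHECKed in debug builds.
+//   v.PopBack();
+//   v.Reset();        // Frees the backing storage.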
+} // namespace __tsan
+
+#endif // #ifndef TSAN_VECTOR_H