author    Ian Rogers <irogers@google.com>  2014-06-05 20:48:42 -0700
committer Ian Rogers <irogers@google.com>  2014-06-10 23:19:29 -0700
commit    c5f17732d8144491c642776b6b48c85dfadf4b52 (patch)
tree      811daa488ae5ee5dfd9b3b73bd210bc1506e5ca1
parent    08654d40cdd256f6a6c8619bf06d04d4c819714a (diff)
Remove deprecated WITH_HOST_DALVIK.
Bug: 13751317

Fix the Mac build:
- disable x86 selector removal that causes OS/X 10.9 kernel panics,
- madvise(MADV_DONTNEED) doesn't zero memory on the Mac; factor the zeroing into a MemMap routine,
- switch to the elf.h in elfutils to avoid Linux kernel dependencies,
- we can't rely on exclusive_owner_ being available from other pthread libraries, so maintain our own when futexes aren't available (we can't rely on the OS/X 10.8 hack any more),
- fix symbol naming in assembly code,
- work around C library differences,
- disable backtrace in DumpNativeStack to avoid a broken libbacktrace dependency,
- disable main thread signal handling logic,
- align the stack in stub_test,
- use $(HOST_SHLIB_SUFFIX) rather than .so in host make file variables.

Not all host tests pass on the Mac with this change, but dex2oat works, as does running HelloWorld.

Change-Id: I5a232aedfb2028524d49daa6397a8e60f3ee40d3
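The MemMap routine mentioned above centralizes that platform difference: on Linux, madvise(MADV_DONTNEED) guarantees pages read back as zero after the next fault, while on the Mac it does not, so callers that rely on zeroed memory must memset first. A minimal standalone sketch of the pattern, assuming only a kMadviseZeroes flag like the one this change adds to runtime/mem_map.h (the free function below is illustrative, not ART's MemMap::MadviseDontNeedAndZero member itself):

    #include <cstddef>
    #include <cstring>
    #include <sys/mman.h>

    #ifdef __linux__
    static constexpr bool kMadviseZeroes = true;   // MADV_DONTNEED zero-fills pages on the next fault.
    #else
    static constexpr bool kMadviseZeroes = false;  // e.g. Mac OS: old contents may survive, so zero by hand.
    #endif

    // Return a mapping's pages to the OS and guarantee the caller observes zeroed memory afterwards.
    static void MadviseDontNeedAndZero(void* begin, size_t size) {
      if (begin != nullptr && size != 0) {
        if (!kMadviseZeroes) {
          std::memset(begin, 0, size);          // madvise alone won't zero here.
        }
        madvise(begin, size, MADV_DONTNEED);    // ART checks the result and warns on failure.
      }
    }

The CardTable, SpaceBitmap, MarkSweep, AtomicStack, and Arena call sites in the diff below switch from raw madvise calls to this centralized routine.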
-rw-r--r--  build/Android.common.mk  4
-rw-r--r--  build/Android.gtest.mk  8
-rw-r--r--  build/Android.libarttest.mk  6
-rw-r--r--  compiler/Android.mk  14
-rw-r--r--  compiler/utils/arena_allocator.cc  2
-rw-r--r--  dalvikvm/Android.mk  2
-rw-r--r--  dex2oat/Android.mk  14
-rw-r--r--  disassembler/Android.mk  14
-rw-r--r--  oatdump/Android.mk  12
-rw-r--r--  runtime/Android.mk  20
-rw-r--r--  runtime/arch/stub_test.cc  8
-rw-r--r--  runtime/arch/x86/asm_support_x86.S  6
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S  42
-rw-r--r--  runtime/arch/x86/thread_x86.cc  6
-rw-r--r--  runtime/base/mutex-inl.h  87
-rw-r--r--  runtime/base/mutex.cc  88
-rw-r--r--  runtime/base/mutex.h  2
-rw-r--r--  runtime/base/unix_file/fd_file.cc  16
-rw-r--r--  runtime/base/unix_file/mapped_file.cc  4
-rw-r--r--  runtime/base/unix_file/mapped_file.h  5
-rw-r--r--  runtime/elf_utils.h  5
-rw-r--r--  runtime/fault_handler.cc  2
-rw-r--r--  runtime/gc/accounting/atomic_stack.h  5
-rw-r--r--  runtime/gc/accounting/card_table.cc  2
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc  8
-rw-r--r--  runtime/gc/allocator/rosalloc.cc  6
-rw-r--r--  runtime/gc/allocator/rosalloc.h  6
-rw-r--r--  runtime/gc/collector/mark_sweep.cc  4
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc  3
-rw-r--r--  runtime/mem_map.cc  12
-rw-r--r--  runtime/mem_map.h  8
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc  9
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc  2
-rw-r--r--  runtime/runtime.cc  7
-rw-r--r--  runtime/runtime_linux.cc  5
-rw-r--r--  runtime/thread.cc  4
-rw-r--r--  runtime/utils.cc  2
-rw-r--r--  test/Android.mk  4
-rw-r--r--  test/SignalTest/signaltest.cc  2
-rw-r--r--  tools/Android.mk  3
40 files changed, 229 insertions, 230 deletions
diff --git a/build/Android.common.mk b/build/Android.common.mk
index a14b9518be..09f34b3092 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -34,8 +34,8 @@ endif
#
ART_BUILD_TARGET_NDEBUG ?= true
ART_BUILD_TARGET_DEBUG ?= true
-ART_BUILD_HOST_NDEBUG ?= $(WITH_HOST_DALVIK)
-ART_BUILD_HOST_DEBUG ?= $(WITH_HOST_DALVIK)
+ART_BUILD_HOST_NDEBUG ?= true
+ART_BUILD_HOST_DEBUG ?= true
ifeq ($(HOST_PREFER_32_BIT),true)
ART_HOST_ARCH := $(HOST_2ND_ARCH)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 407269bca6..1bb1d563d6 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -250,11 +250,9 @@ ifeq ($(ART_BUILD_TARGET),true)
$(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call build-art-test,target,$(file),,)))
$(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call build-art-test,target,$(file),art/compiler,libartd-compiler)))
endif
-ifeq ($(WITH_HOST_DALVIK),true)
- ifeq ($(ART_BUILD_HOST),true)
- $(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call build-art-test,host,$(file),,)))
- $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call build-art-test,host,$(file),art/compiler,libartd-compiler)))
- endif
+ifeq ($(ART_BUILD_HOST),true)
+ $(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call build-art-test,host,$(file),,)))
+ $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call build-art-test,host,$(file),art/compiler,libartd-compiler)))
endif
# Used outside the art project to get a list of the current tests
diff --git a/build/Android.libarttest.mk b/build/Android.libarttest.mk
index b4c99b5d16..76e5af0e85 100644
--- a/build/Android.libarttest.mk
+++ b/build/Android.libarttest.mk
@@ -74,8 +74,6 @@ endef
ifeq ($(ART_BUILD_TARGET),true)
$(eval $(call build-libarttest,target))
endif
-ifeq ($(WITH_HOST_DALVIK),true)
- ifeq ($(ART_BUILD_HOST),true)
- $(eval $(call build-libarttest,host))
- endif
+ifeq ($(ART_BUILD_HOST),true)
+ $(eval $(call build-libarttest,host))
endif
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 3cf7368a51..9a868fcd79 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -273,14 +273,12 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
endef
-ifeq ($(WITH_HOST_DALVIK),true)
- # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
- ifeq ($(ART_BUILD_NDEBUG),true)
- $(eval $(call build-libart-compiler,host,ndebug))
- endif
- ifeq ($(ART_BUILD_DEBUG),true)
- $(eval $(call build-libart-compiler,host,debug))
- endif
+# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
+ifeq ($(ART_BUILD_NDEBUG),true)
+ $(eval $(call build-libart-compiler,host,ndebug))
+endif
+ifeq ($(ART_BUILD_DEBUG),true)
+ $(eval $(call build-libart-compiler,host,debug))
endif
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
$(eval $(call build-libart-compiler,target,ndebug))
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index 6a39641f38..925d4a287a 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -139,7 +139,7 @@ void Arena::Reset() {
if (kUseMemSet || !kUseMemMap) {
memset(Begin(), 0, bytes_allocated_);
} else {
- madvise(Begin(), bytes_allocated_, MADV_DONTNEED);
+ map_->MadviseDontNeedAndZero();
}
bytes_allocated_ = 0;
}
diff --git a/dalvikvm/Android.mk b/dalvikvm/Android.mk
index 03d32f01bc..31fcd176ae 100644
--- a/dalvikvm/Android.mk
+++ b/dalvikvm/Android.mk
@@ -38,7 +38,6 @@ include $(BUILD_SYSTEM)/executable_prefer_symlink.mk
ART_TARGET_EXECUTABLES += $(TARGET_OUT_EXECUTABLES)/$(LOCAL_MODULE)
-ifeq ($(WITH_HOST_DALVIK),true)
include $(CLEAR_VARS)
LOCAL_MODULE := dalvikvm
LOCAL_MODULE_TAGS := optional
@@ -54,4 +53,3 @@ LOCAL_IS_HOST_MODULE := true
include external/libcxx/libcxx.mk
include $(BUILD_HOST_EXECUTABLE)
ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE)
-endif
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index c17788ed9c..28db7115d1 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -36,12 +36,10 @@ ifeq ($(ART_BUILD_TARGET_DEBUG),true)
$(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libcutils libartd-compiler,art/compiler,target,debug,$(dex2oat_arch)))
endif
-ifeq ($(WITH_HOST_DALVIK),true)
- # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
- ifeq ($(ART_BUILD_NDEBUG),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler,art/compiler,host,ndebug))
- endif
- ifeq ($(ART_BUILD_DEBUG),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler,art/compiler,host,debug))
- endif
+# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
+ifeq ($(ART_BUILD_NDEBUG),true)
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler,art/compiler,host,ndebug))
+endif
+ifeq ($(ART_BUILD_DEBUG),true)
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler,art/compiler,host,debug))
endif
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index b4b194dec1..feacbde2e8 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -98,12 +98,10 @@ endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
$(eval $(call build-libart-disassembler,target,debug))
endif
-ifeq ($(WITH_HOST_DALVIK),true)
- # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
- ifeq ($(ART_BUILD_NDEBUG),true)
- $(eval $(call build-libart-disassembler,host,ndebug))
- endif
- ifeq ($(ART_BUILD_DEBUG),true)
- $(eval $(call build-libart-disassembler,host,debug))
- endif
+# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
+ifeq ($(ART_BUILD_NDEBUG),true)
+ $(eval $(call build-libart-disassembler,host,ndebug))
+endif
+ifeq ($(ART_BUILD_DEBUG),true)
+ $(eval $(call build-libart-disassembler,host,debug))
endif
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index 7cee00e182..ecf6a0b868 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -28,11 +28,9 @@ ifeq ($(ART_BUILD_TARGET_DEBUG),true)
$(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler,art/disassembler,target,debug))
endif
-ifeq ($(WITH_HOST_DALVIK),true)
- ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libart-disassembler,art/disassembler,host,ndebug))
- endif
- ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler,art/disassembler,host,debug))
- endif
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libart-disassembler,art/disassembler,host,ndebug))
+endif
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler,art/disassembler,host,debug))
endif
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 8d532c767e..ce315f56bc 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -195,7 +195,8 @@ LIBART_COMMON_SRC_FILES += \
LIBART_GCC_ONLY_SRC_FILES := \
interpreter/interpreter_goto_table_impl.cc
-LIBART_LDFLAGS := -Wl,--no-fatal-warnings
+LIBART_TARGET_LDFLAGS := -Wl,--no-fatal-warnings
+LIBART_HOST_LDFLAGS :=
LIBART_TARGET_SRC_FILES := \
$(LIBART_COMMON_SRC_FILES) \
@@ -365,6 +366,11 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_CFLAGS := $(LIBART_CFLAGS)
LOCAL_LDFLAGS := $(LIBART_LDFLAGS)
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_LDFLAGS += $(LIBART_TARGET_LDFLAGS)
+ else
+ LOCAL_LDFLAGS += $(LIBART_HOST_LDFLAGS)
+ endif
$(foreach arch,$(ART_SUPPORTED_ARCH),
LOCAL_LDFLAGS_$(arch) := $$(LIBART_TARGET_LDFLAGS_$(arch)))
@@ -439,13 +445,11 @@ endef
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since
# they are used to cross compile for the target.
-ifeq ($(WITH_HOST_DALVIK),true)
- ifeq ($(ART_BUILD_NDEBUG),true)
- $(eval $(call build-libart,host,ndebug))
- endif
- ifeq ($(ART_BUILD_DEBUG),true)
- $(eval $(call build-libart,host,debug))
- endif
+ifeq ($(ART_BUILD_NDEBUG),true)
+ $(eval $(call build-libart,host,ndebug))
+endif
+ifeq ($(ART_BUILD_DEBUG),true)
+ $(eval $(call build-libart,host,debug))
endif
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 3be0faf5ac..59311bc7a4 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -77,9 +77,10 @@ class StubTest : public CommonRuntimeTest {
#if defined(__i386__)
// TODO: Set the thread?
__asm__ __volatile__(
- "pushl %[referrer]\n\t" // Store referrer
+ "subl $12, %%esp\n\t" // Align stack.
+ "pushl %[referrer]\n\t" // Store referrer.
"call *%%edi\n\t" // Call the stub
- "addl $4, %%esp" // Pop referrer
+ "addl $16, %%esp" // Pop referrer
: "=a" (result)
// Use the result from eax
: "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer)
@@ -300,9 +301,10 @@ class StubTest : public CommonRuntimeTest {
// TODO: Set the thread?
__asm__ __volatile__(
"movd %[hidden], %%xmm0\n\t"
+ "subl $12, %%esp\n\t" // Align stack.
"pushl %[referrer]\n\t" // Store referrer
"call *%%edi\n\t" // Call the stub
- "addl $4, %%esp" // Pop referrer
+ "addl $16, %%esp" // Pop referrer
: "=a" (result)
// Use the result from eax
: "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"m"(referrer), [hidden]"r"(hidden)
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index f1d07464e3..ae39be13d8 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -28,6 +28,7 @@
#define END_MACRO .endmacro
// Clang's as(1) uses $0, $1, and so on for macro arguments.
+ #define RAW_VAR(name,index) $index
#define VAR(name,index) SYMBOL($index)
#define PLT_VAR(name, index) SYMBOL($index)
#define REG_VAR(name,index) %$index
@@ -50,6 +51,7 @@
// no special meaning to $, so literals are still just $x. The use of altmacro means % is a
// special character meaning care needs to be taken when passing registers as macro arguments.
.altmacro
+ #define RAW_VAR(name,index) name&
#define VAR(name,index) name&
#define PLT_VAR(name, index) name&@PLT
#define REG_VAR(name,index) %name
@@ -94,7 +96,7 @@
#if !defined(__APPLE__)
#define SYMBOL(name) name
#if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
- // TODO: Disabled for old clang 3.3, this leads to text reolocations and there should be a
+ // TODO: Disabled for old clang 3.3, this leads to text relocations and there should be a
// better fix.
#define PLT_SYMBOL(name) name // ## @PLT
#else
@@ -151,8 +153,10 @@ VAR(name, 0):
END_MACRO
MACRO0(SETUP_GOT_NOSAVE)
+#ifndef __APPLE__
call __x86.get_pc_thunk.bx
addl $_GLOBAL_OFFSET_TABLE_, %ebx
+#endif
END_MACRO
MACRO0(SETUP_GOT)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 989ecf948c..e522143842 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -111,7 +111,7 @@ MACRO0(DELIVER_PENDING_EXCEPTION)
END_MACRO
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov %esp, %ecx
// Outgoing argument set up
@@ -123,11 +123,11 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
int3 // unreached
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov %esp, %ecx
// Outgoing argument set up
@@ -139,11 +139,11 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
int3 // unreached
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov %esp, %edx
// Outgoing argument set up
@@ -155,7 +155,7 @@ MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
int3 // unreached
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
/*
@@ -207,7 +207,7 @@ TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromC
* pointing back to the original caller.
*/
MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
// Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
// return address
PUSH edi
@@ -248,7 +248,7 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
addl MACRO_LITERAL(4), %esp // Pop code pointer off stack
CFI_ADJUST_CFA_OFFSET(-4)
DELIVER_PENDING_EXCEPTION
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
@@ -315,7 +315,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
END_FUNCTION art_quick_invoke_stub
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
mov %esp, %edx // remember SP
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
@@ -330,11 +330,11 @@ MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
mov %esp, %edx // remember SP
SETUP_GOT_NOSAVE // clobbers EBX
@@ -349,11 +349,11 @@ MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
mov %esp, %edx // remember SP
SETUP_GOT_NOSAVE // clobbers EBX
@@ -368,11 +368,11 @@ MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
mov %esp, %ebx // remember SP
// Outgoing argument set up
@@ -390,7 +390,7 @@ MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
@@ -653,17 +653,17 @@ END_FUNCTION art_quick_check_cast
*/
DEFINE_FUNCTION art_quick_aput_obj_with_null_and_bound_check
testl %eax, %eax
- jnz art_quick_aput_obj_with_bound_check
- jmp art_quick_throw_null_pointer_exception
+ jnz SYMBOL(art_quick_aput_obj_with_bound_check)
+ jmp SYMBOL(art_quick_throw_null_pointer_exception)
END_FUNCTION art_quick_aput_obj_with_null_and_bound_check
DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
movl ARRAY_LENGTH_OFFSET(%eax), %ebx
cmpl %ebx, %ecx
- jb art_quick_aput_obj
+ jb SYMBOL(art_quick_aput_obj)
mov %ecx, %eax
mov %ebx, %ecx
- jmp art_quick_throw_array_bounds
+ jmp SYMBOL(art_quick_throw_array_bounds)
END_FUNCTION art_quick_aput_obj_with_bound_check
DEFINE_FUNCTION art_quick_aput_obj
@@ -1122,7 +1122,7 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline
movd %xmm0, %ecx // get target method index stored in xmm0
movl OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4), %eax // load the target method
POP ecx
- jmp art_quick_invoke_interface_trampoline
+ jmp SYMBOL(art_quick_invoke_interface_trampoline)
END_FUNCTION art_quick_imt_conflict_trampoline
DEFINE_FUNCTION art_quick_resolution_trampoline
diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
index 9f36927877..b97c143914 100644
--- a/runtime/arch/x86/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -156,7 +156,11 @@ void Thread::CleanupCpu() {
// Free LDT entry.
#if defined(__APPLE__)
- i386_set_ldt(selector >> 3, 0, 1);
+ // TODO: release selectors on OS/X this is a leak which will cause ldt entries to be exhausted
+ // after enough threads are created. However, the following code results in kernel panics in OS/X
+ // 10.9.
+ UNUSED(selector);
+ // i386_set_ldt(selector >> 3, 0, 1);
#else
user_desc ldt_entry;
memset(&ldt_entry, 0, sizeof(ldt_entry));
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index d20eb17a3c..1890181342 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -45,54 +45,6 @@ static inline int futex(volatile int *uaddr, int op, int val, const struct times
}
#endif // ART_USE_FUTEXES
-#if defined(__APPLE__)
-
-// This works on Mac OS 10.6 but hasn't been tested on older releases.
-struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
- long padding0; // NOLINT(runtime/int) exact match to darwin type
- int padding1;
- uint32_t padding2;
- int16_t padding3;
- int16_t padding4;
- uint32_t padding5;
- pthread_t darwin_pthread_mutex_owner;
- // ...other stuff we don't care about.
-};
-
-struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
- long padding0; // NOLINT(runtime/int) exact match to darwin type
- pthread_mutex_t padding1;
- int padding2;
- pthread_cond_t padding3;
- pthread_cond_t padding4;
- int padding5;
- int padding6;
- pthread_t darwin_pthread_rwlock_owner;
- // ...other stuff we don't care about.
-};
-
-#endif // __APPLE__
-
-#if defined(__GLIBC__)
-
-struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
- int32_t padding0[2];
- int owner;
- // ...other stuff we don't care about.
-};
-
-struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
-#ifdef __LP64__
- int32_t padding0[6];
-#else
- int32_t padding0[7];
-#endif
- int writer;
- // ...other stuff we don't care about.
-};
-
-#endif // __GLIBC__
-
class ScopedContentionRecorder {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
@@ -219,12 +171,14 @@ inline void ReaderWriterMutex::SharedLock(Thread* self) {
#else
CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
+ DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
RegisterAsLocked(self);
AssertSharedHeld(self);
}
inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
AssertSharedHeld(self);
RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
@@ -262,26 +216,7 @@ inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
}
inline uint64_t Mutex::GetExclusiveOwnerTid() const {
-#if ART_USE_FUTEXES
return exclusive_owner_;
-#elif defined(__BIONIC__)
- return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
-#elif defined(__GLIBC__)
- return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
-#elif defined(__APPLE__)
- const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
- pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
- // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
- // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
- if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
- return 0;
- }
- uint64_t tid;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
- return tid;
-#else
-#error unsupported C library
-#endif
}
inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
@@ -307,23 +242,7 @@ inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
return exclusive_owner_;
}
#else
-#if defined(__BIONIC__)
- return rwlock_.writerThreadId;
-#elif defined(__GLIBC__)
- return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
-#elif defined(__APPLE__)
- const darwin_pthread_rwlock_t*
- dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
- pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
- if (owner == (pthread_t)0) {
- return 0;
- }
- uint64_t tid;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
- return tid;
-#else
-#error unsupported C library
-#endif
+ return exclusive_owner_;
#endif
}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index aeece74687..fd1eb12420 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -263,19 +263,11 @@ Mutex::Mutex(const char* name, LockLevel level, bool recursive)
: BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
state_ = 0;
- exclusive_owner_ = 0;
DCHECK_EQ(0, num_contenders_.LoadRelaxed());
-#elif defined(__BIONIC__) || defined(__APPLE__)
- // Use recursive mutexes for bionic and Apple otherwise the
- // non-recursive mutexes don't have TIDs to check lock ownership of.
- pthread_mutexattr_t attributes;
- CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes));
- CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&attributes, PTHREAD_MUTEX_RECURSIVE));
- CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &attributes));
- CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&attributes));
#else
- CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, NULL));
+ CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
+ exclusive_owner_ = 0;
}
Mutex::~Mutex() {
@@ -336,10 +328,11 @@ void Mutex::ExclusiveLock(Thread* self) {
// TODO: Change state_ to be a art::Atomic and use an intention revealing CAS operation
// that exposes the ordering semantics.
DCHECK_EQ(state_, 1);
- exclusive_owner_ = SafeGetTid(self);
#else
CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
+ DCHECK_EQ(exclusive_owner_, 0U);
+ exclusive_owner_ = SafeGetTid(self);
RegisterAsLocked(self);
}
recursion_count_++;
@@ -369,7 +362,6 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
} while (!done);
// We again assert no memory fence is needed.
DCHECK_EQ(state_, 1);
- exclusive_owner_ = SafeGetTid(self);
#else
int result = pthread_mutex_trylock(&mutex_);
if (result == EBUSY) {
@@ -380,6 +372,8 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
}
#endif
+ DCHECK_EQ(exclusive_owner_, 0U);
+ exclusive_owner_ = SafeGetTid(self);
RegisterAsLocked(self);
}
recursion_count_++;
@@ -394,6 +388,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
void Mutex::ExclusiveUnlock(Thread* self) {
DCHECK(self == NULL || self == Thread::Current());
AssertHeld(self);
+ DCHECK_NE(exclusive_owner_, 0U);
recursion_count_--;
if (!recursive_ || recursion_count_ == 0) {
if (kDebugLocking) {
@@ -402,34 +397,35 @@ void Mutex::ExclusiveUnlock(Thread* self) {
}
RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
- bool done = false;
- do {
- int32_t cur_state = state_;
- if (LIKELY(cur_state == 1)) {
- // The __sync_bool_compare_and_swap enforces the necessary memory ordering.
- // We're no longer the owner.
- exclusive_owner_ = 0;
- // Change state to 0.
- done = __sync_bool_compare_and_swap(&state_, cur_state, 0 /* new state */);
- if (LIKELY(done)) { // Spurious fail?
- // Wake a contender
- if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
- futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
+ bool done = false;
+ do {
+ int32_t cur_state = state_;
+ if (LIKELY(cur_state == 1)) {
+ // The __sync_bool_compare_and_swap enforces the necessary memory ordering.
+ // We're no longer the owner.
+ exclusive_owner_ = 0;
+ // Change state to 0.
+ done = __sync_bool_compare_and_swap(&state_, cur_state, 0 /* new state */);
+ if (LIKELY(done)) { // Spurious fail?
+ // Wake a contender
+ if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
+ futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
+ }
}
- }
- } else {
- // Logging acquires the logging lock, avoid infinite recursion in that case.
- if (this != Locks::logging_lock_) {
- LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
} else {
- LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
- LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
- cur_state, name_).c_str());
- _exit(1);
+ // Logging acquires the logging lock, avoid infinite recursion in that case.
+ if (this != Locks::logging_lock_) {
+ LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
+ } else {
+ LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
+ LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
+ cur_state, name_).c_str());
+ _exit(1);
+ }
}
- }
- } while (!done);
+ } while (!done);
#else
+ exclusive_owner_ = 0;
CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
}
@@ -452,12 +448,13 @@ std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
: BaseMutex(name, level)
#if ART_USE_FUTEXES
- , state_(0), exclusive_owner_(0), num_pending_readers_(0), num_pending_writers_(0)
+ , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{ // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
- CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, NULL));
+ CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
+ exclusive_owner_ = 0;
}
ReaderWriterMutex::~ReaderWriterMutex() {
@@ -506,10 +503,11 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
}
} while (!done);
DCHECK_EQ(state_, -1);
- exclusive_owner_ = SafeGetTid(self);
#else
CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
+ DCHECK_EQ(exclusive_owner_, 0U);
+ exclusive_owner_ = SafeGetTid(self);
RegisterAsLocked(self);
AssertExclusiveHeld(self);
}
@@ -518,6 +516,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
DCHECK(self == NULL || self == Thread::Current());
AssertExclusiveHeld(self);
RegisterAsUnlocked(self);
+ DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
bool done = false;
do {
@@ -538,6 +537,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
}
} while (!done);
#else
+ exclusive_owner_ = 0;
CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
@@ -578,7 +578,6 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
num_pending_writers_--;
}
} while (!done);
- exclusive_owner_ = SafeGetTid(self);
#else
timespec ts;
InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
@@ -591,6 +590,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
}
#endif
+ exclusive_owner_ = SafeGetTid(self);
RegisterAsLocked(self);
AssertSharedHeld(self);
return true;
@@ -656,7 +656,7 @@ ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
num_waiters_ = 0;
#else
pthread_condattr_t cond_attrs;
- CHECK_MUTEX_CALL(pthread_condattr_init(&cond_attrs));
+ CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
// Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
CHECK_MUTEX_CALL(pthread_condattr_setclock(&cond_attrs, CLOCK_MONOTONIC));
@@ -763,8 +763,11 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) {
CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
guard_.num_contenders_--;
#else
+ uint64_t old_owner = guard_.exclusive_owner_;
+ guard_.exclusive_owner_ = 0;
guard_.recursion_count_ = 0;
CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
+ guard_.exclusive_owner_ = old_owner;
#endif
guard_.recursion_count_ = old_recursion_count;
}
@@ -804,6 +807,8 @@ void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
#else
int clock = CLOCK_REALTIME;
#endif
+ uint64_t old_owner = guard_.exclusive_owner_;
+ guard_.exclusive_owner_ = 0;
guard_.recursion_count_ = 0;
timespec ts;
InitTimeSpec(true, clock, ms, ns, &ts);
@@ -812,6 +817,7 @@ void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
errno = rc;
PLOG(FATAL) << "TimedWait failed for " << name_;
}
+ guard_.exclusive_owner_ = old_owner;
#endif
guard_.recursion_count_ = old_recursion_count;
}
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 68b450a1b9..1ba6180076 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -245,6 +245,7 @@ class LOCKABLE Mutex : public BaseMutex {
AtomicInteger num_contenders_;
#else
pthread_mutex_t mutex_;
+ volatile uint64_t exclusive_owner_; // Guarded by mutex_.
#endif
const bool recursive_; // Can the lock be recursively held?
unsigned int recursion_count_;
@@ -358,6 +359,7 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex {
AtomicInteger num_pending_writers_;
#else
pthread_rwlock_t rwlock_;
+ volatile uint64_t exclusive_owner_; // Guarded by rwlock_.
#endif
DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 87d1c0655d..6d5b59cbeb 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -69,17 +69,29 @@ int FdFile::Close() {
}
int FdFile::Flush() {
+#ifdef __linux__
int rc = TEMP_FAILURE_RETRY(fdatasync(fd_));
+#else
+ int rc = TEMP_FAILURE_RETRY(fsync(fd_));
+#endif
return (rc == -1) ? -errno : rc;
}
int64_t FdFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
+#ifdef __linux__
int rc = TEMP_FAILURE_RETRY(pread64(fd_, buf, byte_count, offset));
+#else
+ int rc = TEMP_FAILURE_RETRY(pread(fd_, buf, byte_count, offset));
+#endif
return (rc == -1) ? -errno : rc;
}
int FdFile::SetLength(int64_t new_length) {
+#ifdef __linux__
int rc = TEMP_FAILURE_RETRY(ftruncate64(fd_, new_length));
+#else
+ int rc = TEMP_FAILURE_RETRY(ftruncate(fd_, new_length));
+#endif
return (rc == -1) ? -errno : rc;
}
@@ -90,7 +102,11 @@ int64_t FdFile::GetLength() const {
}
int64_t FdFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
+#ifdef __linux__
int rc = TEMP_FAILURE_RETRY(pwrite64(fd_, buf, byte_count, offset));
+#else
+ int rc = TEMP_FAILURE_RETRY(pwrite(fd_, buf, byte_count, offset));
+#endif
return (rc == -1) ? -errno : rc;
}
diff --git a/runtime/base/unix_file/mapped_file.cc b/runtime/base/unix_file/mapped_file.cc
index bc23a745c9..63927b1216 100644
--- a/runtime/base/unix_file/mapped_file.cc
+++ b/runtime/base/unix_file/mapped_file.cc
@@ -61,7 +61,11 @@ bool MappedFile::MapReadOnly() {
bool MappedFile::MapReadWrite(int64_t file_size) {
CHECK(IsOpened());
CHECK(!IsMapped());
+#ifdef __linux__
int result = TEMP_FAILURE_RETRY(ftruncate64(Fd(), file_size));
+#else
+ int result = TEMP_FAILURE_RETRY(ftruncate(Fd(), file_size));
+#endif
if (result == -1) {
PLOG(ERROR) << "Failed to truncate file '" << GetPath()
<< "' to size " << file_size;
diff --git a/runtime/base/unix_file/mapped_file.h b/runtime/base/unix_file/mapped_file.h
index 28cc5514f7..73056e9764 100644
--- a/runtime/base/unix_file/mapped_file.h
+++ b/runtime/base/unix_file/mapped_file.h
@@ -32,8 +32,13 @@ class MappedFile : public FdFile {
public:
// File modes used in Open().
enum FileMode {
+#ifdef __linux__
kReadOnlyMode = O_RDONLY | O_LARGEFILE,
kReadWriteMode = O_CREAT | O_RDWR | O_LARGEFILE,
+#else
+ kReadOnlyMode = O_RDONLY,
+ kReadWriteMode = O_CREAT | O_RDWR,
+#endif
};
MappedFile() : FdFile(), file_size_(-1), mapped_file_(NULL) {
diff --git a/runtime/elf_utils.h b/runtime/elf_utils.h
index f3ec713bcc..f160dc4b2c 100644
--- a/runtime/elf_utils.h
+++ b/runtime/elf_utils.h
@@ -17,9 +17,8 @@
#ifndef ART_RUNTIME_ELF_UTILS_H_
#define ART_RUNTIME_ELF_UTILS_H_
-// Include the micro-API to avoid potential macro conflicts with the
-// compiler's own elf.h file.
-#include "../../bionic/libc/kernel/uapi/linux/elf.h"
+// Explicitly include elf.h from elfutils to avoid Linux and other dependencies.
+#include "../../external/elfutils/0.153/libelf/elf.h"
// Architecture dependent flags for the ELF header.
#define EF_ARM_EABI_VER5 0x05000000
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 6b216c7e89..3112bc0a28 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -67,7 +67,7 @@ void FaultManager::Init() {
action.sa_sigaction = art_fault_handler;
sigemptyset(&action.sa_mask);
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
-#if !defined(__mips__)
+#if !defined(__APPLE__) && !defined(__mips__)
action.sa_restorer = nullptr;
#endif
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index bd04473f68..2c72ba13ec 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -49,10 +49,7 @@ class AtomicStack {
front_index_.StoreRelaxed(0);
back_index_.StoreRelaxed(0);
debug_is_sorted_ = true;
- int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
- if (result == -1) {
- PLOG(WARNING) << "madvise failed";
- }
+ mem_map_->MadviseDontNeedAndZero();
}
// Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 43a173e2be..a95c0038a4 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -96,7 +96,7 @@ void CardTable::ClearSpaceCards(space::ContinuousSpace* space) {
void CardTable::ClearCardTable() {
COMPILE_ASSERT(kCardClean == 0, clean_card_must_be_0);
- madvise(mem_map_->Begin(), mem_map_->Size(), MADV_DONTNEED);
+ mem_map_->MadviseDontNeedAndZero();
}
bool CardTable::AddrIsInCardTable(const void* addr) const {
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index c294bae4a3..224b33e260 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -79,12 +79,8 @@ std::string SpaceBitmap<kAlignment>::Dump() const {
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::Clear() {
- if (bitmap_begin_ != NULL) {
- // This returns the memory to the system. Successive page faults will return zeroed memory.
- int result = madvise(bitmap_begin_, bitmap_size_, MADV_DONTNEED);
- if (result == -1) {
- PLOG(FATAL) << "madvise failed";
- }
+ if (bitmap_begin_ != nullptr) {
+ mem_map_->MadviseDontNeedAndZero();
}
}
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 10b88b3506..55262f2359 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1507,6 +1507,9 @@ bool RosAlloc::Trim() {
if (madvise_size > 0) {
DCHECK_ALIGNED(madvise_begin, kPageSize);
DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
+ if (!kMadviseZeroes) {
+ memset(madvise_begin, 0, madvise_size);
+ }
CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
}
if (madvise_begin - zero_begin) {
@@ -2117,6 +2120,9 @@ size_t RosAlloc::ReleasePages() {
start = reinterpret_cast<byte*>(fpr) + kPageSize;
}
byte* end = reinterpret_cast<byte*>(fpr) + fpr_size;
+ if (!kMadviseZeroes) {
+ memset(start, 0, end - start);
+ }
CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
reclaimed_bytes += fpr_size;
size_t num_pages = fpr_size / kPageSize;
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 9464331c70..a439188858 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -110,11 +110,17 @@ class RosAlloc {
byte_size -= kPageSize;
if (byte_size > 0) {
if (release_pages) {
+ if (!kMadviseZeroes) {
+ memset(start, 0, byte_size);
+ }
madvise(start, byte_size, MADV_DONTNEED);
}
}
} else {
if (release_pages) {
+ if (!kMadviseZeroes) {
+ memset(start, 0, byte_size);
+ }
madvise(start, byte_size, MADV_DONTNEED);
}
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index c062706d56..890036bc4a 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1130,9 +1130,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
allocations->Reset();
timings_.EndSplit();
- int success = madvise(sweep_array_free_buffer_mem_map_->BaseBegin(),
- sweep_array_free_buffer_mem_map_->BaseSize(), MADV_DONTNEED);
- DCHECK_EQ(success, 0) << "Failed to madvise the sweep array free buffer pages.";
+ sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}
void MarkSweep::Sweep(bool swap_bitmaps) {
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index fd0a92d56f..8b3569232a 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -64,6 +64,9 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
void BumpPointerSpace::Clear() {
// Release the pages back to the operating system.
+ if (!kMadviseZeroes) {
+ memset(Begin(), 0, Limit() - Begin());
+ }
CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
// Reset the end of the space back to the beginning, we move the end forward as we allocate
// objects.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 22a61a21c0..81a86235ec 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -473,6 +473,18 @@ MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
}
+void MemMap::MadviseDontNeedAndZero() {
+ if (base_begin_ != nullptr || base_size_ != 0) {
+ if (!kMadviseZeroes) {
+ memset(base_begin_, 0, base_size_);
+ }
+ int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
+ if (result == -1) {
+ PLOG(WARNING) << "madvise failed";
+ }
+ }
+}
+
bool MemMap::Protect(int prot) {
if (base_begin_ == nullptr && base_size_ == 0) {
prot_ = prot;
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index dc5909b105..e42251ce57 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -30,6 +30,12 @@
namespace art {
+#ifdef __linux__
+static constexpr bool kMadviseZeroes = true;
+#else
+static constexpr bool kMadviseZeroes = false;
+#endif
+
// Used to keep track of mmap segments.
//
// On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
@@ -77,6 +83,8 @@ class MemMap {
bool Protect(int prot);
+ void MadviseDontNeedAndZero();
+
int GetProtect() const {
return prot_;
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 2c24e33fcc..7e3810cd18 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -17,7 +17,11 @@
#include <algorithm>
#include <set>
#include <fcntl.h>
+#ifdef __linux__
#include <sys/sendfile.h>
+#else
+#include <sys/socket.h>
+#endif
#include <sys/stat.h>
#include <unistd.h>
@@ -241,7 +245,12 @@ static void CopyProfileFile(const char* oldfile, const char* newfile) {
return;
}
+#ifdef __linux__
if (sendfile(dst.get(), src.get(), nullptr, stat_src.st_size) == -1) {
+#else
+ off_t len;
+ if (sendfile(dst.get(), src.get(), 0, &len, nullptr, 0) == -1) {
+#endif
PLOG(ERROR) << "Failed to copy profile file " << oldfile << " to " << newfile
<< ". My uid:gid is " << getuid() << ":" << getgid();
}
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 7490e6a762..820bd0420f 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -32,9 +32,11 @@ namespace art {
static void EnableDebugger() {
// To let a non-privileged gdbserver attach to this
// process, we must set our dumpable flag.
+#if defined(HAVE_PRCTL)
if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) == -1) {
PLOG(ERROR) << "prctl(PR_SET_DUMPABLE) failed for pid " << getpid();
}
+#endif
// We don't want core dumps, though, so set the core dump size to 0.
rlimit rl;
rl.rlim_cur = 0;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 89058c88b7..bcb4eb32a7 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -18,7 +18,9 @@
// sys/mount.h has to come before linux/fs.h due to redefinition of MS_RDONLY, MS_BIND, etc
#include <sys/mount.h>
+#ifdef __linux__
#include <linux/fs.h>
+#endif
#include <signal.h>
#include <sys/syscall.h>
@@ -437,6 +439,7 @@ void Runtime::EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_
// Do zygote-mode-only initialization.
bool Runtime::InitZygote() {
+#ifdef __linux__
// zygote goes into its own process group
setpgid(0, 0);
@@ -467,6 +470,10 @@ bool Runtime::InitZygote() {
}
return true;
+#else
+ UNIMPLEMENTED(FATAL);
+ return false;
+#endif
}
void Runtime::DidForkFromZygote() {
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 960d3324d3..46ee27405f 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -327,7 +327,7 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
while (true) {
}
}
-
+#ifdef __linux__
// Remove our signal handler for this signal...
struct sigaction action;
memset(&action, 0, sizeof(action));
@@ -336,6 +336,9 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
sigaction(signal_number, &action, NULL);
// ...and re-raise so we die with the appropriate status.
kill(getpid(), signal_number);
+#else
+ exit(EXIT_FAILURE);
+#endif
}
void Runtime::InitPlatformSignalHandlers() {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 22f0e8097b..b524f34021 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -495,7 +495,9 @@ void Thread::InitStackHwm() {
}
// TODO: move this into the Linux GetThreadStack implementation.
-#if !defined(__APPLE__)
+#if defined(__APPLE__)
+ bool is_main_thread = false;
+#else
// If we're the main thread, check whether we were run with an unlimited stack. In that case,
// glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
// will be broken because we'll die long before we get close to 2GB.
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 7700658fea..f60f795e18 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1054,6 +1054,7 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
if (current_method != nullptr) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
}
+#ifdef __linux__
std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
if (!backtrace->Unwind(0)) {
os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
@@ -1095,6 +1096,7 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
}
os << "\n";
}
+#endif
}
#if defined(__APPLE__)
diff --git a/test/Android.mk b/test/Android.mk
index 109382d28c..789744968a 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -144,7 +144,7 @@ test-art-host-oat-default-$(1): $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/oat-
ANDROID_DATA=/tmp/android-data/test-art-host-oat-default-$(1) \
ANDROID_ROOT=$(HOST_OUT) \
LD_LIBRARY_PATH=$(HOST_LIBRARY_PATH) \
- $(HOST_OUT_EXECUTABLES)/dalvikvm $(DALVIKVM_FLAGS) -XXlib:libartd.so -Ximage:$(HOST_CORE_IMG_LOCATION) -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_LIBRARY_PATH) $(1) $(2) \
+ $(HOST_OUT_EXECUTABLES)/dalvikvm $(DALVIKVM_FLAGS) -XXlib:libartd$(HOST_SHLIB_SUFFIX) -Ximage:$(HOST_CORE_IMG_LOCATION) -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_LIBRARY_PATH) $(1) $(2) \
&& echo test-art-host-oat-default-$(1) PASSED || (echo test-art-host-oat-default-$(1) FAILED && exit 1)
$(hide) rm -r /tmp/android-data/test-art-host-oat-default-$(1)
@@ -154,7 +154,7 @@ test-art-host-oat-interpreter-$(1): $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/
ANDROID_DATA=/tmp/android-data/test-art-host-oat-interpreter-$(1) \
ANDROID_ROOT=$(HOST_OUT) \
LD_LIBRARY_PATH=$(HOST_LIBRARY_PATH) \
- $(HOST_OUT_EXECUTABLES)/dalvikvm -XXlib:libartd.so -Ximage:$(HOST_CORE_IMG_LOCATION) $(DALVIKVM_FLAGS) -Xint -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_LIBRARY_PATH) $(1) $(2) \
+ $(HOST_OUT_EXECUTABLES)/dalvikvm -XXlib:libartd$(HOST_SHLIB_SUFFIX) -Ximage:$(HOST_CORE_IMG_LOCATION) $(DALVIKVM_FLAGS) -Xint -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_LIBRARY_PATH) $(1) $(2) \
&& echo test-art-host-oat-interpreter-$(1) PASSED || (echo test-art-host-oat-interpreter-$(1) FAILED && exit 1)
$(hide) rm -r /tmp/android-data/test-art-host-oat-interpreter-$(1)
diff --git a/test/SignalTest/signaltest.cc b/test/SignalTest/signaltest.cc
index b84e3957ce..dfe319712e 100644
--- a/test/SignalTest/signaltest.cc
+++ b/test/SignalTest/signaltest.cc
@@ -46,7 +46,7 @@ extern "C" JNIEXPORT void JNICALL Java_SignalTest_initSignalTest(JNIEnv*, jclass
action.sa_sigaction = signalhandler;
sigemptyset(&action.sa_mask);
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
-#if !defined(__mips__)
+#if !defined(__APPLE__) && !defined(__mips__)
action.sa_restorer = nullptr;
#endif
diff --git a/tools/Android.mk b/tools/Android.mk
index 6c385dcb45..d3be17f4d6 100644
--- a/tools/Android.mk
+++ b/tools/Android.mk
@@ -16,7 +16,6 @@
LOCAL_PATH := $(call my-dir)
-ifeq ($(WITH_HOST_DALVIK),true)
# Copy the art shell script to the host's bin directory
include $(CLEAR_VARS)
LOCAL_IS_HOST_MODULE := true
@@ -28,5 +27,3 @@ $(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/art $(ACP)
@echo "Copy: $(PRIVATE_MODULE) ($@)"
$(copy-file-to-new-target)
$(hide) chmod 755 $@
-
-endif