author     Ian Rogers <irogers@google.com>    2014-06-25 11:52:14 -0700
committer  Ian Rogers <irogers@google.com>    2014-06-25 11:55:07 -0700
commit     c3ccc1039e0bbc0744f958cb8719cf96bce5b853 (patch)
tree       46be84eae8aba7f1698415752262bc16da72aab6 /runtime
parent     3153895d020038cd89311ed6ee241ce9b9f18a59 (diff)
Fix the Mac build on x86-64.
Change-Id: I4ed3783a96d844de0b0a295df26d0a48c02a3726
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arch/stub_test.cc                             |  64
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.S              |  34
-rw-r--r--  runtime/arch/x86_64/context_x86_64.cc                 |   6
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc        |   4
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S        | 125
-rw-r--r--  runtime/arch/x86_64/thread_x86_64.cc                  |  10
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc  |   4
-rw-r--r--  runtime/mem_map.cc                                    |   6
-rw-r--r--  runtime/mem_map.h                                     |  10
9 files changed, 171 insertions, 92 deletions
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 22b8cca4d4..a31c08b8c2 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -258,7 +258,7 @@ class StubTest : public CommonRuntimeTest {
         "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
         "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
         "memory");  // clobber.
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) && !defined(__APPLE__)
     // Note: Uses the native convention
     // TODO: Set the thread?
     __asm__ __volatile__(
@@ -483,7 +483,7 @@ class StubTest : public CommonRuntimeTest {
         "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
         "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
         "memory");  // clobber.
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) && !defined(__APPLE__)
     // Note: Uses the native convention
     // TODO: Set the thread?
     __asm__ __volatile__(
@@ -518,7 +518,7 @@ class StubTest : public CommonRuntimeTest {
   // Method with 32b arg0, 64b arg1
   size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
                               mirror::ArtMethod* referrer) {
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
     // Just pass through.
     return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
 #else
@@ -533,7 +533,7 @@ class StubTest : public CommonRuntimeTest {
   // Method with 32b arg0, 32b arg1, 64b arg2
   size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
                                Thread* self, mirror::ArtMethod* referrer) {
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
     // Just pass through.
     return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
 #else
@@ -547,12 +547,12 @@ class StubTest : public CommonRuntimeTest {
 };

-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_memcpy(void);
 #endif

 TEST_F(StubTest, Memcpy) {
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();

   uint32_t orig[20];
@@ -588,12 +588,12 @@ TEST_F(StubTest, Memcpy) {
 #endif
 }

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_lock_object(void);
 #endif

 TEST_F(StubTest, LockObject) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   static constexpr size_t kThinLockLoops = 100;

   Thread* self = Thread::Current();
@@ -664,14 +664,14 @@ class RandGen {
 };

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_lock_object(void);
 extern "C" void art_quick_unlock_object(void);
 #endif

 // NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
 static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   static constexpr size_t kThinLockLoops = 100;

   Thread* self = Thread::Current();
@@ -817,12 +817,12 @@ TEST_F(StubTest, UnlockObject) {
   TestUnlockObject(this);
 }

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_check_cast(void);
 #endif

 TEST_F(StubTest, CheckCast) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();
   // Find some classes.
   ScopedObjectAccess soa(self);
@@ -867,7 +867,7 @@ TEST_F(StubTest, CheckCast) {
 }

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_aput_obj_with_null_and_bound_check(void);
 // Do not check non-checked ones, we'd need handlers and stuff...
 #endif

@@ -875,7 +875,7 @@ extern "C" void art_quick_aput_obj_with_null_and_bound_check(void);
 TEST_F(StubTest, APutObj) {
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();
   // Create an object
   ScopedObjectAccess soa(self);
@@ -1003,7 +1003,7 @@ TEST_F(StubTest, APutObj) {
 TEST_F(StubTest, AllocObject) {
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   // TODO: Check the "Unresolved" allocation stubs

   Thread* self = Thread::Current();
@@ -1125,7 +1125,7 @@ TEST_F(StubTest, AllocObject) {
 TEST_F(StubTest, AllocObjectArray) {
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   // TODO: Check the "Unresolved" allocation stubs

   Thread* self = Thread::Current();
@@ -1204,14 +1204,14 @@ TEST_F(StubTest, AllocObjectArray) {
 }

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_string_compareto(void);
 #endif

 TEST_F(StubTest, StringCompareTo) {
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   // TODO: Check the "Unresolved" allocation stubs

   Thread* self = Thread::Current();
@@ -1301,7 +1301,7 @@ TEST_F(StubTest, StringCompareTo) {
 }

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_set32_static(void);
 extern "C" void art_quick_get32_static(void);
 #endif

@@ -1309,7 +1309,7 @@ extern "C" void art_quick_get32_static(void);
 static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
                            mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   constexpr size_t num_values = 7;
   uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

@@ -1337,7 +1337,7 @@ static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>
 }

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_set32_instance(void);
 extern "C" void art_quick_get32_instance(void);
 #endif

@@ -1345,7 +1345,7 @@ extern "C" void art_quick_get32_instance(void);
 static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
                              Thread* self, mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   constexpr size_t num_values = 7;
   uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

@@ -1379,7 +1379,7 @@ static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
 }

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_set_obj_static(void);
 extern "C" void art_quick_get_obj_static(void);

@@ -1406,7 +1406,7 @@ static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* se
 static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
                             mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);

   // Allocate a string object for simplicity.
@@ -1422,7 +1422,7 @@ static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField
 }

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_set_obj_instance(void);
 extern "C" void art_quick_get_obj_instance(void);

@@ -1453,7 +1453,7 @@ static void set_and_check_instance(Handle<mirror::ArtField>* f, mirror::Object*
 static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
                               Thread* self, mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);

   // Allocate a string object for simplicity.
@@ -1471,7 +1471,7 @@ static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtFie

 // TODO: Complete these tests for 32b architectures.

-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
 extern "C" void art_quick_set64_static(void);
 extern "C" void art_quick_get64_static(void);
 #endif

@@ -1479,7 +1479,7 @@ extern "C" void art_quick_get64_static(void);
 static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
                            mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
   constexpr size_t num_values = 8;
   uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

@@ -1506,7 +1506,7 @@ static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>
 }

-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
 extern "C" void art_quick_set64_instance(void);
 extern "C" void art_quick_get64_instance(void);
 #endif

@@ -1514,7 +1514,7 @@ extern "C" void art_quick_get64_instance(void);
 static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
                              Thread* self, mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__x86_64__) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
   constexpr size_t num_values = 8;
   uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

@@ -1678,12 +1678,12 @@ TEST_F(StubTest, Fields64) {
   TestFields(self, this, Primitive::Type::kPrimLong);
 }

-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_imt_conflict_trampoline(void);
 #endif

 TEST_F(StubTest, IMT) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();

   Thread* self = Thread::Current();
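Note on the stub_test.cc hunks above: they are all one mechanical rewrite. Every architecture guard that named __x86_64__ now reads (defined(__x86_64__) && !defined(__APPLE__)), so on Mac the hand-written assembly stubs are compiled out and each test falls through to its existing #else branch, which skips rather than fails. A minimal sketch of the pattern, with an illustrative (not ART's actual) helper macro, test body, and message:

    #include <cstdio>

    // Sketch: claim quick-stub support only where the assembly actually
    // assembles. Mac x86-64 is excluded until the stubs are ported.
    #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
        (defined(__x86_64__) && !defined(__APPLE__))
    #define HAS_QUICK_STUBS 1  // hypothetical convenience macro, not in the diff
    #else
    #define HAS_QUICK_STUBS 0
    #endif

    void TestLockObjectSketch() {
    #if HAS_QUICK_STUBS
      // ... invoke art_quick_lock_object and verify the lock word ...
    #else
      printf("Skipping lock_object: no stub on this platform\n");  // skip, don't fail
    #endif
    }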
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 34c8b82166..d3cd4df32e 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -19,7 +19,7 @@

 #include "asm_support_x86_64.h"

-#if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
+#if defined(__APPLE__) || (defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5))
     // Clang's as(1) doesn't let you name macro parameters prior to 3.5.
     #define MACRO0(macro_name) .macro macro_name
     #define MACRO1(macro_name, macro_arg1) .macro macro_name
@@ -27,13 +27,12 @@
     #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
     #define END_MACRO .endmacro

-    // Clang's as(1) uses $0, $1, and so on for macro arguments prior to 3.5.
+    // Clang's as(1) uses $0, $1, and so on for macro arguments.
+    #define RAW_VAR(name,index) $index
     #define VAR(name,index) SYMBOL($index)
-    #define PLT_VAR(name, index) SYMBOL($index)@PLT
+    #define PLT_VAR(name, index) PLT_SYMBOL($index)
     #define REG_VAR(name,index) %$index
     #define CALL_MACRO(name,index) $index
-    #define FUNCTION_TYPE(name,index) .type $index, @function
-    #define SIZE(name,index) .size $index, .-$index

     // The use of $x for arguments mean that literals need to be represented with $$x in macros.
     #define LITERAL(value) $value
@@ -52,17 +51,27 @@
     // no special meaning to $, so literals are still just $x. The use of altmacro means % is a
     // special character meaning care needs to be taken when passing registers as macro arguments.
     .altmacro
+    #define RAW_VAR(name,index) name&
     #define VAR(name,index) name&
     #define PLT_VAR(name, index) name&@PLT
     #define REG_VAR(name,index) %name
     #define CALL_MACRO(name,index) name&
-    #define FUNCTION_TYPE(name,index) .type name&, @function
-    #define SIZE(name,index) .size name, .-name

     #define LITERAL(value) $value
     #define MACRO_LITERAL(value) $value
 #endif

+#if defined(__APPLE__)
+    #define FUNCTION_TYPE(name,index)
+    #define SIZE(name,index)
+#elif defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
+    #define FUNCTION_TYPE(name,index) .type $index, @function
+    #define SIZE(name,index) .size $index, .-$index
+#else
+    #define FUNCTION_TYPE(name,index) .type name&, @function
+    #define SIZE(name,index) .size name, .-name
+#endif
+
     // CFI support.
 #if !defined(__APPLE__)
     #define CFI_STARTPROC .cfi_startproc
@@ -86,9 +95,14 @@
     // Symbols.
 #if !defined(__APPLE__)
     #define SYMBOL(name) name
-    #define PLT_SYMBOL(name) name ## @PLT
+    #if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
+        // TODO: Disabled for old clang 3.3, this leads to text relocations and there should be a
+        // better fix.
+        #define PLT_SYMBOL(name) name // ## @PLT
+    #else
+        #define PLT_SYMBOL(name) name ## @PLT
+    #endif
 #else
-    // Mac OS' symbols have an _ prefix.
     #define SYMBOL(name) _ ## name
     #define PLT_SYMBOL(name) _ ## name
 #endif
@@ -103,8 +117,6 @@
 MACRO1(DEFINE_FUNCTION, c_name)
     .globl VAR(c_name, 0)
     ALIGN_FUNCTION_ENTRY
 VAR(c_name, 0):
-    // Have a local entrypoint that's not globl
-VAR(c_name, 0)_local:
     CFI_STARTPROC
     // Ensure we get a sane starting CFA.
     CFI_DEF_CFA(rsp, 8)
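Note on the asm_support_x86_64.S hunks: two toolchain differences drive them. Apple's assembler only accepts the positional $0/$1 macro-argument syntax (hence __APPLE__ joining the old-clang branch), and it has no .type/.size directives, so FUNCTION_TYPE and SIZE expand to nothing there. Mach-O additionally prefixes C-visible symbols with an underscore, which is exactly what SYMBOL() and PLT_SYMBOL() hide. A sketch of that last point from the C++ side (the function name is hypothetical):

    // The same extern "C" function is seen by the assembler/linker under
    // different names depending on the object format:
    //   ELF (Linux):     art_quick_demo
    //   Mach-O (Mac):   _art_quick_demo
    extern "C" int art_quick_demo(int x) {
      return x + 1;
    }
    // Assembly therefore refers to SYMBOL(art_quick_demo), which expands to
    // the bare name on ELF and to _art_quick_demo on Mach-O.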
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 0ccbd279f5..e1f47ee3d4 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -59,8 +59,8 @@ void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
     size_t j = 2;  // Offset j to skip return address spill.
     for (size_t i = 0; i < kNumberOfFloatRegisters; ++i) {
       if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
-        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
-                                        frame_info.FrameSizeInBytes());
+        fprs_[i] = reinterpret_cast<uint64_t*>(
+            fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_info.FrameSizeInBytes()));
         j++;
       }
     }
@@ -93,7 +93,7 @@ bool X86_64Context::SetGPR(uint32_t reg, uintptr_t value) {

 bool X86_64Context::SetFPR(uint32_t reg, uintptr_t value) {
   CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
-  CHECK_NE(fprs_[reg], &gZero);
+  CHECK_NE(fprs_[reg], reinterpret_cast<const uint64_t*>(&gZero));
   if (fprs_[reg] != nullptr) {
     *fprs_[reg] = value;
     return true;
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 92aabeeb14..b6f51f741c 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -112,6 +112,9 @@ extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);

 void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
                      PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+#if defined(__APPLE__)
+  UNIMPLEMENTED(FATAL);
+#else
   // Interpreter
   ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
   ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
@@ -216,6 +219,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
   qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
   qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
   qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+#endif  // __APPLE__
 };

 }  // namespace art
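Note on entrypoints_init_x86_64.cc: rather than porting every assignment, InitEntryPoints() simply aborts on Apple targets. The goal of this change is to make the Mac build compile and link, not to run. A plain-C++ sketch of the idiom (ART's actual macro is UNIMPLEMENTED(FATAL); the fprintf/abort pair below is an illustrative stand-in):

    #include <cstdio>
    #include <cstdlib>

    void InitEntryPointsSketch(/* entrypoint tables */) {
    #if defined(__APPLE__)
      // Unported path: fail loudly at runtime instead of wiring up stubs
      // that do not exist on this platform.
      fprintf(stderr, "InitEntryPoints: unimplemented on this platform\n");
      abort();
    #else
      // ... assign interpreter/JNI/portable/quick entrypoints as in the diff ...
    #endif
    }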
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index c9220c87ba..668fb882c7 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -23,6 +23,10 @@
      * Runtime::CreateCalleeSaveMethod(kSaveAll)
      */
 MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // R10 := Runtime::Current()
     movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
     movq (%r10), %r10
@@ -45,6 +49,7 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
 #if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 6*8 + 8 + 8)
 #error "SAVE_ALL_CALLEE_SAVE_FRAME(X86_64) size not as expected."
 #endif
+#endif  // __APPLE__
 END_MACRO

@@ -52,6 +57,10 @@ END_MACRO
      * Runtime::CreateCalleeSaveMethod(kRefsOnly)
      */
 MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // R10 := Runtime::Current()
     movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
     movq (%r10), %r10
@@ -74,6 +83,7 @@ MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME)
 #if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 6*8 + 8 + 8)
 #error "REFS_ONLY_CALLEE_SAVE_FRAME(X86_64) size not as expected."
 #endif
+#endif  // __APPLE__
 END_MACRO

 MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME)
@@ -93,6 +103,10 @@ END_MACRO
      * Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
      */
 MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // R10 := Runtime::Current()
     movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
     movq (%r10), %r10
@@ -130,6 +144,7 @@ MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME)
 #if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11*8 + 80 + 8)
 #error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86_64) size not as expected."
 #endif
+#endif  // __APPLE__
 END_MACRO

 MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME)
@@ -366,6 +381,10 @@ END_MACRO
      * r9 = char* shorty
      */
 DEFINE_FUNCTION art_quick_invoke_stub
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // Set up argument XMM registers.
     leaq 1(%r9), %r10              // R10 := shorty + 1  ; ie skip return arg character.
     leaq 4(%rsi), %r11             // R11 := arg_array + 4 ; ie skip this pointer.
@@ -431,6 +450,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
 .Lreturn_float_quick:
     movss %xmm0, (%r8)             // Store the floating point result.
     ret
+#endif  // __APPLE__
 END_FUNCTION art_quick_invoke_stub

     /*
@@ -445,6 +465,10 @@ END_FUNCTION art_quick_invoke_stub
      * r9 = char* shorty
      */
 DEFINE_FUNCTION art_quick_invoke_static_stub
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     // Set up argument XMM registers.
     leaq 1(%r9), %r10              // R10 := shorty + 1  ; ie skip return arg character
     movq %rsi, %r11                // R11 := arg_array
@@ -509,6 +533,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
 .Lreturn_float_quick2:
     movss %xmm0, (%r8)             // Store the floating point result.
     ret
+#endif  // __APPLE__
 END_FUNCTION art_quick_invoke_static_stub

 MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
@@ -559,6 +584,45 @@ MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
     END_FUNCTION VAR(c_name, 0)
 END_MACRO

+MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
+    DEFINE_FUNCTION VAR(c_name, 0)
+    movl 8(%rsp), %esi                  // pass referrer
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+                                        // arg0 is in rdi
+    movq %gs:THREAD_SELF_OFFSET, %rdx   // pass Thread::Current()
+    movq %rsp, %rcx                     // pass SP
+    call PLT_VAR(cxx_name, 1)           // cxx_name(arg0, referrer, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
+    CALL_MACRO(return_macro, 2)
+    END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
+    DEFINE_FUNCTION VAR(c_name, 0)
+    movl 8(%rsp), %edx                  // pass referrer
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+                                        // arg0 and arg1 are in rdi/rsi
+    movq %gs:THREAD_SELF_OFFSET, %rcx   // pass Thread::Current()
+    movq %rsp, %r8                      // pass SP
+    call PLT_VAR(cxx_name, 1)           // (arg0, arg1, referrer, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
+    CALL_MACRO(return_macro, 2)
+    END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
+    DEFINE_FUNCTION VAR(c_name, 0)
+    movl 8(%rsp), %ecx                  // pass referrer
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+                                        // arg0, arg1, and arg2 are in rdi/rsi/rdx
+    movq %gs:THREAD_SELF_OFFSET, %r8    // pass Thread::Current()
+    movq %rsp, %r9                      // pass SP
+    call PLT_VAR(cxx_name, 1)           // cxx_name(arg0, arg1, arg2, referrer, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
+    CALL_MACRO(return_macro, 2)         // return or deliver exception
+    END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
 MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
     testq %rax, %rax               // rax == 0 ?
    jz  1f                         // if rax == 0 goto 1
@@ -783,14 +847,23 @@ END_FUNCTION art_quick_check_cast
      * rdi(edi) = array, rsi(esi) = index, rdx(edx) = value
      */
 DEFINE_FUNCTION art_quick_aput_obj_with_null_and_bound_check
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     testl %edi, %edi
     // testq %rdi, %rdi
     jnz art_quick_aput_obj_with_bound_check_local
     jmp art_quick_throw_null_pointer_exception_local
+#endif  // __APPLE__
 END_FUNCTION art_quick_aput_obj_with_null_and_bound_check

 DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     movl ARRAY_LENGTH_OFFSET(%edi), %ecx
     // movl ARRAY_LENGTH_OFFSET(%rdi), %ecx  // This zero-extends, so value(%rcx)=value(%ecx)
     cmpl %ecx, %esi
@@ -800,6 +873,7 @@ DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
     mov %ecx, %esi
     // mov %rcx, %rsi
     jmp art_quick_throw_array_bounds_local
+#endif  // __APPLE__
 END_FUNCTION art_quick_aput_obj_with_bound_check

@@ -894,47 +968,6 @@ UNIMPLEMENTED art_quick_lshl
 UNIMPLEMENTED art_quick_lshr
 UNIMPLEMENTED art_quick_lushr

-
-MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
-    DEFINE_FUNCTION VAR(c_name, 0)
-    movl 8(%rsp), %esi                  // pass referrer
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
-                                        // arg0 is in rdi
-    movq %gs:THREAD_SELF_OFFSET, %rdx   // pass Thread::Current()
-    movq %rsp, %rcx                     // pass SP
-    call PLT_VAR(cxx_name, 1)           // cxx_name(arg0, referrer, Thread*, SP)
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
-    CALL_MACRO(return_macro, 2)
-    END_FUNCTION VAR(c_name, 0)
-END_MACRO
-
-MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
-    DEFINE_FUNCTION VAR(c_name, 0)
-    movl 8(%rsp), %edx                  // pass referrer
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
-                                        // arg0 and arg1 are in rdi/rsi
-    movq %gs:THREAD_SELF_OFFSET, %rcx   // pass Thread::Current()
-    movq %rsp, %r8                      // pass SP
-    call PLT_VAR(cxx_name, 1)           // (arg0, arg1, referrer, Thread*, SP)
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
-    CALL_MACRO(return_macro, 2)
-    END_FUNCTION VAR(c_name, 0)
-END_MACRO
-
-MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
-    DEFINE_FUNCTION VAR(c_name, 0)
-    movl 8(%rsp), %ecx                  // pass referrer
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME
-                                        // arg0, arg1, and arg2 are in rdi/rsi/rdx
-    movq %gs:THREAD_SELF_OFFSET, %r8    // pass Thread::Current()
-    movq %rsp, %r9                      // pass SP
-    call PLT_VAR(cxx_name, 1)           // cxx_name(arg0, arg1, arg2, referrer, Thread*, SP)
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
-    CALL_MACRO(return_macro, 2)         // return or deliver exception
-    END_FUNCTION VAR(c_name, 0)
-END_MACRO
-
 THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
 THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_EAX_ZERO
 THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
@@ -1006,10 +1039,15 @@ END_FUNCTION art_quick_proxy_invoke_handler
      * rax is a hidden argument that holds the target method's dex method index.
      */
 DEFINE_FUNCTION art_quick_imt_conflict_trampoline
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     movl 8(%rsp), %edi            // load caller Method*
     movl METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi  // load dex_cache_resolved_methods
     movl OBJECT_ARRAY_DATA_OFFSET(%rdi, %rax, 4), %edi  // load the target method
     jmp art_quick_invoke_interface_trampoline_local
+#endif  // __APPLE__
 END_FUNCTION art_quick_imt_conflict_trampoline

 DEFINE_FUNCTION art_quick_resolution_trampoline
@@ -1294,6 +1332,10 @@ END_FUNCTION art_quick_to_interpreter_bridge
      * Routine that intercepts method calls and returns.
      */
 DEFINE_FUNCTION art_quick_instrumentation_entry
+#if defined(__APPLE__)
+    int3
+    int3
+#else
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME

     movq %rdi, %r12               // Preserve method pointer in a callee-save.
@@ -1313,6 +1355,7 @@ DEFINE_FUNCTION art_quick_instrumentation_entry
     RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME

     jmp *%rax                     // Tail call to intended method.
+#endif  // __APPLE__
 END_FUNCTION art_quick_instrumentation_entry

 DEFINE_FUNCTION art_quick_instrumentation_exit
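Note on the quick_entrypoints_x86_64.S hunks: bodies that depend on ELF-only constructs (the %gs-based Thread::Current(), @GOTPCREL references, the removed _local symbol aliases) are replaced on Apple with a pair of int3 instructions. int3 is the x86 breakpoint opcode, so if control ever reaches an unported stub the process stops on SIGTRAP immediately instead of executing whatever bytes happen to follow. The same effect written from C++, as a sketch (the stub name is hypothetical):

    extern "C" void art_quick_unported_stub() {
    #if defined(__APPLE__) && defined(__x86_64__)
      __asm__ volatile("int3");  // trap: stub not yet ported to Mach-O
      __asm__ volatile("int3");
    #else
      // ... real assembly-backed implementation ...
    #endif
    }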
diff --git a/runtime/arch/x86_64/thread_x86_64.cc b/runtime/arch/x86_64/thread_x86_64.cc
index b7a5c43839..6dff2b4a55 100644
--- a/runtime/arch/x86_64/thread_x86_64.cc
+++ b/runtime/arch/x86_64/thread_x86_64.cc
@@ -21,18 +21,28 @@
 #include "thread-inl.h"
 #include "thread_list.h"

+#if defined(__linux__)
 #include <asm/prctl.h>
 #include <sys/prctl.h>
 #include <sys/syscall.h>
+#endif

 namespace art {

+#if defined(__linux__)
 static void arch_prctl(int code, void* val) {
   syscall(__NR_arch_prctl, code, val);
 }
+#endif
+
 void Thread::InitCpu() {
   MutexLock mu(nullptr, *Locks::modify_ldt_lock_);
+
+#if defined(__linux__)
   arch_prctl(ARCH_SET_GS, this);
+#else
+  UNIMPLEMENTED(FATAL) << "Need to set GS";
+#endif

   // Allow easy indirection back to Thread*.
   tlsPtr_.self = this;
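Note on thread_x86_64.cc: arch_prctl(ARCH_SET_GS, ...) is a Linux-only syscall, which is why both the headers and the call are now fenced with __linux__; other platforms hit UNIMPLEMENTED(FATAL) until an equivalent way to set the GS base is wired up. A sketch of the Linux call, with error checking added for illustration:

    #if defined(__linux__)
    #include <asm/prctl.h>    // ARCH_SET_GS
    #include <sys/syscall.h>  // __NR_arch_prctl
    #include <unistd.h>       // syscall()
    #include <cstdio>

    // Point the GS segment base at a per-thread object so assembly can
    // address it as %gs:OFFSET (e.g. movq %gs:THREAD_SELF_OFFSET, %rdx
    // in the quick entrypoints above).
    static void SetGsBase(void* thread) {
      if (syscall(__NR_arch_prctl, ARCH_SET_GS, thread) != 0) {
        perror("arch_prctl(ARCH_SET_GS)");
      }
    }
    #endif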
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 330125458e..dde74de87a 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -154,10 +154,12 @@ void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrument
 }

 // Generate the entrypoint functions.
+#if !defined(__APPLE__) || !defined(__LP64__)
 GENERATE_ENTRYPOINTS(_dlmalloc);
 GENERATE_ENTRYPOINTS(_rosalloc);
 GENERATE_ENTRYPOINTS(_bump_pointer);
 GENERATE_ENTRYPOINTS(_tlab);
+#endif

 static bool entry_points_instrumented = false;
 static gc::AllocatorType entry_points_allocator = gc::kAllocatorTypeDlMalloc;
@@ -172,6 +174,7 @@ void SetQuickAllocEntryPointsInstrumented(bool instrumented) {

 void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
   switch (entry_points_allocator) {
+#if !defined(__APPLE__) || !defined(__LP64__)
     case gc::kAllocatorTypeDlMalloc: {
       SetQuickAllocEntryPoints_dlmalloc(qpoints, entry_points_instrumented);
       break;
     }
@@ -190,6 +193,7 @@ void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
       SetQuickAllocEntryPoints_tlab(qpoints, entry_points_instrumented);
       break;
     }
+#endif
     default: {
       LOG(FATAL) << "Unimplemented";
     }
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 8d987dfd9a..1074253fea 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -72,7 +72,7 @@ std::ostream& operator<<(std::ostream& os, const std::multimap<void*, MemMap*>&

 std::multimap<void*, MemMap*> MemMap::maps_;

-#if defined(__LP64__) && !defined(__x86_64__)
+#if USE_ART_LOW_4G_ALLOCATOR
 // Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

 // The regular start of memory allocations. The first 64KB is protected by SELinux.
@@ -235,7 +235,7 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
   // A page allocator would be a useful abstraction here, as
   // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
   // 2) The linear scheme, even with simple saving of the last known position, is very crude
-#if defined(__LP64__) && !defined(__x86_64__)
+#if USE_ART_LOW_4G_ALLOCATOR
   // MAP_32BIT only available on x86_64.
   void* actual = MAP_FAILED;
   if (low_4gb && expected == nullptr) {
@@ -299,7 +299,7 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
   }

 #else
-#ifdef __x86_64__
+#if defined(__LP64__)
   if (low_4gb && expected == nullptr) {
     flags |= MAP_32BIT;
   }
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index e42251ce57..defa6a52fd 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -30,6 +30,12 @@

 namespace art {

+#if defined(__LP64__) && (!defined(__x86_64__) || defined(__APPLE__))
+#define USE_ART_LOW_4G_ALLOCATOR 1
+#else
+#define USE_ART_LOW_4G_ALLOCATOR 0
+#endif
+
 #ifdef __linux__
 static constexpr bool kMadviseZeroes = true;
 #else
@@ -147,8 +153,8 @@ class MemMap {
   size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
   int prot_;  // Protection of the map.

-#if defined(__LP64__) && !defined(__x86_64__)
-  static uintptr_t next_mem_pos_;   // next memory location to check for low_4g extent
+#if USE_ART_LOW_4G_ALLOCATOR
+  static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
 #endif

   // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
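Note on the mem_map changes: the scattered "defined(__LP64__) && !defined(__x86_64__)" tests collapse into a single USE_ART_LOW_4G_ALLOCATOR switch, and Mac x86-64 now opts into the manual low-4GB path because MAP_32BIT is a Linux-only mmap flag. The compile-time decision, restated as a sketch (the probing loop itself is elided):

    #include <cstddef>
    #include <sys/mman.h>

    // Policy after this change:
    //  - 64-bit Linux x86-64: let the kernel place the mapping via MAP_32BIT.
    //  - other 64-bit targets, including Mac x86-64: probe candidate
    //    addresses below 4GB ourselves (cursor tracked by next_mem_pos_).
    #if defined(__LP64__) && (!defined(__x86_64__) || defined(__APPLE__))
    #define USE_ART_LOW_4G_ALLOCATOR 1
    #else
    #define USE_ART_LOW_4G_ALLOCATOR 0
    #endif

    void* MapLow4GbSketch(size_t byte_count) {
    #if USE_ART_LOW_4G_ALLOCATOR
      // ... linear scan of addresses below 4GB, as in MemMap::MapAnonymous ...
      return nullptr;  // placeholder for the probing loop
    #elif defined(__LP64__)
      return mmap(nullptr, byte_count, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
    #else
      return mmap(nullptr, byte_count, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    #endif
    }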