-rw-r--r--  build/Android.common_build.mk                                 2
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc    36
-rw-r--r--  runtime/gc/allocator/rosalloc.h                               2
-rw-r--r--  runtime/handle_scope-inl.h                                    5
-rw-r--r--  runtime/handle_scope.h                                      104
-rw-r--r--  runtime/handle_scope_test.cc                                  6
-rw-r--r--  runtime/thread.h                                              2
7 files changed, 78 insertions, 79 deletions
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 276e81af2a..14edb716bf 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -196,7 +196,7 @@ ART_TARGET_CLANG_CFLAGS_arm64 += \
   -fno-vectorize
 
 art_debug_cflags := \
-  -O1 \
+  -O2 \
   -DDYNAMIC_ANNOTATIONS_ENABLED=1 \
   -UNDEBUG
 
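Context for the flag change: art_debug_cflags now compiles debug builds at -O2 instead of -O1, while -UNDEBUG keeps assertion macros live regardless of the optimization level. A minimal standalone sketch of that interaction (the file name, flags, and assert are illustrative, not part of this change):

// undebug_demo.cc, illustrative only. Build with: g++ -O2 -UNDEBUG undebug_demo.cc
// assert() compiles to a no-op only when NDEBUG is defined, so explicitly
// undefining NDEBUG keeps the check active even in an optimized build.
#include <cassert>
#include <cstdio>

int main() {
  int value = 1;
  assert(value == 1 && "still evaluated at -O2 because NDEBUG is undefined");
  std::printf("assertions enabled\n");
  return 0;
}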
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 054dd4698d..96903db414 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1275,8 +1275,8 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
   // is at *m = sp. Will update to point to the bottom of the save frame.
   //
   // Note: assumes ComputeAll() has been run before.
-  void LayoutCalleeSaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
-                             uint32_t* handle_scope_entries)
+  void LayoutCalleeSaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
+                             HandleScope** handle_scope)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::ArtMethod* method = (*m)->AsMirrorPtr();
 
@@ -1289,8 +1289,6 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
     sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.
 
     // Under the callee saves put handle scope and new method stack reference.
-    *handle_scope_entries = num_handle_scope_references_;
-
     size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
     size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
 
@@ -1300,8 +1298,8 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
                                             reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
 
     uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
-    *table = reinterpret_cast<HandleScope*>(sp8_table);
-    (*table)->SetNumberOfReferences(num_handle_scope_references_);
+    *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
+                                        num_handle_scope_references_);
 
     // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
     uint8_t* method_pointer = sp8;
@@ -1319,12 +1317,12 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
 
   // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
   // Returns the new bottom. Note: this may be unaligned.
-  uint8_t* LayoutJNISaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
-                              uint32_t* handle_scope_entries)
+  uint8_t* LayoutJNISaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
+                              HandleScope** handle_scope)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // First, fix up the layout of the callee-save frame.
     // We have to squeeze in the HandleScope, and relocate the method pointer.
-    LayoutCalleeSaveFrame(m, sp, table, handle_scope_entries);
+    LayoutCalleeSaveFrame(self, m, sp, handle_scope);
 
     // The bottom of the callee-save frame is now where the method is, *m.
     uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
@@ -1336,14 +1334,15 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
   }
 
   // WARNING: After this, *sp won't be pointing to the method anymore!
-  uint8_t* ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
-                         uint32_t shorty_len, HandleScope** table, uint32_t* handle_scope_entries,
+  uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m,
+                         bool is_static, const char* shorty, uint32_t shorty_len,
+                         HandleScope** handle_scope,
                          uintptr_t** start_stack, uintptr_t** start_gpr,
                          uint32_t** start_fpr)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Walk(shorty, shorty_len);
 
     // JNI part.
-    uint8_t* sp8 = LayoutJNISaveFrame(m, reinterpret_cast<void*>(*m), table, handle_scope_entries);
+    uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);
 
     sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
@@ -1426,20 +1425,19 @@ class FillNativeCall {
 // of transitioning into native code.
 class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
  public:
-  BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
-                              const char* shorty, uint32_t shorty_len, Thread* self)
+  BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len,
+                              StackReference<mirror::ArtMethod>** sp)
      : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
        jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
     ComputeGenericJniFrameSize fsc;
     uintptr_t* start_gpr_reg;
     uint32_t* start_fpr_reg;
     uintptr_t* start_stack_arg;
-    uint32_t handle_scope_entries;
-    bottom_of_used_area_ = fsc.ComputeLayout(sp, is_static, shorty, shorty_len, &handle_scope_,
-                                             &handle_scope_entries, &start_stack_arg,
+    bottom_of_used_area_ = fsc.ComputeLayout(self, sp, is_static, shorty, shorty_len,
+                                             &handle_scope_,
+                                             &start_stack_arg,
                                              &start_gpr_reg, &start_fpr_reg);
 
-    handle_scope_->SetNumberOfReferences(handle_scope_entries);
     jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
 
     // jni environment is always first argument
@@ -1611,7 +1609,7 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
   const char* shorty = called->GetShorty(&shorty_len);
 
   // Run the visitor.
-  BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), shorty, shorty_len, self);
+  BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
   visitor.VisitArguments();
   visitor.FinalizeHandleScope(self);
 
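The re-layout above no longer casts raw frame bytes to HandleScope* and patches fields afterwards; it constructs the scope in place with HandleScope::Create, already linked to the thread's current top scope. A minimal sketch of that placement-new pattern, using a hypothetical Scope type rather than the real HandleScope:

// Simplified model of the placement-new construction used by HandleScope::Create.
// Scope is a stand-in type; the real code builds the scope inside the JNI frame.
#include <cstdint>
#include <new>

struct Scope {
  Scope* link;
  uint32_t count;
  static Scope* Create(void* storage, Scope* link, uint32_t count) {
    // Construct a Scope inside caller-provided memory; no heap allocation.
    return new (storage) Scope{link, count};
  }
};

int main() {
  alignas(Scope) uint8_t buffer[sizeof(Scope)];  // e.g. carved out of a stack frame
  Scope* top = nullptr;                          // the thread's current top scope
  Scope* scope = Scope::Create(buffer, top, 4);  // pre-linked at construction
  return scope->count == 4 ? 0 : 1;
}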
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 8374ff70b2..ad7f901181 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -285,7 +285,7 @@ class RosAlloc {
 
   // Returns the byte size of the bracket size from the index.
   static size_t IndexToBracketSize(size_t idx) {
-    DCHECK(idx < kNumOfSizeBrackets);
+    DCHECK_LT(idx, kNumOfSizeBrackets);
     return bracketSizes[idx];
   }
   // Returns the index of the size bracket from the bracket size.
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 7bc811db87..2717180b4d 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -26,9 +26,10 @@ namespace art {
 
 template<size_t kNumReferences>
 inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self)
-    : HandleScope(kNumReferences), self_(self), pos_(0) {
+    : HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) {
+  COMPILE_ASSERT(kNumReferences >= 1, stack_handle_scope_must_contain_at_least_1_reference);
   // TODO: Figure out how to use a compile assert.
-  DCHECK_EQ(&references_[0], &references_storage_[0]);
+  CHECK_EQ(&storage_[0], GetReferences());
   for (size_t i = 0; i < kNumReferences; ++i) {
     SetReference(i, nullptr);
   }
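The COMPILE_ASSERT added above (ART's pre-C++11 compile-time assertion macro) rejects a zero-sized scope when the template is instantiated rather than at runtime. A rough standard-C++11 equivalent, with a hypothetical StackScopeSketch type:

// Standard static_assert version of the same constraint; illustrative only.
#include <cstddef>

template <size_t kNumReferences>
struct StackScopeSketch {
  static_assert(kNumReferences >= 1,
                "stack handle scope must contain at least 1 reference");
};

int main() {
  StackScopeSketch<1> ok;      // compiles
  // StackScopeSketch<0> bad;  // would be rejected at compile time
  (void)ok;
  return 0;
}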
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 99059f9e59..f795e387f0 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -66,39 +66,28 @@ class PACKED(4) HandleScope {
     return link_;
   }
 
-  void SetLink(HandleScope* link) {
-    DCHECK_NE(this, link);
-    link_ = link;
-  }
-
-  // Sets the number_of_references_ field for constructing tables out of raw memory. Warning: will
-  // not resize anything.
-  void SetNumberOfReferences(uint32_t num_references) {
-    number_of_references_ = num_references;
-  }
-
-  mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
+  ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_LT(i, number_of_references_);
-    return references_[i].AsMirrorPtr();
+    return GetReferences()[i].AsMirrorPtr();
   }
 
-  Handle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
+  ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_LT(i, number_of_references_);
-    return Handle<mirror::Object>(&references_[i]);
+    return Handle<mirror::Object>(&GetReferences()[i]);
   }
 
-  MutableHandle<mirror::Object> GetMutableHandle(size_t i)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+  ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_LT(i, number_of_references_);
-    return MutableHandle<mirror::Object>(&references_[i]);
+    return MutableHandle<mirror::Object>(&GetReferences()[i]);
  }
 
-  void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
+  ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK_LT(i, number_of_references_);
-    references_[i].Assign(object);
+    GetReferences()[i].Assign(object);
   }
 
   bool Contains(StackReference<mirror::Object>* handle_scope_entry) const {
@@ -106,39 +95,53 @@ class PACKED(4) HandleScope {
     // jni_compiler should have a jobject/jclass as a native method is
     // passed in a this pointer or a class
     DCHECK_GT(number_of_references_, 0U);
-    return &references_[0] <= handle_scope_entry &&
-        handle_scope_entry <= &references_[number_of_references_ - 1];
+    return &GetReferences()[0] <= handle_scope_entry &&
+        handle_scope_entry <= &GetReferences()[number_of_references_ - 1];
   }
 
-  // Offset of link within HandleScope, used by generated code
+  // Offset of link within HandleScope, used by generated code.
   static size_t LinkOffset(size_t pointer_size) {
     return 0;
   }
 
-  // Offset of length within handle scope, used by generated code
+  // Offset of length within handle scope, used by generated code.
   static size_t NumberOfReferencesOffset(size_t pointer_size) {
     return pointer_size;
   }
 
-  // Offset of link within handle scope, used by generated code
+  // Offset of link within handle scope, used by generated code.
   static size_t ReferencesOffset(size_t pointer_size) {
     return pointer_size + sizeof(number_of_references_);
   }
 
+  // Placement new creation.
+  static HandleScope* Create(void* storage, HandleScope* link, uint32_t num_references)
+      WARN_UNUSED {
+    return new (storage) HandleScope(link, num_references);
+  }
+
  protected:
-  explicit HandleScope(size_t number_of_references) :
-      link_(nullptr), number_of_references_(number_of_references) {
+  // Return backing storage used for references.
+  ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
+    uintptr_t address = reinterpret_cast<uintptr_t>(this) + ReferencesOffset(sizeof(void*));
+    return reinterpret_cast<StackReference<mirror::Object>*>(address);
   }
 
-  HandleScope* link_;
-  uint32_t number_of_references_;
+  // Semi-hidden constructor. Construction expected by generated code and StackHandleScope.
+  explicit HandleScope(HandleScope* link, uint32_t num_references) :
+      link_(link), number_of_references_(num_references) {
+  }
 
-  // number_of_references_ are available if this is allocated and filled in by jni_compiler.
-  StackReference<mirror::Object> references_[0];
+  // Link-list of handle scopes. The root is held by a Thread.
+  HandleScope* const link_;
 
-  private:
-  template<size_t kNumReferences> friend class StackHandleScope;
-
+  // Number of handlerized references.
+  const uint32_t number_of_references_;
+
+  // Storage for references.
+  // StackReference<mirror::Object> references_[number_of_references_]
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(HandleScope);
 };
 
@@ -169,22 +172,22 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
   // Currently unused, using this GetReference instead of the one in HandleScope is preferred to
   // avoid compiler optimizations incorrectly optimizing out of bound array accesses.
   // TODO: Remove this when it is un-necessary.
-  mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
-    DCHECK_LT(i, number_of_references_);
-    return references_storage_[i].AsMirrorPtr();
+  ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK_LT(i, kNumReferences);
+    return GetReferences()[i].AsMirrorPtr();
   }
 
-  MutableHandle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
-    DCHECK_LT(i, number_of_references_);
-    return MutableHandle<mirror::Object>(&references_storage_[i]);
+  ALWAYS_INLINE MutableHandle<mirror::Object> GetHandle(size_t i)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK_LT(i, kNumReferences);
+    return MutableHandle<mirror::Object>(&GetReferences()[i]);
   }
 
-  void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      ALWAYS_INLINE {
-    DCHECK_LT(i, number_of_references_);
-    references_storage_[i].Assign(object);
+  ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK_LT(i, kNumReferences);
+    GetReferences()[i].Assign(object);
   }
 
   template<class T>
@@ -204,9 +207,8 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
   }
 
  private:
-  // References_storage_ needs to be first so that it appears in the same location as
-  // HandleScope::references_.
-  StackReference<mirror::Object> references_storage_[kNumReferences];
+  // Reference storage needs to be first as expected by the HandleScope layout.
+  StackReference<mirror::Object> storage_[kNumReferences];
 
   // The thread that the stack handle scope is a linked list upon. The stack handle scope will
   // push and pop itself from this thread.
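The structural point of the handle_scope.h change: the flexible array member references_[0] is gone, and reference storage is instead reached by offset arithmetic from this, so the JNI trampoline (raw memory plus Create) and StackHandleScope (a real member array placed first) share one layout. A simplified model of that header-plus-trailing-storage scheme; ScopeSketch and Slot are stand-ins, and the real class is PACKED(4), so its offsets come from ReferencesOffset rather than plain sizeof:

// Illustrative only: a fixed-size header immediately followed by N slots.
#include <cstdint>
#include <cstdlib>
#include <new>

using Slot = uint32_t;  // stand-in for a 32-bit StackReference<mirror::Object>

struct ScopeSketch {
  ScopeSketch* link;
  uint32_t count;

  static size_t SizeOf(uint32_t count) {
    return sizeof(ScopeSketch) + count * sizeof(Slot);  // header + slots
  }
  Slot* GetReferences() const {
    // Slots start immediately after the fixed-size header.
    uintptr_t address = reinterpret_cast<uintptr_t>(this) + sizeof(ScopeSketch);
    return reinterpret_cast<Slot*>(address);
  }
};

int main() {
  void* raw = std::malloc(ScopeSketch::SizeOf(3));
  ScopeSketch* scope = new (raw) ScopeSketch{nullptr, 3};
  scope->GetReferences()[0] = 0x1234;  // mirrors the offset test in handle_scope_test.cc
  bool ok = scope->GetReferences()[0] == 0x1234;
  std::free(raw);
  return ok ? 0 : 1;
}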
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index 7afd279942..dc999877d0 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -25,7 +25,7 @@ namespace art {
 
 template<size_t kNumReferences> class NoThreadStackHandleScope : public HandleScope {
  public:
-  explicit NoThreadStackHandleScope() : HandleScope(kNumReferences) {
+  explicit NoThreadStackHandleScope(HandleScope* link) : HandleScope(link, kNumReferences) {
   }
   ~NoThreadStackHandleScope() {
   }
@@ -41,10 +41,8 @@ class NoThreadStackHandleScope : public HandleScope {
 TEST(HandleScopeTest, Offsets) NO_THREAD_SAFETY_ANALYSIS {
   // As the members of HandleScope are private, we cannot use OFFSETOF_MEMBER
   // here. So do the inverse: set some data, and access it through pointers created from the offsets.
-  NoThreadStackHandleScope<1> test_table;
+  NoThreadStackHandleScope<0x9ABC> test_table(reinterpret_cast<HandleScope*>(0x5678));
   test_table.SetReference(0, reinterpret_cast<mirror::Object*>(0x1234));
-  test_table.SetLink(reinterpret_cast<HandleScope*>(0x5678));
-  test_table.SetNumberOfReferences(0x9ABC);
 
   uint8_t* table_base_ptr = reinterpret_cast<uint8_t*>(&test_table);
 
diff --git a/runtime/thread.h b/runtime/thread.h
index 998e47275c..b0be841730 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -692,7 +692,7 @@ class Thread {
   }
 
   void PushHandleScope(HandleScope* handle_scope) {
-    handle_scope->SetLink(tlsPtr_.top_handle_scope);
+    DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
     tlsPtr_.top_handle_scope = handle_scope;
   }
 
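With the link now fixed in the constructor, Thread::PushHandleScope only verifies the invariant and swings the thread-local top pointer, and popping walks the stored link. A minimal model of that push/pop protocol (ThreadSketch and ScopeSketch are illustrative, not the real ART types):

// Sketch: the scope's link is captured at construction, so pushing is just
// a pointer swap guarded by an assertion, as in the patched PushHandleScope.
#include <cassert>

struct ScopeSketch;

struct ThreadSketch {
  ScopeSketch* top_handle_scope = nullptr;
};

struct ScopeSketch {
  ScopeSketch* const link;  // fixed at construction, like HandleScope::link_
  explicit ScopeSketch(ScopeSketch* link) : link(link) {}
};

void PushHandleScope(ThreadSketch* self, ScopeSketch* scope) {
  assert(scope->link == self->top_handle_scope);  // the new DCHECK_EQ invariant
  self->top_handle_scope = scope;
}

ScopeSketch* PopHandleScope(ThreadSketch* self) {
  ScopeSketch* top = self->top_handle_scope;
  self->top_handle_scope = top->link;  // unlink by following the stored link
  return top;
}

int main() {
  ThreadSketch t;
  ScopeSketch s1(t.top_handle_scope);  // link captured at construction
  PushHandleScope(&t, &s1);
  ScopeSketch s2(t.top_handle_scope);
  PushHandleScope(&t, &s2);
  PopHandleScope(&t);
  PopHandleScope(&t);
  return t.top_handle_scope == nullptr ? 0 : 1;
}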