path: root/runtime/gc
author     Mathieu Chartier <mathieuc@google.com>  2014-05-07 15:43:14 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2014-05-13 14:45:54 -0700
commit     eb8167a4f4d27fce0530f6724ab8032610cd146b
tree       bcfeaf13ad78f2dd68466bbd0e20c71944f7e854  /runtime/gc
parent     6fb66a2bc4e1c0b7931101153e58714991237af7
Add Handle/HandleScope and delete SirtRef.
Delete SirtRef and replace it with Handle. Handles are value types that wrap a StackReference*.
Rename StackIndirectReferenceTable to HandleScope. Add a scoped handle wrapper (HandleWrapper)
which wraps an Object** and restores it in its destructor. Rename Handle::get -> Get.

Bug: 8473721
Change-Id: Idbfebd4f35af629f0f43931b7c5184b334822c7a
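For readers new to the API, here is a minimal before/after sketch of the pattern applied throughout this change. The types and calls (StackHandleScope, NewHandleWrapper, Handle, Get) are taken from the diff below; the enclosing function is a hypothetical placeholder, not code from this change.

    // Before (SirtRef, removed by this change):
    //   SirtRef<mirror::Class> sirt_klass(self, *klass);
    //   CollectGarbageInternal(...);   // may move the object
    //   *klass = sirt_klass.get();     // manual restore of the caller's pointer
    //
    // After (HandleScope/Handle):
    void SketchOnly(Thread* self, mirror::Class** klass)    // hypothetical helper
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      StackHandleScope<1> hs(self);                          // scope sized for one handle
      HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
      // ... code that may suspend the thread or run a GC ...
      mirror::Class* c = h.Get();                            // Get() replaces the old get()
      // *klass is written back automatically when h goes out of scope.
      (void)c;
    }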
Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/collector/garbage_collector.h  |   2
-rw-r--r--  runtime/gc/collector/mark_sweep.cc         |   4
-rw-r--r--  runtime/gc/collector/mark_sweep.h          |   2
-rw-r--r--  runtime/gc/collector/semi_space-inl.h      |   4
-rw-r--r--  runtime/gc/collector/semi_space.cc         |   8
-rw-r--r--  runtime/gc/collector/semi_space.h          |   2
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc  |   2
-rw-r--r--  runtime/gc/heap-inl.h                      |  20
-rw-r--r--  runtime/gc/heap.cc                         |  91
-rw-r--r--  runtime/gc/heap.h                          |  11
-rw-r--r--  runtime/gc/heap_test.cc                    |  14
-rw-r--r--  runtime/gc/space/malloc_space.cc           |   2
-rw-r--r--  runtime/gc/space/space_test.h              | 111
13 files changed, 145 insertions, 128 deletions
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index d05f45b98c..02dd4d956e 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -142,7 +142,7 @@ class GarbageCollector {
virtual void RevokeAllThreadLocalBuffers() = 0;
// Record that you have freed some objects or large objects, calls Heap::RecordFree.
- // TODO: These are not thread safe, add a lock if we get have parallel sweeping.
+ // TODO: These are not thread safe, add a lock if we get parallel sweeping.
void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
void RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5de7026916..cc258f5a9a 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -106,7 +106,7 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
void MarkSweep::InitializePhase() {
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
- mark_stack_ = heap_->mark_stack_.get();
+ mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
class_count_ = 0;
@@ -123,7 +123,7 @@ void MarkSweep::InitializePhase() {
mark_fastpath_count_ = 0;
mark_slowpath_count_ = 0;
{
- // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+ // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 3ebc0af178..cfb0b5e51c 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -264,7 +264,7 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
+ // Used to Get around thread safety annotations. The call is from MarkingPhase and is guarded by
// IsExclusiveHeld.
void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 55140f613a..47682cc584 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -50,7 +50,7 @@ inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object
return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
}
-// Used to mark and copy objects. Any newly-marked objects who are in the from space get moved to
+// Used to mark and copy objects. Any newly-marked objects who are in the from space Get moved to
// the to-space and have their forward address updated. Objects which have been newly marked are
// pushed on the mark stack.
template<bool kPoisonReferences>
@@ -72,7 +72,7 @@ inline void SemiSpace::MarkObject(
forward_address = MarkNonForwardedObject(obj);
DCHECK(forward_address != nullptr);
// Make sure to only update the forwarding address AFTER you copy the object so that the
- // monitor word doesn't get stomped over.
+ // monitor word doesn't Get stomped over.
obj->SetLockWord(
LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
// Push the object onto the mark stack for later processing.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a406f6da8c..95a2c96091 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -142,7 +142,7 @@ void SemiSpace::RunPhases() {
void SemiSpace::InitializePhase() {
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
- mark_stack_ = heap_->mark_stack_.get();
+ mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
is_large_object_space_immune_ = false;
@@ -154,7 +154,7 @@ void SemiSpace::InitializePhase() {
// Set the initial bitmap.
to_space_live_bitmap_ = to_space_->GetLiveBitmap();
{
- // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+ // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
@@ -172,7 +172,7 @@ void SemiSpace::MarkingPhase() {
CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
if (kStoreStackTraces) {
Locks::mutator_lock_->AssertExclusiveHeld(self_);
- // Store the stack traces into the runtime fault string in case we get a heap corruption
+ // Store the stack traces into the runtime fault string in case we Get a heap corruption
// related crash later.
ThreadState old_state = self_->SetStateUnsafe(kRunnable);
std::ostringstream oss;
@@ -231,7 +231,7 @@ void SemiSpace::MarkingPhase() {
BindBitmaps();
// Process dirty cards and add dirty cards to mod-union tables.
heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
- // Clear the whole card table since we can not get any additional dirty cards during the
+ // Clear the whole card table since we can not Get any additional dirty cards during the
// paused GC. This saves memory but only works for pause the world collectors.
timings_.NewSplit("ClearCardTable");
heap_->GetCardTable()->ClearCardTable();
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 9fdf4717b8..4b1ecc4083 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -242,7 +242,7 @@ class SemiSpace : public GarbageCollector {
// heap. When false, collect only the bump pointer spaces.
bool whole_heap_collection_;
- // How many objects and bytes we moved, used so that we don't need to get the size of the
+ // How many objects and bytes we moved, used so that we don't need to Get the size of the
// to_space_ when calculating how many objects and bytes we freed.
size_t bytes_moved_;
size_t objects_moved_;
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index ce51ac54bd..5a5844642b 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -49,7 +49,7 @@ void StickyMarkSweep::BindBitmaps() {
void StickyMarkSweep::MarkReachableObjects() {
// All reachable objects must be referenced by a root or a dirty card, so we can clear the mark
- // stack here since all objects in the mark stack will get scanned by the card scanning anyways.
+ // stack here since all objects in the mark stack will Get scanned by the card scanning anyways.
// TODO: Not put these objects in the mark stack in the first place.
mark_stack_->Reset();
RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index a06f272b55..7cee5a094c 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -27,7 +27,7 @@
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"
@@ -144,10 +144,10 @@ inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
mirror::Object** end_address;
while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
&start_address, &end_address)) {
- // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
- SirtRefNoVerify<mirror::Object> ref(self, *obj);
+ // TODO: Add handle VerifyObject.
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- *obj = ref.get();
}
self->SetThreadLocalAllocationStack(start_address, end_address);
// Retry on the new thread-local allocation stack.
@@ -159,10 +159,10 @@ inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
// This is safe to do since the GC will never free objects which are neither in the allocation
// stack or the live bitmap.
while (!allocation_stack_->AtomicPushBack(*obj)) {
- // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
- SirtRefNoVerify<mirror::Object> ref(self, *obj);
+ // TODO: Add handle VerifyObject.
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- *obj = ref.get();
}
}
}
@@ -300,11 +300,7 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
mirror::Object** obj) {
if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
- // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
- SirtRef<mirror::Object> ref(self, *obj);
- RequestConcurrentGC(self);
- // Restore obj in case it moved.
- *obj = ref.get();
+ RequestConcurrentGCAndSaveObject(self, obj);
}
}
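The HandleWrapper used above replaces the removed pattern of building a SirtRef and then manually writing *obj = ref.get() after the collection: per the commit message, the wrapper holds the caller's Object** and restores it in its destructor, so the pointer is updated even if the GC moved the object. A rough sketch of that behaviour, consistent with the description but not copied from the new headers:

    // Illustrative approximation only; see handle_scope-inl.h for the real type.
    template <typename T>
    class HandleWrapperSketch : public Handle<T> {
     public:
      HandleWrapperSketch(T** obj, const Handle<T>& handle)
          : Handle<T>(handle), obj_(obj) {}
      ~HandleWrapperSketch() {
        *obj_ = Handle<T>::Get();  // write the (possibly moved) object back to the caller
      }
     private:
      T** const obj_;  // caller's slot, restored on scope exit
    };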
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 723572991c..4642a9824a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -62,7 +62,7 @@
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"
@@ -1070,10 +1070,11 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
size_t alloc_size, size_t* bytes_allocated,
size_t* usable_size,
mirror::Class** klass) {
- mirror::Object* ptr = nullptr;
bool was_default_allocator = allocator == GetCurrentAllocator();
DCHECK(klass != nullptr);
- SirtRef<mirror::Class> sirt_klass(self, *klass);
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
+ klass = nullptr; // Invalidate for safety.
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
@@ -1081,31 +1082,32 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
// If we were the default allocator but the allocator changed while we were suspended,
// abort the allocation.
if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
return nullptr;
}
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
- ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
+ }
}
collector::GcType tried_type = next_gc_type_;
- if (ptr == nullptr) {
- const bool gc_ran =
- CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
- return nullptr;
- }
- if (gc_ran) {
- ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ const bool gc_ran =
+ CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+ if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ return nullptr;
+ }
+ if (gc_ran) {
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
}
}
// Loop through our different Gc types and try to Gc until we get enough free memory.
for (collector::GcType gc_type : gc_plan_) {
- if (ptr != nullptr) {
- break;
- }
if (gc_type == tried_type) {
continue;
}
@@ -1113,40 +1115,41 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
const bool gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
return nullptr;
}
if (gc_ran) {
// Did we free sufficient memory for the allocation to succeed?
- ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
+ }
}
}
// Allocations have failed after GCs; this is an exceptional state.
- if (ptr == nullptr) {
- // Try harder, growing the heap if necessary.
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ // Try harder, growing the heap if necessary.
+ mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
+ }
+ // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
+ // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
+ // VM spec requires that all SoftReferences have been collected and cleared before throwing
+ // OOME.
+ VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
+ << " allocation";
+ // TODO: Run finalization, but this may cause more allocations to occur.
+ // We don't need a WaitForGcToComplete here either.
+ DCHECK(!gc_plan_.empty());
+ CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+ if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ return nullptr;
}
+ ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
if (ptr == nullptr) {
- // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
- // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
- // VM spec requires that all SoftReferences have been collected and cleared before throwing
- // OOME.
- VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
- << " allocation";
- // TODO: Run finalization, but this may cause more allocations to occur.
- // We don't need a WaitForGcToComplete here either.
- DCHECK(!gc_plan_.empty());
- CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
- return nullptr;
- }
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
- if (ptr == nullptr) {
- ThrowOutOfMemoryError(self, alloc_size, false);
- }
+ ThrowOutOfMemoryError(self, alloc_size, false);
}
- *klass = sirt_klass.get();
return ptr;
}
@@ -2536,6 +2539,12 @@ void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
*object = soa.Decode<mirror::Object*>(arg.get());
}
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+ RequestConcurrentGC(self);
+}
+
void Heap::RequestConcurrentGC(Thread* self) {
// Make sure that we can do a concurrent GC.
Runtime* runtime = Runtime::Current();
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index f71de1ac89..3b071d19d3 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -412,7 +412,7 @@ class Heap {
return GetTotalMemory() - num_bytes_allocated_;
}
- // Get the space that corresponds to an object's address. Current implementation searches all
+ // get the space that corresponds to an object's address. Current implementation searches all
// spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
// TODO: consider using faster data structure like binary tree.
space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
@@ -582,6 +582,10 @@ class Heap {
mirror::Object** obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ accounting::ObjectStack* GetMarkStack() {
+ return mark_stack_.get();
+ }
+
// We don't force this to be inlined since it is a slow path.
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
@@ -634,7 +638,10 @@ class Heap {
void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
LOCKS_EXCLUDED(heap_trim_request_lock_);
void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
- void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RequestConcurrentGC(Thread* self)
+ LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
bool IsGCRequestPending() const;
// Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index a85ad4d040..8850b92190 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -20,7 +20,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
namespace gc {
@@ -43,14 +43,16 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
ScopedObjectAccess soa(Thread::Current());
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
for (size_t i = 0; i < 1024; ++i) {
- SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.get(), 2048));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ObjectArray<mirror::Object> > array(hs.NewHandle(
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
for (size_t j = 0; j < 2048; ++j) {
mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
- // SIRT operator -> deferences the SIRT before running the method.
+ // handle scope operator -> deferences the handle scope before running the method.
array->Set<false>(j, string);
}
}
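Note that the test now opens a second StackHandleScope<1> inside the loop instead of sizing the outer scope for all 1024 iterations; each array handle lives for one pass and is released when the inner scope is destroyed. Condensed from the hunk above (loop body elided, inner scope renamed for clarity):

    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::Class> c(hs.NewHandle(
        class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
    for (size_t i = 0; i < 1024; ++i) {
      StackHandleScope<1> inner_hs(soa.Self());  // one handle per iteration
      Handle<mirror::ObjectArray<mirror::Object>> array(inner_hs.NewHandle(
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
      // ... fill the array with strings, as in the test ...
    }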
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 7493c19a94..ba46dccfb2 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -24,7 +24,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 3335e72dab..ce101e4b63 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -48,7 +48,8 @@ class SpaceTest : public CommonRuntimeTest {
}
mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::ClassLoader> null_loader(self, nullptr);
+ StackHandleScope<1> hs(self);
+ auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
if (byte_array_class_ == nullptr) {
mirror::Class* byte_array_class =
Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
@@ -62,10 +63,11 @@ class SpaceTest : public CommonRuntimeTest {
mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
size_t* bytes_allocated, size_t* usable_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
if (obj != nullptr) {
- InstallClass(obj, byte_array_class.get(), bytes);
+ InstallClass(obj, byte_array_class.Get(), bytes);
}
return obj;
}
@@ -73,10 +75,11 @@ class SpaceTest : public CommonRuntimeTest {
mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
size_t* bytes_allocated, size_t* usable_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
if (obj != nullptr) {
- InstallClass(obj, byte_array_class.get(), bytes);
+ InstallClass(obj, byte_array_class.Get(), bytes);
}
return obj;
}
@@ -177,9 +180,10 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
// Succeeds, fits without adjusting the footprint limit.
size_t ptr1_bytes_allocated, ptr1_usable_size;
- SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
- &ptr1_usable_size));
- EXPECT_TRUE(ptr1.get() != nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::Object> ptr1(
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -190,9 +194,9 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
// Succeeds, adjusts the footprint.
size_t ptr3_bytes_allocated, ptr3_usable_size;
- SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
- &ptr3_usable_size));
- EXPECT_TRUE(ptr3.get() != nullptr);
+ Handle<mirror::Object> ptr3(
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
@@ -206,23 +210,23 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
- size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
+ size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
EXPECT_EQ(free3, ptr3_bytes_allocated);
- EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr)));
+ EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
size_t ptr6_bytes_allocated, ptr6_usable_size;
- SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
- &ptr6_usable_size));
- EXPECT_TRUE(ptr6.get() != nullptr);
+ Handle<mirror::Object> ptr6(
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
// Final clean up.
- size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
- space->Free(self, ptr1.reset(nullptr));
+ size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
+ space->Free(self, ptr1.Assign(nullptr));
EXPECT_LE(1U * MB, free1);
// Make sure that the zygote space isn't directly at the start of the space.
@@ -243,8 +247,8 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
AddSpace(space, false);
// Succeeds, fits without adjusting the footprint limit.
- ptr1.reset(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
- EXPECT_TRUE(ptr1.get() != nullptr);
+ ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
+ EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -254,16 +258,16 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- ptr3.reset(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
- EXPECT_TRUE(ptr3.get() != nullptr);
+ ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
+ EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(2U * MB, ptr3_bytes_allocated);
EXPECT_LE(2U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
- space->Free(self, ptr3.reset(nullptr));
+ space->Free(self, ptr3.Assign(nullptr));
// Final clean up.
- free1 = space->AllocationSize(ptr1.get(), nullptr);
- space->Free(self, ptr1.reset(nullptr));
+ free1 = space->AllocationSize(ptr1.Get(), nullptr);
+ space->Free(self, ptr1.Assign(nullptr));
EXPECT_LE(1U * MB, free1);
}
@@ -279,9 +283,10 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
// Succeeds, fits without adjusting the footprint limit.
size_t ptr1_bytes_allocated, ptr1_usable_size;
- SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
- &ptr1_usable_size));
- EXPECT_TRUE(ptr1.get() != nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::Object> ptr1(
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -292,9 +297,9 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
// Succeeds, adjusts the footprint.
size_t ptr3_bytes_allocated, ptr3_usable_size;
- SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
- &ptr3_usable_size));
- EXPECT_TRUE(ptr3.get() != nullptr);
+ Handle<mirror::Object> ptr3(
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
@@ -308,23 +313,23 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
- size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
+ size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
EXPECT_EQ(free3, ptr3_bytes_allocated);
- space->Free(self, ptr3.reset(nullptr));
+ space->Free(self, ptr3.Assign(nullptr));
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
size_t ptr6_bytes_allocated, ptr6_usable_size;
- SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
- &ptr6_usable_size));
- EXPECT_TRUE(ptr6.get() != nullptr);
+ Handle<mirror::Object> ptr6(
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
// Final clean up.
- size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
- space->Free(self, ptr1.reset(nullptr));
+ size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
+ space->Free(self, ptr1.Assign(nullptr));
EXPECT_LE(1U * MB, free1);
}
@@ -345,8 +350,6 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
&usable_size);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
- SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
- lots_of_objects[i] = obj.get();
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
@@ -360,8 +363,6 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
size_t allocation_size, usable_size;
lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
- SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
- lots_of_objects[i] = obj.get();
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
@@ -418,18 +419,19 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
alloc_size = size_of_zero_length_byte_array;
}
}
- SirtRef<mirror::Object> object(self, nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto object(hs.NewHandle<mirror::Object>(nullptr));
size_t bytes_allocated = 0;
if (round <= 1) {
- object.reset(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
} else {
- object.reset(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
}
footprint = space->GetFootprint();
EXPECT_GE(space->Size(), footprint); // invariant
- if (object.get() != nullptr) { // allocation succeeded
- lots_of_objects[i] = object.get();
- size_t allocation_size = space->AllocationSize(object.get(), nullptr);
+ if (object.Get() != nullptr) { // allocation succeeded
+ lots_of_objects[i] = object.Get();
+ size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
EXPECT_EQ(bytes_allocated, allocation_size);
if (object_size > 0) {
EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
@@ -509,16 +511,17 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
space->RevokeAllThreadLocalBuffers();
// All memory was released, try a large allocation to check freed memory is being coalesced
- SirtRef<mirror::Object> large_object(self, nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto large_object(hs.NewHandle<mirror::Object>(nullptr));
size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
size_t bytes_allocated = 0;
if (round <= 1) {
- large_object.reset(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
+ large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
} else {
- large_object.reset(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
- nullptr));
+ large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
+ nullptr));
}
- EXPECT_TRUE(large_object.get() != nullptr);
+ EXPECT_TRUE(large_object.Get() != nullptr);
// Sanity check footprint
footprint = space->GetFootprint();
@@ -527,7 +530,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
EXPECT_LE(space->Size(), growth_limit);
// Clean up
- space->Free(self, large_object.reset(nullptr));
+ space->Free(self, large_object.Assign(nullptr));
// Sanity check footprint
footprint = space->GetFootprint();
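One detail worth calling out in the space_test.h conversion: space->Free(self, ptr3.Assign(nullptr)) only works because Assign, like the old SirtRef::reset, returns the reference that was previously held while installing the new one (the EXPECT_EQ on the freed size above relies on this). A self-contained sketch of that contract, using illustrative names rather than ART's own:

    // Illustrative only: models the observable Assign() behaviour the tests depend on.
    template <typename T>
    class MutableSlotSketch {
     public:
      explicit MutableSlotSketch(T* obj) : obj_(obj) {}
      T* Get() const { return obj_; }
      T* Assign(T* new_obj) {
        T* old_obj = obj_;  // remember the current reference
        obj_ = new_obj;     // install the replacement (often nullptr in the tests)
        return old_obj;     // returned so the caller can, e.g., Free() it
      }
     private:
      T* obj_;
    };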