author     Mathieu Chartier <mathieuc@google.com>    2013-09-13 13:46:47 -0700
committer  Mathieu Chartier <mathieuc@google.com>    2013-11-11 15:34:27 -0800
commit     590fee9e8972f872301c2d16a575d579ee564bee (patch)
tree       b02db45c72f1911ec896b93379ada0276aea3199 /runtime/scoped_thread_state_change.h
parent     5b70680b8df6d8fa95bb8e1070d0107f3d388940 (diff)
Compacting collector.
The compacting collector is currently similar to semispace. It works by copying objects back and forth between two bump pointer spaces. Some types of objects are "non-movable" due to current runtime limitations: Classes, Methods, and Fields.

Bump pointer spaces are a new type of continuous alloc space with no lock in the allocation code path; allocation uses atomic operations to increase an index. Traversing the objects in a bump pointer space relies on Object::SizeOf matching the allocated size exactly.

Runtime changes: JNI::GetArrayElements returns copies of objects if you attempt to get the backing data of a movable array. For GetArrayElementsCritical, we return direct backing storage for any type of array, but temporarily disable the GC until the critical region is completed.

Added a new runtime call, VisitObjects, which is used in place of the old pattern of flushing the allocation stack and walking the bitmaps.

Changed the image writer to be compaction safe and to use the object monitor word for forwarding addresses.

Added a bunch of SIRTs to ClassLinker, MethodLinker, etc.

TODO: Enable switching allocators, compacting on background, etc.

Bug: 8981901
Change-Id: I3c886fd322a6eef2b99388d19a765042ec26ab99
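Note on the bump pointer allocation path described above: allocation advances a cursor with atomic operations, and traversing the space depends on each object reporting its exact allocated size. The following is a minimal, self-contained C++ sketch of that idea only; the names (BumpPointerSpaceSketch, FakeObject) are invented for illustration and this is not ART's actual implementation.

#include <atomic>
#include <cstddef>
#include <cstdint>

// Stand-in for mirror::Object; SizeOf() must report the exact allocated size,
// otherwise the walk below loses its place.
struct FakeObject {
  size_t size;
  size_t SizeOf() const { return size; }
};

class BumpPointerSpaceSketch {
 public:
  BumpPointerSpaceSketch(uint8_t* begin, uint8_t* limit)
      : begin_(begin), end_(begin), limit_(limit) {}

  // Lock-free allocation path: a CAS loop that bumps the end cursor.
  void* Alloc(size_t num_bytes) {
    uint8_t* old_end = end_.load(std::memory_order_relaxed);
    do {
      if (num_bytes > static_cast<size_t>(limit_ - old_end)) {
        return nullptr;  // space exhausted
      }
    } while (!end_.compare_exchange_weak(old_end, old_end + num_bytes,
                                         std::memory_order_relaxed));
    return old_end;
  }

  // Walking the space relies on objects being packed back to back and on
  // SizeOf() matching the allocated size exactly (a real allocator would also
  // round sizes up to the allocation alignment).
  template <typename Visitor>
  void Walk(Visitor&& visit) const {
    uint8_t* pos = begin_;
    uint8_t* end = end_.load(std::memory_order_acquire);
    while (pos < end) {
      FakeObject* obj = reinterpret_cast<FakeObject*>(pos);
      visit(obj);
      pos += obj->SizeOf();
    }
  }

 private:
  uint8_t* const begin_;
  std::atomic<uint8_t*> end_;
  uint8_t* const limit_;
};

In this sketch a caller would construct the space over a pre-reserved byte range and call Alloc() concurrently from multiple threads, while Walk() is only meaningful once mutators are quiescent, which is when a copying phase would scan the space.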
Diffstat (limited to 'runtime/scoped_thread_state_change.h')
-rw-r--r--  runtime/scoped_thread_state_change.h  29
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index c39cdb2679..1ca6c4e4fa 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -34,9 +34,8 @@ class ScopedThreadStateChange {
     if (UNLIKELY(self_ == NULL)) {
       // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
       old_thread_state_ = kTerminated;
-      MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
       Runtime* runtime = Runtime::Current();
-      CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown());
+      CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
     } else {
       bool runnable_transition;
       DCHECK_EQ(self, Thread::Current());
@@ -63,9 +62,8 @@ class ScopedThreadStateChange {
   ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
     if (UNLIKELY(self_ == NULL)) {
       if (!expected_has_no_thread_) {
-        MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
         Runtime* runtime = Runtime::Current();
-        bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
+        bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown(nullptr);
         CHECK(shutting_down);
       }
     } else {
@@ -167,6 +165,10 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
       return NULL;
     }
+    if (kIsDebugBuild) {
+      Runtime::Current()->GetHeap()->VerifyObject(obj);
+    }
+
     DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
     IndirectReferenceTable& locals = Env()->locals;
@@ -185,7 +187,6 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
       }
     }
 #endif
-
     if (Vm()->work_around_app_jni_bugs) {
       // Hand out direct pointers to support broken old apps.
       return reinterpret_cast<T>(obj);
@@ -206,10 +207,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
-#ifdef MOVING_GARBAGE_COLLECTOR
-    // TODO: we should make these unique weak globals if Field instances can ever move.
-    UNIMPLEMENTED(WARNING);
-#endif
+    CHECK(!kMovingFields);
     return reinterpret_cast<mirror::ArtField*>(fid);
   }
@@ -217,9 +215,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
-#ifdef MOVING_GARBAGE_COLLECTOR
-    UNIMPLEMENTED(WARNING);
-#endif
+    CHECK(!kMovingFields);
     return reinterpret_cast<jfieldID>(field);
   }
@@ -227,10 +223,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
-#ifdef MOVING_GARBAGE_COLLECTOR
-    // TODO: we should make these unique weak globals if Method instances can ever move.
-    UNIMPLEMENTED(WARNING);
-#endif
+    CHECK(!kMovingMethods);
     return reinterpret_cast<mirror::ArtMethod*>(mid);
   }
@@ -238,9 +231,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Locks::mutator_lock_->AssertSharedHeld(Self());
     DCHECK_EQ(thread_state_, kRunnable);  // Don't work with raw objects in non-runnable states.
-#ifdef MOVING_GARBAGE_COLLECTOR
-    UNIMPLEMENTED(WARNING);
-#endif
+    CHECK(!kMovingMethods);
     return reinterpret_cast<jmethodID>(method);
   }
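Note on the CHECK(!kMovingFields) / CHECK(!kMovingMethods) lines introduced above: they replace the old #ifdef MOVING_GARBAGE_COLLECTOR warnings and assert that decoding a jfieldID or jmethodID straight into a raw pointer stays sound, which only holds while Field and Method objects are in the non-movable category. A stand-alone sketch of that invariant follows; the flag, type, and function names below are stand-ins invented for illustration, not the real ART declarations.

#include <cassert>

// Hypothetical stand-ins; the real flags and mirror::ArtField live in the runtime.
constexpr bool kMovingFields = false;
struct ArtFieldStandIn {};
using jfieldID_standin = void*;

// A jfieldID here is just the field pointer in disguise, so decoding is a cast.
// That is only valid while the compacting collector never relocates fields; the
// assert documents the assumption that the new CHECKs enforce in the runtime.
inline ArtFieldStandIn* DecodeFieldSketch(jfieldID_standin fid) {
  assert(!kMovingFields && "raw jfieldIDs require non-movable fields");
  return reinterpret_cast<ArtFieldStandIn*>(fid);
}

inline jfieldID_standin EncodeFieldSketch(ArtFieldStandIn* field) {
  assert(!kMovingFields && "raw jfieldIDs require non-movable fields");
  return reinterpret_cast<jfieldID_standin>(field);
}

If fields or methods were ever made movable, these IDs would have to become indirect handles (for example, the unique weak globals mentioned in the removed TODO) rather than raw pointers.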