-rw-r--r--  compiler/driver/compiler_driver.cc | 97
-rw-r--r--  compiler/driver/compiler_driver.h | 3
-rw-r--r--  compiler/driver/compiler_driver_test.cc | 9
-rw-r--r--  compiler/image_test.cc | 11
-rw-r--r--  compiler/image_writer.cc | 346
-rw-r--r--  compiler/image_writer.h | 48
-rw-r--r--  compiler/jni/jni_compiler_test.cc | 4
-rw-r--r--  compiler/oat_test.cc | 3
-rw-r--r--  compiler/oat_writer.cc | 37
-rw-r--r--  oatdump/oatdump.cc | 57
-rw-r--r--  runtime/Android.mk | 2
-rw-r--r--  runtime/arch/alloc_entrypoints.S | 36
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 203
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 26
-rw-r--r--  runtime/base/mutex-inl.h | 2
-rw-r--r--  runtime/base/mutex.cc | 6
-rw-r--r--  runtime/base/mutex.h | 5
-rw-r--r--  runtime/base/timing_logger.cc | 5
-rw-r--r--  runtime/base/timing_logger.h | 1
-rw-r--r--  runtime/check_jni.cc | 17
-rw-r--r--  runtime/class_linker-inl.h | 34
-rw-r--r--  runtime/class_linker.cc | 356
-rw-r--r--  runtime/class_linker.h | 80
-rw-r--r--  runtime/class_linker_test.cc | 48
-rw-r--r--  runtime/common_test.h | 15
-rw-r--r--  runtime/debugger.cc | 32
-rw-r--r--  runtime/dex_file.cc | 14
-rw-r--r--  runtime/dex_file.h | 10
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 4
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 8
-rw-r--r--  runtime/entrypoints/quick/quick_lock_entrypoints.cc | 12
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 10
-rw-r--r--  runtime/exception_test.cc | 2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 8
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 10
-rw-r--r--  runtime/gc/collector/garbage_collector.h | 38
-rw-r--r--  runtime/gc/collector/mark_sweep-inl.h | 16
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 105
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 53
-rw-r--r--  runtime/gc/collector/partial_mark_sweep.cc | 2
-rw-r--r--  runtime/gc/collector/semi_space-inl.h | 37
-rw-r--r--  runtime/gc/collector/semi_space.cc | 799
-rw-r--r--  runtime/gc/collector/semi_space.h | 289
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc | 5
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h | 8
-rw-r--r--  runtime/gc/heap-inl.h | 51
-rw-r--r--  runtime/gc/heap.cc | 909
-rw-r--r--  runtime/gc/heap.h | 158
-rw-r--r--  runtime/gc/heap_test.cc | 8
-rw-r--r--  runtime/gc/space/bump_pointer_space-inl.h | 52
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 88
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 149
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 75
-rw-r--r--  runtime/gc/space/dlmalloc_space.h | 19
-rw-r--r--  runtime/gc/space/image_space.cc | 7
-rw-r--r--  runtime/gc/space/large_object_space.h | 8
-rw-r--r--  runtime/gc/space/space-inl.h | 18
-rw-r--r--  runtime/gc/space/space.cc | 1
-rw-r--r--  runtime/gc/space/space.h | 124
-rw-r--r--  runtime/gc/space/space_test.cc | 14
-rw-r--r--  runtime/globals.h | 9
-rw-r--r--  runtime/intern_table.cc | 8
-rw-r--r--  runtime/interpreter/interpreter.cc | 4
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 7
-rw-r--r--  runtime/jni_internal.cc | 142
-rw-r--r--  runtime/jni_internal.h | 3
-rw-r--r--  runtime/jni_internal_test.cc | 22
-rw-r--r--  runtime/lock_word-inl.h | 5
-rw-r--r--  runtime/lock_word.h | 32
-rw-r--r--  runtime/mirror/array-inl.h | 40
-rw-r--r--  runtime/mirror/array.cc | 15
-rw-r--r--  runtime/mirror/array.h | 23
-rw-r--r--  runtime/mirror/class-inl.h | 23
-rw-r--r--  runtime/mirror/class.cc | 12
-rw-r--r--  runtime/mirror/class.h | 7
-rw-r--r--  runtime/mirror/object.cc | 64
-rw-r--r--  runtime/mirror/object.h | 1
-rw-r--r--  runtime/mirror/object_array-inl.h | 9
-rw-r--r--  runtime/mirror/object_test.cc | 32
-rw-r--r--  runtime/mirror/stack_trace_element.cc | 12
-rw-r--r--  runtime/mirror/stack_trace_element.h | 7
-rw-r--r--  runtime/mirror/string.cc | 27
-rw-r--r--  runtime/mirror/string.h | 6
-rw-r--r--  runtime/monitor.cc | 97
-rw-r--r--  runtime/monitor.h | 6
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 66
-rw-r--r--  runtime/native/java_lang_Class.cc | 3
-rw-r--r--  runtime/native/java_lang_reflect_Array.cc | 6
-rw-r--r--  runtime/native/java_lang_reflect_Proxy.cc | 16
-rw-r--r--  runtime/native/scoped_fast_native_object_access.h | 8
-rw-r--r--  runtime/object_utils.h | 138
-rw-r--r--  runtime/reference_table.cc | 2
-rw-r--r--  runtime/root_visitor.h | 2
-rw-r--r--  runtime/runtime.cc | 67
-rw-r--r--  runtime/runtime.h | 7
-rw-r--r--  runtime/scoped_thread_state_change.h | 29
-rw-r--r--  runtime/sirt_ref.h | 1
-rw-r--r--  runtime/stack.cc | 5
-rw-r--r--  runtime/stack.h | 38
-rw-r--r--  runtime/thread.cc | 49
-rw-r--r--  runtime/thread.h | 41
-rw-r--r--  runtime/thread_list.cc | 15
-rw-r--r--  runtime/thread_list.h | 3
-rw-r--r--  runtime/utils.h | 2
-rw-r--r--  runtime/verifier/method_verifier.cc | 176
-rw-r--r--  runtime/verifier/method_verifier.h | 27
-rw-r--r--  runtime/verifier/reg_type.cc | 3
-rw-r--r--  runtime/verifier/reg_type_cache.cc | 3
109 files changed, 3920 insertions(+), 1996 deletions(-)
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 9cc94e8c0d..4af492bf6e 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -513,10 +513,9 @@ void CompilerDriver::CompileAll(jobject class_loader,
}
}
-static DexToDexCompilationLevel GetDexToDexCompilationlevel(mirror::ClassLoader* class_loader,
- const DexFile& dex_file,
- const DexFile::ClassDef& class_def)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static DexToDexCompilationLevel GetDexToDexCompilationlevel(
+ SirtRef<mirror::ClassLoader>& class_loader, const DexFile& dex_file,
+ const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* klass = class_linker->FindClass(descriptor, class_loader);
@@ -531,7 +530,7 @@ static DexToDexCompilationLevel GetDexToDexCompilationlevel(mirror::ClassLoader*
// function). Since image classes can be verified again while compiling an application,
// we must prevent the DEX-to-DEX compiler from introducing them.
// TODO: find a way to enable "quick" instructions for image classes and remove this check.
- bool compiling_image_classes = (class_loader == NULL);
+ bool compiling_image_classes = class_loader.get() == nullptr;
if (compiling_image_classes) {
return kRequired;
} else if (klass->IsVerified()) {
@@ -579,7 +578,8 @@ void CompilerDriver::CompileOne(const mirror::ArtMethod* method, base::TimingLog
{
ScopedObjectAccess soa(Thread::Current());
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(jclass_loader);
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(jclass_loader));
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(class_loader, *dex_file, class_def);
}
CompileMethod(code_item, method->GetAccessFlags(), method->GetInvokeType(),
@@ -721,8 +721,8 @@ void CompilerDriver::LoadImageClasses(base::TimingLogger& timings)
for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
- mirror::DexCache* dex_cache = class_linker->FindDexCache(*dex_file);
- mirror:: ClassLoader* class_loader = NULL;
+ SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(*dex_file));
+ SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
SirtRef<mirror::Class> klass(self, class_linker->ResolveType(*dex_file, exception_type_idx,
dex_cache, class_loader));
if (klass.get() == NULL) {
@@ -782,15 +782,14 @@ void CompilerDriver::UpdateImageClasses(base::TimingLogger& timings) {
const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
gc::Heap* heap = Runtime::Current()->GetHeap();
// TODO: Image spaces only?
+ ScopedObjectAccess soa(Thread::Current());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- heap->FlushAllocStack();
- heap->GetLiveBitmap()->Walk(FindClinitImageClassesCallback, this);
+ heap->VisitObjects(FindClinitImageClassesCallback, this);
self->EndAssertNoThreadSuspension(old_cause);
}
}
-bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file,
- uint32_t type_idx) {
+bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx) {
if (IsImage() &&
IsImageClass(dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_))) {
if (kIsDebugBuild) {
@@ -815,7 +814,7 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
if (IsImage()) {
// We resolve all const-string strings when building for the image.
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), Runtime::Current()->GetClassLinker()->FindDexCache(dex_file));
Runtime::Current()->GetClassLinker()->ResolveString(dex_file, string_idx, dex_cache);
result = true;
}
@@ -903,26 +902,27 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_id
}
static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
- mirror::DexCache* dex_cache,
+ SirtRef<mirror::DexCache>& dex_cache,
const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// The passed dex_cache is a hint, sanity check before asking the class linker that will take a
// lock.
if (dex_cache->GetDexFile() != mUnit->GetDexFile()) {
- dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+ dex_cache.reset(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
}
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
- const DexFile::MethodId& referrer_method_id = mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
+ SirtRef<mirror::ClassLoader>
+ class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ const DexFile::MethodId& referrer_method_id =
+ mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
return mUnit->GetClassLinker()->ResolveType(*mUnit->GetDexFile(), referrer_method_id.class_idx_,
dex_cache, class_loader);
}
-static mirror::ArtField* ComputeFieldReferencedFromCompilingMethod(ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit,
- uint32_t field_idx)
+static mirror::ArtField* ComputeFieldReferencedFromCompilingMethod(
+ ScopedObjectAccess& soa, const DexCompilationUnit* mUnit, uint32_t field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
return mUnit->GetClassLinker()->ResolveField(*mUnit->GetDexFile(), field_idx, dex_cache,
class_loader, false);
}
@@ -932,8 +932,8 @@ static mirror::ArtMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjec
uint32_t method_idx,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
return mUnit->GetClassLinker()->ResolveMethod(*mUnit->GetDexFile(), method_idx, dex_cache,
class_loader, NULL, type);
}
@@ -947,9 +947,10 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi
// Try to resolve field and ignore if an Incompatible Class Change Error (ie is static).
mirror::ArtField* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && !resolved_field->IsStatic()) {
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(),
+ resolved_field->GetDeclaringClass()->GetDexCache());
mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, resolved_field->GetDeclaringClass()->GetDexCache(),
- mUnit);
+ ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
if (referrer_class != NULL) {
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
bool access_ok = referrer_class->CanAccess(fields_class) &&
@@ -997,9 +998,9 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
// Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static).
mirror::ArtField* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && resolved_field->IsStatic()) {
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), resolved_field->GetDeclaringClass()->GetDexCache());
mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, resolved_field->GetDeclaringClass()->GetDexCache(),
- mUnit);
+ ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
if (referrer_class != NULL) {
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
if (fields_class == referrer_class) {
@@ -1085,7 +1086,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
*direct_code = 0;
*direct_method = 0;
bool use_dex_cache = false;
- bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
+ const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
if (compiler_backend_ == kPortable) {
if (sharp_type != kStatic && sharp_type != kDirect) {
return;
@@ -1198,9 +1199,9 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
}
// Don't try to fast-path if we don't understand the caller's class or this appears to be an
// Incompatible Class Change Error.
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), resolved_method->GetDeclaringClass()->GetDexCache());
mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, resolved_method->GetDeclaringClass()->GetDexCache(),
- mUnit);
+ ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
bool icce = resolved_method->CheckIncompatibleClassChange(*invoke_type);
if (referrer_class != NULL && !icce) {
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
@@ -1254,10 +1255,8 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
const MethodReference* devirt_map_target =
verifier::MethodVerifier::GetDevirtMap(caller_method, dex_pc);
if (devirt_map_target != NULL) {
- mirror::DexCache* target_dex_cache =
- mUnit->GetClassLinker()->FindDexCache(*devirt_map_target->dex_file);
- mirror::ClassLoader* class_loader =
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+ SirtRef<mirror::DexCache> target_dex_cache(soa.Self(), mUnit->GetClassLinker()->FindDexCache(*devirt_map_target->dex_file));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
mirror::ArtMethod* called_method =
mUnit->GetClassLinker()->ResolveMethod(*devirt_map_target->dex_file,
devirt_map_target->dex_method_index,
@@ -1509,13 +1508,11 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
if (!SkipClass(class_linker, jclass_loader, dex_file, class_def)) {
ScopedObjectAccess soa(self);
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(jclass_loader);
- mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file);
-
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
// Resolve the class.
mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
class_loader);
-
bool resolve_fields_and_methods;
if (klass == NULL) {
// Class couldn't be resolved, for example, super-class is in a different dex file. Don't
@@ -1598,8 +1595,8 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i
ScopedObjectAccess soa(Thread::Current());
ClassLinker* class_linker = manager->GetClassLinker();
const DexFile& dex_file = *manager->GetDexFile();
- mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file);
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader()));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (klass == NULL) {
@@ -1652,8 +1649,9 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = manager->GetClassLinker();
jobject jclass_loader = manager->GetClassLoader();
- mirror::Class* klass = class_linker->FindClass(descriptor,
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ SirtRef<mirror::ClassLoader> class_loader(
+ soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ mirror::Class* klass = class_linker->FindClass(descriptor, class_loader);
if (klass == NULL) {
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
@@ -1663,11 +1661,10 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
* This is to ensure the class is structurally sound for compilation. An unsound class
* will be rejected by the verifier and later skipped during compilation in the compiler.
*/
- mirror::DexCache* dex_cache = class_linker->FindDexCache(dex_file);
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
std::string error_msg;
- if (verifier::MethodVerifier::VerifyClass(&dex_file, dex_cache,
- soa.Decode<mirror::ClassLoader*>(jclass_loader),
- &class_def, true, &error_msg) ==
+ if (verifier::MethodVerifier::VerifyClass(&dex_file, dex_cache, class_loader, &class_def, true,
+ &error_msg) ==
verifier::MethodVerifier::kHardFailure) {
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
@@ -2124,7 +2121,8 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccess soa(Thread::Current());
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(jclass_loader);
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(jclass_loader));
mirror::Class* klass = manager->GetClassLinker()->FindClass(descriptor, class_loader);
if (klass != NULL && !SkipClass(jclass_loader, dex_file, klass)) {
@@ -2253,7 +2251,8 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
{
ScopedObjectAccess soa(Thread::Current());
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(jclass_loader);
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(jclass_loader));
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(class_loader, dex_file, class_def);
}
ClassDataItemIterator it(dex_file, class_data);
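
Every hunk above applies one pattern: raw mirror::DexCache* and mirror::ClassLoader* locals become SirtRef<> handles, so the referenced objects stay rooted on the stack indirect reference table across class-linker calls that may suspend the thread, a prerequisite for a moving collector. A minimal sketch of the pattern, assuming only the SirtRef interface already visible in this diff (a constructor taking Thread* and a pointer, plus get() and reset()); the resolution call shown is illustrative, not a new API:

    ScopedObjectAccess soa(Thread::Current());
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    // Root the dex cache and class loader so a moving GC can relocate them while we
    // are inside ResolveType (which may allocate and trigger a collection).
    SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
    SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
                                              soa.Decode<mirror::ClassLoader*>(jclass_loader));
    mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);

The same handles are then passed by reference into FindClass/ResolveField/ResolveMethod, which is why the class_linker signatures change elsewhere in this CL.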
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 9321f06526..9bfea6ff0a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -368,7 +368,8 @@ class CompilerDriver {
ThreadPool& thread_pool, base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
- void UpdateImageClasses(base::TimingLogger& timings);
+ void UpdateImageClasses(base::TimingLogger& timings)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index c6687bb4aa..bfc93b3c8f 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -78,7 +78,9 @@ class CompilerDriverTest : public CommonTest {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ScopedObjectAccess soa(Thread::Current());
- mirror::Class* c = class_linker->FindClass(descriptor, soa.Decode<mirror::ClassLoader*>(class_loader));
+ Thread* self = Thread::Current();
+ SirtRef<mirror::ClassLoader> loader(self, soa.Decode<mirror::ClassLoader*>(class_loader));
+ mirror::Class* c = class_linker->FindClass(descriptor, loader);
CHECK(c != NULL);
for (size_t i = 0; i < c->NumDirectMethods(); i++) {
MakeExecutable(c->GetDirectMethod(i));
@@ -142,8 +144,9 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
jobject class_loader;
{
ScopedObjectAccess soa(Thread::Current());
- CompileVirtualMethod(NULL, "java.lang.Class", "isFinalizable", "()Z");
- CompileDirectMethod(NULL, "java.lang.Object", "<init>", "()V");
+ SirtRef<mirror::ClassLoader> null_loader(soa.Self(), nullptr);
+ CompileVirtualMethod(null_loader, "java.lang.Class", "isFinalizable", "()Z");
+ CompileDirectMethod(null_loader, "java.lang.Object", "<init>", "()V");
class_loader = LoadDex("AbstractMethod");
}
ASSERT_TRUE(class_loader != NULL);
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index a8b7c881f4..9d9c06401e 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -94,8 +94,8 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_NE(0U, image_header.GetImageBitmapSize());
gc::Heap* heap = Runtime::Current()->GetHeap();
- ASSERT_EQ(1U, heap->GetContinuousSpaces().size());
- gc::space::ContinuousSpace* space = heap->GetContinuousSpaces().front();
+ ASSERT_TRUE(!heap->GetContinuousSpaces().empty());
+ gc::space::ContinuousSpace* space = heap->GetNonMovingSpace();
ASSERT_FALSE(space->IsImageSpace());
ASSERT_TRUE(space != NULL);
ASSERT_TRUE(space->IsDlMallocSpace());
@@ -139,11 +139,8 @@ TEST_F(ImageTest, WriteRead) {
class_linker_ = runtime_->GetClassLinker();
gc::Heap* heap = Runtime::Current()->GetHeap();
- ASSERT_EQ(2U, heap->GetContinuousSpaces().size());
- ASSERT_TRUE(heap->GetContinuousSpaces()[0]->IsImageSpace());
- ASSERT_FALSE(heap->GetContinuousSpaces()[0]->IsDlMallocSpace());
- ASSERT_FALSE(heap->GetContinuousSpaces()[1]->IsImageSpace());
- ASSERT_TRUE(heap->GetContinuousSpaces()[1]->IsDlMallocSpace());
+ ASSERT_TRUE(heap->HasImageSpace());
+ ASSERT_TRUE(heap->GetNonMovingSpace()->IsDlMallocSpace());
gc::space::ImageSpace* image_space = heap->GetImageSpace();
image_space->VerifyImageAllocations();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 75be2c9c43..c22f8d6c01 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -75,8 +75,6 @@ bool ImageWriter::Write(const std::string& image_filename,
image_begin_ = reinterpret_cast<byte*>(image_begin);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- const std::vector<DexCache*>& all_dex_caches = class_linker->GetDexCaches();
- dex_caches_.insert(all_dex_caches.begin(), all_dex_caches.end());
UniquePtr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
if (oat_file.get() == NULL) {
@@ -121,22 +119,16 @@ bool ImageWriter::Write(const std::string& image_filename,
}
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->CollectGarbage(false); // Remove garbage.
- // Trim size of alloc spaces.
- for (const auto& space : heap->GetContinuousSpaces()) {
- if (space->IsDlMallocSpace()) {
- space->AsDlMallocSpace()->Trim();
- }
- }
if (!AllocMemory()) {
return false;
}
-#ifndef NDEBUG
- { // NOLINT(whitespace/braces)
+
+ if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
CheckNonImageClassesRemoved();
}
-#endif
+
Thread::Current()->TransitionFromSuspendedToRunnable();
size_t oat_loaded_size = 0;
size_t oat_data_offset = 0;
@@ -144,8 +136,6 @@ bool ImageWriter::Write(const std::string& image_filename,
CalculateNewObjectOffsets(oat_loaded_size, oat_data_offset);
CopyAndFixupObjects();
PatchOatCodeAndMethods();
- // Record allocations into the image bitmap.
- RecordImageAllocations();
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
UniquePtr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
@@ -178,39 +168,82 @@ bool ImageWriter::Write(const std::string& image_filename,
return true;
}
-void ImageWriter::RecordImageAllocations() {
- uint64_t start_time = NanoTime();
- CHECK(image_bitmap_.get() != nullptr);
- for (const auto& it : offsets_) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + it.second);
- DCHECK_ALIGNED(obj, kObjectAlignment);
- image_bitmap_->Set(obj);
+void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
+ DCHECK(object != nullptr);
+ DCHECK_NE(offset, 0U);
+ DCHECK(!IsImageOffsetAssigned(object));
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + offset);
+ DCHECK_ALIGNED(obj, kObjectAlignment);
+ image_bitmap_->Set(obj);
+ // Before we stomp over the lock word, save the hash code for later.
+ Monitor::Deflate(Thread::Current(), object);
+ LockWord lw(object->GetLockWord());
+ switch (lw.GetState()) {
+ case LockWord::kFatLocked: {
+ LOG(FATAL) << "Fat locked object " << obj << " found during object copy";
+ break;
+ }
+ case LockWord::kThinLocked: {
+ LOG(FATAL) << "Thin locked object " << obj << " found during object copy";
+ break;
+ }
+ case LockWord::kUnlocked:
+ // No hash, don't need to save it.
+ break;
+ case LockWord::kHashCode:
+ saved_hashes_.push_back(std::make_pair(obj, lw.GetHashCode()));
+ break;
+ default:
+ LOG(FATAL) << "Unreachable.";
+ break;
}
- LOG(INFO) << "RecordImageAllocations took " << PrettyDuration(NanoTime() - start_time);
+ object->SetLockWord(LockWord::FromForwardingAddress(offset));
+ DCHECK(IsImageOffsetAssigned(object));
}
-bool ImageWriter::AllocMemory() {
- size_t size = 0;
- for (const auto& space : Runtime::Current()->GetHeap()->GetContinuousSpaces()) {
- if (space->IsDlMallocSpace()) {
- size += space->Size();
- }
- }
+void ImageWriter::AssignImageOffset(mirror::Object* object) {
+ DCHECK(object != nullptr);
+ SetImageOffset(object, image_end_);
+ image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment
+ DCHECK_LT(image_end_, image_->Size());
+}
- int prot = PROT_READ | PROT_WRITE;
- size_t length = RoundUp(size, kPageSize);
+bool ImageWriter::IsImageOffsetAssigned(const mirror::Object* object) const {
+ DCHECK(object != nullptr);
+ return object->GetLockWord().GetState() == LockWord::kForwardingAddress;
+}
+
+size_t ImageWriter::GetImageOffset(const mirror::Object* object) const {
+ DCHECK(object != nullptr);
+ DCHECK(IsImageOffsetAssigned(object));
+ LockWord lock_word = object->GetLockWord();
+ size_t offset = lock_word.ForwardingAddress();
+ DCHECK_LT(offset, image_end_);
+ return offset;
+}
+
+bool ImageWriter::AllocMemory() {
+ size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
std::string error_msg;
- image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, prot, &error_msg));
+ image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, PROT_READ | PROT_WRITE,
+ &error_msg));
if (UNLIKELY(image_.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
return false;
}
+
+ // Create the image bitmap.
+ image_bitmap_.reset(gc::accounting::SpaceBitmap::Create("image bitmap", image_->Begin(),
+ length));
+ if (image_bitmap_.get() == nullptr) {
+ LOG(ERROR) << "Failed to allocate memory for image bitmap";
+ return false;
+ }
return true;
}
void ImageWriter::ComputeLazyFieldsForImageClasses() {
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
}
@@ -223,13 +256,12 @@ void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
if (!obj->GetClass()->IsStringClass()) {
return;
}
- String* string = obj->AsString();
+ mirror::String* string = obj->AsString();
const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
- ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
- for (DexCache* dex_cache : writer->dex_caches_) {
+ for (DexCache* dex_cache : Runtime::Current()->GetClassLinker()->GetDexCaches()) {
const DexFile& dex_file = *dex_cache->GetDexFile();
const DexFile::StringId* string_id = dex_file.FindStringId(utf16_string);
- if (string_id != NULL) {
+ if (string_id != nullptr) {
// This string occurs in this dex file, assign the dex cache entry.
uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
if (dex_cache->GetResolvedString(string_idx) == NULL) {
@@ -239,13 +271,9 @@ void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
}
}
-void ImageWriter::ComputeEagerResolvedStrings()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // TODO: Check image spaces only?
- gc::Heap* heap = Runtime::Current()->GetHeap();
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- heap->FlushAllocStack();
- heap->GetLiveBitmap()->Walk(ComputeEagerResolvedStringsCallback, this);
+void ImageWriter::ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ Runtime::Current()->GetHeap()->VisitObjects(ComputeEagerResolvedStringsCallback, this);
}
bool ImageWriter::IsImageClass(const Class* klass) {
@@ -278,7 +306,7 @@ void ImageWriter::PruneNonImageClasses() {
// Clear references to removed classes from the DexCaches.
ArtMethod* resolution_method = runtime->GetResolutionMethod();
- for (DexCache* dex_cache : dex_caches_) {
+ for (DexCache* dex_cache : class_linker->GetDexCaches()) {
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
Class* klass = dex_cache->GetResolvedType(i);
if (klass != NULL && !IsImageClass(klass)) {
@@ -311,31 +339,22 @@ bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
void ImageWriter::CheckNonImageClassesRemoved()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (compiler_driver_.GetImageClasses() == NULL) {
- return;
- }
-
- gc::Heap* heap = Runtime::Current()->GetHeap();
- Thread* self = Thread::Current();
- {
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- heap->FlushAllocStack();
+ if (compiler_driver_.GetImageClasses() != nullptr) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ heap->VisitObjects(CheckNonImageClassesRemovedCallback, this);
}
-
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- heap->GetLiveBitmap()->Walk(CheckNonImageClassesRemovedCallback, this);
}
void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
- if (!obj->IsClass()) {
- return;
- }
- Class* klass = obj->AsClass();
- if (!image_writer->IsImageClass(klass)) {
- image_writer->DumpImageClasses();
- CHECK(image_writer->IsImageClass(klass)) << ClassHelper(klass).GetDescriptor()
- << " " << PrettyDescriptor(klass);
+ if (obj->IsClass()) {
+ Class* klass = obj->AsClass();
+ if (!image_writer->IsImageClass(klass)) {
+ image_writer->DumpImageClasses();
+ CHECK(image_writer->IsImageClass(klass)) << ClassHelper(klass).GetDescriptor()
+ << " " << PrettyDescriptor(klass);
+ }
}
}
@@ -347,53 +366,50 @@ void ImageWriter::DumpImageClasses() {
}
}
-void ImageWriter::CalculateNewObjectOffsetsCallback(Object* obj, void* arg) {
+void ImageWriter::CalculateObjectOffsets(Object* obj) {
DCHECK(obj != NULL);
- DCHECK(arg != NULL);
- ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
-
// if it is a string, we want to intern it if its not interned.
if (obj->GetClass()->IsStringClass()) {
// we must be an interned string that was forward referenced and already assigned
- if (image_writer->IsImageOffsetAssigned(obj)) {
+ if (IsImageOffsetAssigned(obj)) {
DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
- SirtRef<String> interned(Thread::Current(), obj->AsString()->Intern());
- if (obj != interned.get()) {
- if (!image_writer->IsImageOffsetAssigned(interned.get())) {
+ Thread* self = Thread::Current();
+ SirtRef<Object> sirt_obj(self, obj);
+ mirror::String* interned = obj->AsString()->Intern();
+ if (sirt_obj.get() != interned) {
+ if (!IsImageOffsetAssigned(interned)) {
// interned obj is after us, allocate its location early
- image_writer->AssignImageOffset(interned.get());
+ AssignImageOffset(interned);
}
// point those looking for this object to the interned version.
- image_writer->SetImageOffset(obj, image_writer->GetImageOffset(interned.get()));
+ SetImageOffset(sirt_obj.get(), GetImageOffset(interned));
return;
}
// else (obj == interned), nothing to do but fall through to the normal case
}
- image_writer->AssignImageOffset(obj);
+ AssignImageOffset(obj);
}
ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- Class* object_array_class = class_linker->FindSystemClass("[Ljava/lang/Object;");
Thread* self = Thread::Current();
+ SirtRef<Class> object_array_class(self, class_linker->FindSystemClass("[Ljava/lang/Object;"));
// build an Object[] of all the DexCaches used in the source_space_
- ObjectArray<Object>* dex_caches = ObjectArray<Object>::Alloc(self, object_array_class,
- dex_caches_.size());
+ ObjectArray<Object>* dex_caches = ObjectArray<Object>::Alloc(self, object_array_class.get(),
+ class_linker->GetDexCaches().size());
int i = 0;
- for (DexCache* dex_cache : dex_caches_) {
+ for (DexCache* dex_cache : class_linker->GetDexCaches()) {
dex_caches->Set(i++, dex_cache);
}
// build an Object[] of the roots needed to restore the runtime
- SirtRef<ObjectArray<Object> >
- image_roots(self,
- ObjectArray<Object>::Alloc(self, object_array_class,
- ImageHeader::kImageRootsMax));
+ SirtRef<ObjectArray<Object> > image_roots(
+ self, ObjectArray<Object>::Alloc(self, object_array_class.get(), ImageHeader::kImageRootsMax));
image_roots->Set(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
image_roots->Set(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
image_roots->Set(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
@@ -405,24 +421,82 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
image_roots->Set(ImageHeader::kOatLocation,
String::AllocFromModifiedUtf8(self, oat_file_->GetLocation().c_str()));
- image_roots->Set(ImageHeader::kDexCaches,
- dex_caches);
- image_roots->Set(ImageHeader::kClassRoots,
- class_linker->GetClassRoots());
+ image_roots->Set(ImageHeader::kDexCaches, dex_caches);
+ image_roots->Set(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
CHECK(image_roots->Get(i) != NULL);
}
return image_roots.get();
}
+// Walk instance fields of the given Class. Separate function to allow recursion on the super
+// class.
+void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) {
+ // Visit fields of parent classes first.
+ SirtRef<mirror::Class> sirt_class(Thread::Current(), klass);
+ mirror::Class* super = sirt_class->GetSuperClass();
+ if (super != nullptr) {
+ WalkInstanceFields(obj, super);
+ }
+ //
+ size_t num_reference_fields = sirt_class->NumReferenceInstanceFields();
+ for (size_t i = 0; i < num_reference_fields; ++i) {
+ mirror::ArtField* field = sirt_class->GetInstanceField(i);
+ MemberOffset field_offset = field->GetOffset();
+ mirror::Object* value = obj->GetFieldObject<mirror::Object*>(field_offset, false);
+ if (value != nullptr) {
+ WalkFieldsInOrder(value);
+ }
+ }
+}
+
+// For an unvisited object, visit it then all its children found via fields.
+void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
+ if (!IsImageOffsetAssigned(obj)) {
+ // Walk instance fields of all objects
+ Thread* self = Thread::Current();
+ SirtRef<mirror::Object> sirt_obj(self, obj);
+ SirtRef<mirror::Class> klass(self, obj->GetClass());
+ // visit the object itself.
+ CalculateObjectOffsets(sirt_obj.get());
+ WalkInstanceFields(sirt_obj.get(), klass.get());
+ // Walk static fields of a Class.
+ if (sirt_obj->IsClass()) {
+ size_t num_static_fields = klass->NumReferenceStaticFields();
+ for (size_t i = 0; i < num_static_fields; ++i) {
+ mirror::ArtField* field = klass->GetStaticField(i);
+ MemberOffset field_offset = field->GetOffset();
+ mirror::Object* value = sirt_obj->GetFieldObject<mirror::Object*>(field_offset, false);
+ if (value != nullptr) {
+ WalkFieldsInOrder(value);
+ }
+ }
+ } else if (sirt_obj->IsObjectArray()) {
+ // Walk elements of an object array.
+ int32_t length = sirt_obj->AsObjectArray<mirror::Object>()->GetLength();
+ for (int32_t i = 0; i < length; i++) {
+ mirror::ObjectArray<mirror::Object>* obj_array = sirt_obj->AsObjectArray<mirror::Object>();
+ mirror::Object* value = obj_array->Get(i);
+ if (value != nullptr) {
+ WalkFieldsInOrder(value);
+ }
+ }
+ }
+ }
+}
+
+void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
+ ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
+ DCHECK(writer != nullptr);
+ writer->WalkFieldsInOrder(obj);
+}
+
void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_NE(0U, oat_loaded_size);
Thread* self = Thread::Current();
SirtRef<ObjectArray<Object> > image_roots(self, CreateImageRoots());
gc::Heap* heap = Runtime::Current()->GetHeap();
- const auto& spaces = heap->GetContinuousSpaces();
- DCHECK(!spaces.empty());
DCHECK_EQ(0U, image_end_);
// Leave space for the header, but do not write it yet, we need to
@@ -431,21 +505,14 @@ void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_d
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- heap->FlushAllocStack();
// TODO: Image spaces only?
- // TODO: Add InOrderWalk to heap bitmap.
const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
- DCHECK(heap->GetLargeObjectsSpace()->GetLiveObjects()->IsEmpty());
- for (const auto& space : spaces) {
- space->GetLiveBitmap()->InOrderWalk(CalculateNewObjectOffsetsCallback, this);
- DCHECK_LT(image_end_, image_->Size());
- }
+ DCHECK_LT(image_end_, image_->Size());
+ // Clear any pre-existing monitors which may have been in the monitor words.
+ heap->VisitObjects(WalkFieldsCallback, this);
self->EndAssertNoThreadSuspension(old);
}
- // Create the image bitmap.
- image_bitmap_.reset(gc::accounting::SpaceBitmap::Create("image bitmap", image_->Begin(),
- image_end_));
const byte* oat_file_begin = image_begin_ + RoundUp(image_end_, kPageSize);
const byte* oat_file_end = oat_file_begin + oat_loaded_size;
oat_data_begin_ = oat_file_begin + oat_data_offset;
@@ -456,7 +523,8 @@ void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_d
ImageHeader image_header(reinterpret_cast<uint32_t>(image_begin_),
static_cast<uint32_t>(image_end_),
RoundUp(image_end_, kPageSize),
- image_bitmap_->Size(),
+ RoundUp(image_end_ / gc::accounting::SpaceBitmap::kAlignment,
+ sizeof(size_t)),
reinterpret_cast<uint32_t>(GetImageAddress(image_roots.get())),
oat_file_->GetOatHeader().GetChecksum(),
reinterpret_cast<uint32_t>(oat_file_begin),
@@ -477,17 +545,19 @@ void ImageWriter::CopyAndFixupObjects()
heap->DisableObjectValidation();
// TODO: Image spaces only?
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- heap->FlushAllocStack();
- heap->GetLiveBitmap()->Walk(CopyAndFixupObjectsCallback, this);
+ heap->VisitObjects(CopyAndFixupObjectsCallback, this);
+ // Fix up the objects that previously had hash codes.
+ for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
+ hash_pair.first->SetLockWord(LockWord::FromHashCode(hash_pair.second));
+ }
+ saved_hashes_.clear();
self->EndAssertNoThreadSuspension(old_cause);
}
-void ImageWriter::CopyAndFixupObjectsCallback(Object* object, void* arg) {
- DCHECK(object != NULL);
+void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
+ DCHECK(obj != NULL);
DCHECK(arg != NULL);
- const Object* obj = object;
ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
-
// see GetLocalAddress for similar computation
size_t offset = image_writer->GetImageOffset(obj);
byte* dst = image_writer->image_->Begin() + offset;
@@ -498,33 +568,7 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* object, void* arg) {
Object* copy = reinterpret_cast<Object*>(dst);
// Write in a hash code of objects which have inflated monitors or a hash code in their monitor
// word.
- LockWord lw(copy->GetLockWord());
- switch (lw.GetState()) {
- case LockWord::kFatLocked: {
- Monitor* monitor = lw.FatLockMonitor();
- CHECK(monitor != nullptr);
- CHECK(!monitor->IsLocked());
- if (monitor->HasHashCode()) {
- copy->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()));
- } else {
- copy->SetLockWord(LockWord());
- }
- break;
- }
- case LockWord::kThinLocked: {
- LOG(FATAL) << "Thin locked object " << obj << " found during object copy";
- break;
- }
- case LockWord::kUnlocked:
- break;
- case LockWord::kHashCode:
- // Do nothing since we can just keep the same hash code.
- CHECK_NE(lw.GetHashCode(), 0);
- break;
- default:
- LOG(FATAL) << "Unreachable.";
- break;
- }
+ copy->SetLockWord(LockWord());
image_writer->FixupObject(obj, copy);
}
@@ -629,19 +673,13 @@ void ImageWriter::FixupInstanceFields(const Object* orig, Object* copy) {
DCHECK(copy != NULL);
Class* klass = orig->GetClass();
DCHECK(klass != NULL);
- FixupFields(orig,
- copy,
- klass->GetReferenceInstanceOffsets(),
- false);
+ FixupFields(orig, copy, klass->GetReferenceInstanceOffsets(), false);
}
void ImageWriter::FixupStaticFields(const Class* orig, Class* copy) {
DCHECK(orig != NULL);
DCHECK(copy != NULL);
- FixupFields(orig,
- copy,
- orig->GetReferenceStaticOffsets(),
- true);
+ FixupFields(orig, copy, orig->GetReferenceStaticOffsets(), true);
}
void ImageWriter::FixupFields(const Object* orig,
@@ -693,11 +731,13 @@ void ImageWriter::FixupFields(const Object* orig,
static ArtMethod* GetTargetMethod(const CompilerDriver::PatchInformation* patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- DexCache* dex_cache = class_linker->FindDexCache(patch->GetDexFile());
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(patch->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
ArtMethod* method = class_linker->ResolveMethod(patch->GetDexFile(),
patch->GetTargetMethodIdx(),
dex_cache,
- NULL,
+ class_loader,
NULL,
patch->GetTargetInvokeType());
CHECK(method != NULL)
@@ -749,15 +789,15 @@ void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch
// TODO: make this Thumb2 specific
uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uint32_t>(oat_code) & ~0x1);
uint32_t* patch_location = reinterpret_cast<uint32_t*>(base + patch->GetLiteralOffset());
-#ifndef NDEBUG
- const DexFile::MethodId& id = patch->GetDexFile().GetMethodId(patch->GetTargetMethodIdx());
- uint32_t expected = reinterpret_cast<uint32_t>(&id);
- uint32_t actual = *patch_location;
- CHECK(actual == expected || actual == value) << std::hex
- << "actual=" << actual
- << "expected=" << expected
- << "value=" << value;
-#endif
+ if (kIsDebugBuild) {
+ const DexFile::MethodId& id = patch->GetDexFile().GetMethodId(patch->GetTargetMethodIdx());
+ uint32_t expected = reinterpret_cast<uint32_t>(&id);
+ uint32_t actual = *patch_location;
+ CHECK(actual == expected || actual == value) << std::hex
+ << "actual=" << actual
+ << "expected=" << expected
+ << "value=" << value;
+ }
*patch_location = value;
oat_header.UpdateChecksum(patch_location, sizeof(value));
}
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 0b408e85cc..695f59b40e 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -63,31 +63,11 @@ class ImageWriter {
void RecordImageAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// We use the lock word to store the offset of the object in the image.
- void AssignImageOffset(mirror::Object* object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(object != NULL);
- SetImageOffset(object, image_end_);
- image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment
- DCHECK_LT(image_end_, image_->Size());
- }
-
- void SetImageOffset(mirror::Object* object, size_t offset) {
- DCHECK(object != NULL);
- DCHECK_NE(offset, 0U);
- DCHECK(!IsImageOffsetAssigned(object));
- offsets_.Put(object, offset);
- }
-
- size_t IsImageOffsetAssigned(const mirror::Object* object) const {
- DCHECK(object != NULL);
- return offsets_.find(object) != offsets_.end();
- }
-
- size_t GetImageOffset(const mirror::Object* object) const {
- DCHECK(object != NULL);
- DCHECK(IsImageOffsetAssigned(object));
- return offsets_.find(object)->second;
- }
+ void AssignImageOffset(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetImageOffset(mirror::Object* object, size_t offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImageOffsetAssigned(const mirror::Object* object) const;
+ size_t GetImageOffset(const mirror::Object* object) const;
mirror::Object* GetImageAddress(const mirror::Object* object) const {
if (object == NULL) {
@@ -147,7 +127,14 @@ class ImageWriter {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* CreateImageRoots() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void CalculateNewObjectOffsetsCallback(mirror::Object* obj, void* arg)
+ void CalculateObjectOffsets(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void WalkFieldsInOrder(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void WalkFieldsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Creates the contiguous image in memory and adjusts pointers.
@@ -180,9 +167,6 @@ class ImageWriter {
const CompilerDriver& compiler_driver_;
- // Map of Object to where it will be at runtime.
- SafeMap<const mirror::Object*, size_t> offsets_;
-
// oat file with code for this image
OatFile* oat_file_;
@@ -195,6 +179,9 @@ class ImageWriter {
// Beginning target image address for the output image.
byte* image_begin_;
+ // Saved hashes (objects are inside of the image so that they don't move).
+ std::vector<std::pair<mirror::Object*, uint32_t> > saved_hashes_;
+
// Beginning target oat address for the pointers from the output image to its oat file.
const byte* oat_data_begin_;
@@ -211,9 +198,6 @@ class ImageWriter {
uint32_t quick_imt_conflict_trampoline_offset_;
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
-
- // DexCaches seen while scanning for fixing up CodeAndDirectMethods
- std::set<mirror::DexCache*> dex_caches_;
};
} // namespace art
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 667b913039..21dd11ef20 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -48,9 +48,9 @@ class JniCompilerTest : public CommonTest {
void CompileForTest(jobject class_loader, bool direct,
const char* method_name, const char* method_sig) {
ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(class_loader));
// Compile the native method before starting the runtime
- mirror::Class* c = class_linker_->FindClass("LMyClassNatives;",
- soa.Decode<mirror::ClassLoader*>(class_loader));
+ mirror::Class* c = class_linker_->FindClass("LMyClassNatives;", loader);
mirror::ArtMethod* method;
if (direct) {
method = c->FindDirectMethod(method_name, method_sig);
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 6213b45c41..c423f34f7f 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -130,7 +130,8 @@ TEST_F(OatTest, WriteRead) {
num_virtual_methods = it.NumVirtualMethods();
}
const char* descriptor = dex_file->GetClassDescriptor(class_def);
- mirror::Class* klass = class_linker->FindClass(descriptor, NULL);
+ SirtRef<mirror::ClassLoader> loader(Thread::Current(), nullptr);
+ mirror::Class* klass = class_linker->FindClass(descriptor, loader);
UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file->GetOatClass(i));
CHECK_EQ(mirror::Class::Status::kStatusNotReady, oat_class->GetStatus()) << descriptor;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index f3bb11272e..28fb1479d7 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -405,23 +405,23 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]);
gc_map_offset = (gc_map_size == 0) ? 0 : offset;
-#if !defined(NDEBUG)
- // We expect GC maps except when the class hasn't been verified or the method is native
- ClassReference class_ref(&dex_file, class_def_index);
- CompiledClass* compiled_class = compiler_driver_->GetCompiledClass(class_ref);
- mirror::Class::Status status;
- if (compiled_class != NULL) {
- status = compiled_class->GetStatus();
- } else if (verifier::MethodVerifier::IsClassRejected(class_ref)) {
- status = mirror::Class::kStatusError;
- } else {
- status = mirror::Class::kStatusNotReady;
+ if (kIsDebugBuild) {
+ // We expect GC maps except when the class hasn't been verified or the method is native
+ ClassReference class_ref(&dex_file, class_def_index);
+ CompiledClass* compiled_class = compiler_driver_->GetCompiledClass(class_ref);
+ mirror::Class::Status status;
+ if (compiled_class != NULL) {
+ status = compiled_class->GetStatus();
+ } else if (verifier::MethodVerifier::IsClassRejected(class_ref)) {
+ status = mirror::Class::kStatusError;
+ } else {
+ status = mirror::Class::kStatusNotReady;
+ }
+ CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
+ << &gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
+ << (status < mirror::Class::kStatusVerified) << " " << status << " "
+ << PrettyMethod(method_idx, dex_file);
}
- CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
- << &gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
- << (status < mirror::Class::kStatusVerified) << " " << status << " "
- << PrettyMethod(method_idx, dex_file);
-#endif
// Deduplicate GC maps
SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator gc_map_iter =
@@ -448,11 +448,12 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
if (compiler_driver_->IsImage()) {
ClassLinker* linker = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache = linker->FindDexCache(dex_file);
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(dex_file));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
mirror::ArtMethod* method = linker->ResolveMethod(dex_file, method_idx, dex_cache,
- NULL, NULL, invoke_type);
+ class_loader, nullptr, invoke_type);
CHECK(method != NULL);
method->SetFrameSizeInBytes(frame_size_in_bytes);
method->SetCoreSpillMask(core_spill_mask);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 90276c2678..e219dd33a2 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -257,6 +257,9 @@ class OatDumper {
os << "OAT DEX FILE:\n";
os << StringPrintf("location: %s\n", oat_dex_file.GetDexFileLocation().c_str());
os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum());
+
+ // Create the verifier early.
+
std::string error_msg;
UniquePtr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
if (dex_file.get() == NULL) {
@@ -377,8 +380,20 @@ class OatDumper {
oat_method.GetCode() != NULL ? "..." : "");
Indenter indent2_filter(indent1_os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indent2_os(&indent2_filter);
- DumpCode(indent2_os, oat_method, dex_method_idx, &dex_file, class_def, code_item,
- method_access_flags);
+
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(
+ soa.Self(), runtime->GetClassLinker()->FindDexCache(dex_file));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ verifier::MethodVerifier verifier(&dex_file, &dex_cache, &class_loader, &class_def, code_item,
+ dex_method_idx, nullptr, method_access_flags, true, true);
+ verifier.Verify();
+ DumpCode(indent2_os, &verifier, oat_method, code_item);
+ } else {
+ DumpCode(indent2_os, nullptr, oat_method, code_item);
+ }
}
}
@@ -566,24 +581,10 @@ class OatDumper {
}
}
- void DumpVRegsAtDexPc(std::ostream& os, const OatFile::OatMethod& oat_method,
- uint32_t dex_method_idx, const DexFile* dex_file,
- const DexFile::ClassDef& class_def, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags, uint32_t dex_pc) {
- static UniquePtr<verifier::MethodVerifier> verifier;
- static const DexFile* verified_dex_file = NULL;
- static uint32_t verified_dex_method_idx = DexFile::kDexNoIndex;
- if (dex_file != verified_dex_file || verified_dex_method_idx != dex_method_idx) {
- ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file);
- mirror::ClassLoader* class_loader = NULL;
- verifier.reset(new verifier::MethodVerifier(dex_file, dex_cache, class_loader, &class_def,
- code_item, dex_method_idx, NULL,
- method_access_flags, true, true));
- verifier->Verify();
- verified_dex_file = dex_file;
- verified_dex_method_idx = dex_method_idx;
- }
+ void DumpVRegsAtDexPc(std::ostream& os, verifier::MethodVerifier* verifier,
+ const OatFile::OatMethod& oat_method,
+ const DexFile::CodeItem* code_item, uint32_t dex_pc) {
+ DCHECK(verifier != nullptr);
std::vector<int32_t> kinds = verifier->DescribeVRegs(dex_pc);
bool first = true;
for (size_t reg = 0; reg < code_item->registers_size_; reg++) {
@@ -633,18 +634,16 @@ class OatDumper {
uint32_t method_access_flags) {
if ((method_access_flags & kAccNative) == 0) {
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file);
- mirror::ClassLoader* class_loader = NULL;
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
verifier::MethodVerifier::VerifyMethodAndDump(os, dex_method_idx, dex_file, dex_cache,
class_loader, &class_def, code_item, NULL,
method_access_flags);
}
}
- void DumpCode(std::ostream& os, const OatFile::OatMethod& oat_method,
- uint32_t dex_method_idx, const DexFile* dex_file,
- const DexFile::ClassDef& class_def, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags) {
+ void DumpCode(std::ostream& os, verifier::MethodVerifier* verifier,
+ const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item) {
const void* code = oat_method.GetCode();
size_t code_size = oat_method.GetCodeSize();
if (code == NULL || code_size == 0) {
@@ -653,16 +652,14 @@ class OatDumper {
}
const uint8_t* native_pc = reinterpret_cast<const uint8_t*>(code);
size_t offset = 0;
- const bool kDumpVRegs = (Runtime::Current() != NULL);
while (offset < code_size) {
DumpMappingAtOffset(os, oat_method, offset, false);
offset += disassembler_->Dump(os, native_pc + offset);
uint32_t dex_pc = DumpMappingAtOffset(os, oat_method, offset, true);
if (dex_pc != DexFile::kDexNoIndex) {
DumpGcMapAtNativePcOffset(os, oat_method, code_item, offset);
- if (kDumpVRegs) {
- DumpVRegsAtDexPc(os, oat_method, dex_method_idx, dex_file, class_def, code_item,
- method_access_flags, dex_pc);
+ if (verifier != nullptr) {
+ DumpVRegsAtDexPc(os, verifier, oat_method, code_item, dex_pc);
}
}
}
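
The two oatdump hunks above are halves of one change: DumpOatMethod now constructs a single MethodVerifier per method (only when a runtime is available) and hands it down, and DumpVRegsAtDexPc just queries it per dex pc instead of maintaining a function-local static cache. A condensed sketch of the new flow, using only the signatures introduced in this diff:

    // Per method (DumpOatMethod), assuming a runtime is up:
    verifier::MethodVerifier verifier(&dex_file, &dex_cache, &class_loader, &class_def,
                                      code_item, dex_method_idx, nullptr,
                                      method_access_flags, true, true);
    verifier.Verify();                        // one verification pass, reused for every pc
    DumpCode(indent2_os, &verifier, oat_method, code_item);

    // Per dex pc (DumpVRegsAtDexPc):
    std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);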
diff --git a/runtime/Android.mk b/runtime/Android.mk
index bef4381c2b..97cbdd9ab5 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -50,8 +50,10 @@ LIBART_COMMON_SRC_FILES := \
gc/collector/garbage_collector.cc \
gc/collector/mark_sweep.cc \
gc/collector/partial_mark_sweep.cc \
+ gc/collector/semi_space.cc \
gc/collector/sticky_mark_sweep.cc \
gc/heap.cc \
+ gc/space/bump_pointer_space.cc \
gc/space/dlmalloc_space.cc \
gc/space/image_space.cc \
gc/space/large_object_space.cc \
diff --git a/runtime/arch/alloc_entrypoints.S b/runtime/arch/alloc_entrypoints.S
new file mode 100644
index 0000000000..840f3c6197
--- /dev/null
+++ b/runtime/arch/alloc_entrypoints.S
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Called by managed code to allocate an object */
+TWO_ARG_DOWNCALL art_quick_alloc_object, artAllocObjectFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+TWO_ARG_DOWNCALL art_quick_alloc_object_instrumented, artAllocObjectFromCodeInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
+/* Called by managed code to allocate an object when the caller doesn't know whether it has access
+ * to the created type. */
+TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_RESULT_IS_NON_ZERO
+TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check_instrumented, artAllocObjectFromCodeWithAccessCheckInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
+/* Called by managed code to allocate an array. */
+THREE_ARG_DOWNCALL art_quick_alloc_array, artAllocArrayFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array_instrumented, artAllocArrayFromCodeInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
+/* Called by managed code to allocate an array when the caller doesn't know whether it has access
+ * to the created type. */
+THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_RESULT_IS_NON_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check_instrumented, artAllocArrayFromCodeWithAccessCheckInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
+/* Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. */
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array, artCheckAndAllocArrayFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_instrumented, artCheckAndAllocArrayFromCodeInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
+/* Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY, with an access check. */
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_RESULT_IS_NON_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check_instrumented, artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented, RETURN_IF_RESULT_IS_NON_ZERO
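
This new file names every allocation stub exactly once; each architecture defines TWO_ARG_DOWNCALL/THREE_ARG_DOWNCALL itself (as the ARM and x86 hunks below do) and then includes this file, so adding an allocator variant becomes a one-line change here instead of a hand-written stub per architecture. The C-side signatures these stubs branch to are documented only in the assembly comments; a hedged sketch of what they imply (exact parameter types in entrypoint_utils may differ):

    // Implied by the "(uint32_t type_idx, Method* method, Thread*, SP)" comments below.
    extern "C" mirror::Object* artAllocObjectFromCode(uint32_t type_idx,
                                                      mirror::ArtMethod* method,
                                                      Thread* self,
                                                      mirror::ArtMethod** sp);
    extern "C" mirror::Array* artAllocArrayFromCode(uint32_t type_idx,
                                                    mirror::ArtMethod* method,
                                                    int32_t component_count,
                                                    Thread* self,
                                                    mirror::ArtMethod** sp);
    // A non-zero return is the new object; zero means an exception is pending,
    // which is why each stub pairs RETURN_IF_RESULT_IS_NON_ZERO with
    // DELIVER_PENDING_EXCEPTION.
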
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 1a058ea61e..dbfb93a846 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -830,205 +830,41 @@ ENTRY art_quick_resolve_string
DELIVER_PENDING_EXCEPTION
END art_quick_resolve_string
- /*
- * Called by managed code to allocate an object
- */
- .extern artAllocObjectFromCode
-ENTRY art_quick_alloc_object
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- bl artAllocObjectFromCode @ (uint32_t type_idx, Method* method, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_object
-
- .extern artAllocObjectFromCodeInstrumented
-ENTRY art_quick_alloc_object_instrumented
+// Macro to facilitate adding new allocation entrypoints.
+.macro TWO_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
mov r3, sp @ pass SP
- bl artAllocObjectFromCodeInstrumented @ (uint32_t type_idx, Method* method, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_object_instrumented
-
- /*
- * Called by managed code to allocate an object when the caller doesn't know whether it has
- * access to the created type.
- */
- .extern artAllocObjectFromCodeWithAccessCheck
-ENTRY art_quick_alloc_object_with_access_check
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- bl artAllocObjectFromCodeWithAccessCheck @ (uint32_t type_idx, Method* method, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_object_with_access_check
-
- .extern artAllocObjectFromCodeWithAccessCheckInstrumented
-ENTRY art_quick_alloc_object_with_access_check_instrumented
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- bl artAllocObjectFromCodeWithAccessCheckInstrumented @ (uint32_t type_idx, Method* method, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_object_with_access_check_instrumented
-
- /*
- * Called by managed code to allocate an array.
- */
- .extern artAllocArrayFromCode
-ENTRY art_quick_alloc_array
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- @ artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP)
- bl artAllocArrayFromCode
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
+ bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
+ \return
DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_array
-
- .extern artAllocArrayFromCodeInstrumented
-ENTRY art_quick_alloc_array_instrumented
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- @ artAllocArrayFromCodeInstrumented(uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP)
- bl artAllocArrayFromCodeInstrumented
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_array_instrumented
-
- /*
- * Called by managed code to allocate an array when the caller doesn't know whether it has
- * access to the created type.
- */
- .extern artAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_alloc_array_with_access_check
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- @ artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, SP)
- bl artAllocArrayFromCodeWithAccessCheck
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_array_with_access_check
-
- .extern artAllocArrayFromCodeWithAccessCheckInstrumented
-ENTRY art_quick_alloc_array_with_access_check_instrumented
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- @ artAllocArrayFromCodeWithAccessCheckInstrumented(type_idx, method, component_count, Thread*, SP)
- bl artAllocArrayFromCodeWithAccessCheckInstrumented
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_array_with_access_check_instrumented
-
- /*
- * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
- */
- .extern artCheckAndAllocArrayFromCode
-ENTRY art_quick_check_and_alloc_array
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- @ artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , SP)
- bl artCheckAndAllocArrayFromCode
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_check_and_alloc_array
-
- .extern artCheckAndAllocArrayFromCodeInstrumented
-ENTRY art_quick_check_and_alloc_array_instrumented
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- @ artCheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, Method* method, int32_t count, Thread* , SP)
- bl artCheckAndAllocArrayFromCodeInstrumented
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_check_and_alloc_array_instrumented
+END \name
+.endm
- /*
- * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
- */
- .extern artCheckAndAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_check_and_alloc_array_with_access_check
+// Macro to facilitate adding new array allocation entrypoints.
+.macro THREE_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
mov r3, r9 @ pass Thread::Current
mov r12, sp
str r12, [sp, #-16]! @ expand the frame and pass SP
.pad #16
.cfi_adjust_cfa_offset 16
- @ artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , SP)
- bl artCheckAndAllocArrayFromCodeWithAccessCheck
+ @ (uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP)
+ bl \entrypoint
add sp, #16 @ strip the extra frame
.cfi_adjust_cfa_offset -16
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
+ \return
DELIVER_PENDING_EXCEPTION
-END art_quick_check_and_alloc_array_with_access_check
+END \name
+.endm
- .extern artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented
-ENTRY art_quick_check_and_alloc_array_with_access_check_instrumented
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- @ artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented(type_idx, method, count, Thread* , SP)
- bl artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO
- DELIVER_PENDING_EXCEPTION
-END art_quick_check_and_alloc_array_with_access_check_instrumented
+#include "arch/alloc_entrypoints.S"
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
@@ -1107,11 +943,10 @@ ENTRY art_quick_to_interpreter_bridge
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
add sp, #16 @ skip r1-r3, 4 bytes padding.
.cfi_adjust_cfa_offset -16
- cbnz r2, 1f @ success if no exception is pending
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cbnz r2, 1f @ deliver the exception if one is pending
bx lr @ return on success
1:
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index ee78d45793..decdb500b2 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -401,7 +401,7 @@ MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
END_FUNCTION VAR(c_name, 0)
END_MACRO
-MACRO0(RETURN_IF_EAX_NOT_ZERO)
+MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
testl %eax, %eax // eax == 0 ?
jz 1f // if eax == 0 goto 1
ret // return
@@ -426,24 +426,12 @@ MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
DELIVER_PENDING_EXCEPTION
END_MACRO
-TWO_ARG_DOWNCALL art_quick_alloc_object, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
-
-TWO_ARG_DOWNCALL art_quick_alloc_object_instrumented, artAllocObjectFromCodeInstrumented, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check_instrumented, artAllocObjectFromCodeWithAccessCheckInstrumented, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array_instrumented, artAllocArrayFromCodeInstrumented, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check_instrumented, artAllocArrayFromCodeWithAccessCheckInstrumented, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_instrumented, artCheckAndAllocArrayFromCodeInstrumented, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check_instrumented, artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented, RETURN_IF_EAX_NOT_ZERO
-
-TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO
+#include "arch/alloc_entrypoints.S"
+
+TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO
TWO_ARG_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index c0cfee2463..29b39817a8 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -130,7 +130,7 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY
// TODO: tighten this check.
if (kDebugLocking) {
Runtime* runtime = Runtime::Current();
- CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown() ||
+ CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
level == kDefaultMutexLevel || level == kRuntimeShutdownLock ||
level == kThreadListLock || level == kLoggingLock || level == kAbortLock);
}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 249f031df0..1c7d744945 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -266,9 +266,8 @@ Mutex::Mutex(const char* name, LockLevel level, bool recursive)
Mutex::~Mutex() {
#if ART_USE_FUTEXES
if (state_ != 0) {
- MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
+ bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
} else {
CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
@@ -641,9 +640,8 @@ ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
if (num_waiters_!= 0) {
- MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
+ bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
<< " called with " << num_waiters_ << " waiters.";
}
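
Both destructors previously took Locks::runtime_shutdown_lock_ inline just to ask whether the runtime is going away; they now delegate to an IsShuttingDown(Thread*) overload, while the lock-annotated debug check in mutex-inl.h above uses IsShuttingDownLocked(). A sketch of how such a pair is usually split — only the two method names come from this diff, the bodies are assumptions:

    // Sketch, not the actual Runtime implementation.
    class Runtime {
     public:
      bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
        return shutting_down_;  // caller already holds the shutdown lock
      }
      bool IsShuttingDown(Thread* self) {
        MutexLock mu(self, *Locks::runtime_shutdown_lock_);  // lock on the caller's behalf
        return IsShuttingDownLocked();
      }
     private:
      bool shutting_down_ = false;
    };
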
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index feb8a6c6c1..a8750177c5 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -329,6 +329,11 @@ class ConditionVariable {
// TODO: remove this.
void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
+ // Return the number of threads that are waiting on this condition.
+ int32_t GetNumWaiters() const NO_THREAD_SAFETY_ANALYSIS {
+ return num_waiters_;
+ }
+
private:
const char* const name_;
// The Mutex being used by waiters. It is an error to mix condition variables between different
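
GetNumWaiters() reads num_waiters_ without claiming the guard (hence the NO_THREAD_SAFETY_ANALYSIS), letting a caller that already holds its own lock skip a pointless Broadcast. A sketch of that sort of use with a hypothetical owner class; only GetNumWaiters() and Broadcast() come from mutex.h:

    // Hypothetical consumer of the new accessor.
    class WorkQueue {
     public:
      WorkQueue() : lock_("WorkQueue lock"), cond_("WorkQueue cond", lock_), pending_(0) {}
      void Notify(Thread* self) {
        MutexLock mu(self, lock_);
        ++pending_;
        if (cond_.GetNumWaiters() > 0) {  // skip the broadcast when nobody is waiting
          cond_.Broadcast(self);
        }
      }
     private:
      Mutex lock_;
      ConditionVariable cond_;
      int pending_ GUARDED_BY(lock_);
    };
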
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index 6df1126e0a..45a546f37e 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -86,6 +86,11 @@ void CumulativeLogger::AddLogger(const base::TimingLogger &logger) {
}
}
+size_t CumulativeLogger::GetIterations() const {
+ MutexLock mu(Thread::Current(), lock_);
+ return iterations_;
+}
+
void CumulativeLogger::Dump(std::ostream &os) {
MutexLock mu(Thread::Current(), lock_);
DumpHistogram(os);
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index 07d1ee00e0..501d2d7fd2 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -45,6 +45,7 @@ class CumulativeLogger {
// parent class that is unable to determine the "name" of a sub-class.
void SetName(const std::string& name);
void AddLogger(const base::TimingLogger& logger) LOCKS_EXCLUDED(lock_);
+ size_t GetIterations() const;
private:
typedef std::map<std::string, Histogram<uint64_t> *> Histograms;
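
The new GetIterations() accessor takes lock_ internally (see the timing_logger.cc hunk above), so a caller can report a per-iteration average without touching the histogram map. A sketch of a consumer; the total-time parameter is supplied by the caller here because nothing in this hunk exposes it:

    // Sketch of a consumer of CumulativeLogger::GetIterations().
    void ReportMeanPause(std::ostream& os, CumulativeLogger& logger, uint64_t total_ns) {
      size_t iterations = logger.GetIterations();
      if (iterations != 0) {
        os << "mean: " << (total_ns / iterations) << " ns over "
           << iterations << " iterations\n";
      }
    }
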
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 54cbfe6ea5..a84e18acc8 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -205,7 +205,7 @@ class ScopedCheck {
// If java_object is a weak global ref whose referent has been cleared,
// obj will be NULL. Otherwise, obj should always be non-NULL
// and valid.
- if (!Runtime::Current()->GetHeap()->IsHeapAddress(obj)) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "field operation on invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
@@ -242,7 +242,7 @@ class ScopedCheck {
void CheckInstanceFieldID(jobject java_object, jfieldID fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
- if (o == NULL || !Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
+ if (o == NULL || !Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "field operation on invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
@@ -455,7 +455,8 @@ class ScopedCheck {
mirror::Class* c = reinterpret_cast<mirror::Class*>(Thread::Current()->DecodeJObject(jc));
if (c == NULL) {
msg += "NULL";
- } else if (c == kInvalidIndirectRefObject || !Runtime::Current()->GetHeap()->IsHeapAddress(c)) {
+ } else if (c == kInvalidIndirectRefObject ||
+ !Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
StringAppendF(&msg, "INVALID POINTER:%p", jc);
} else if (!c->IsClass()) {
msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
@@ -621,7 +622,7 @@ class ScopedCheck {
}
mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
- if (!Runtime::Current()->GetHeap()->IsHeapAddress(obj)) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "%s is an invalid %s: %p (%p)",
what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object, obj);
@@ -675,7 +676,7 @@ class ScopedCheck {
}
mirror::Array* a = soa_.Decode<mirror::Array*>(java_array);
- if (!Runtime::Current()->GetHeap()->IsHeapAddress(a)) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(a)) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(), java_array, a);
@@ -696,7 +697,7 @@ class ScopedCheck {
return NULL;
}
mirror::ArtField* f = soa_.DecodeField(fid);
- if (!Runtime::Current()->GetHeap()->IsHeapAddress(f) || !f->IsArtField()) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "invalid jfieldID: %p", fid);
return NULL;
@@ -710,7 +711,7 @@ class ScopedCheck {
return NULL;
}
mirror::ArtMethod* m = soa_.DecodeMethod(mid);
- if (!Runtime::Current()->GetHeap()->IsHeapAddress(m) || !m->IsArtMethod()) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) {
Runtime::Current()->GetHeap()->DumpSpaces();
JniAbortF(function_name_, "invalid jmethodID: %p", mid);
return NULL;
@@ -731,7 +732,7 @@ class ScopedCheck {
}
mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
- if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
Runtime::Current()->GetHeap()->DumpSpaces();
// TODO: when we remove work_around_app_jni_bugs, this should be impossible.
JniAbortF(function_name_, "native code passing in reference to invalid %s: %p",
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index ad568b1cdb..0436435e65 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -18,20 +18,21 @@
#define ART_RUNTIME_CLASS_LINKER_INL_H_
#include "class_linker.h"
-
#include "mirror/art_field.h"
+#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/iftable.h"
#include "mirror/object_array.h"
+#include "sirt_ref.h"
namespace art {
inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
- const mirror::ArtMethod* referrer) {
+ const mirror::ArtMethod* referrer) {
mirror::String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx);
if (UNLIKELY(resolved_string == NULL)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- mirror::DexCache* dex_cache = declaring_class->GetDexCache();
+ SirtRef<mirror::DexCache> dex_cache(Thread::Current(), declaring_class->GetDexCache());
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_string = ResolveString(dex_file, string_idx, dex_cache);
}
@@ -43,8 +44,9 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx,
mirror::Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx);
if (UNLIKELY(resolved_type == NULL)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- mirror::DexCache* dex_cache = declaring_class->GetDexCache();
- mirror::ClassLoader* class_loader = declaring_class->GetClassLoader();
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
}
@@ -53,10 +55,12 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx,
inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, const mirror::ArtField* referrer) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- mirror::DexCache* dex_cache = declaring_class->GetDexCache();
- mirror::Class* resolved_type = dex_cache->GetResolvedType(type_idx);
+ mirror::DexCache* dex_cache_ptr = declaring_class->GetDexCache();
+ mirror::Class* resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
if (UNLIKELY(resolved_type == NULL)) {
- mirror::ClassLoader* class_loader = declaring_class->GetClassLoader();
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, dex_cache_ptr);
+ SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
}
@@ -70,8 +74,9 @@ inline mirror::ArtMethod* ClassLinker::ResolveMethod(uint32_t method_idx,
referrer->GetDexCacheResolvedMethods()->Get(method_idx);
if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- mirror::DexCache* dex_cache = declaring_class->GetDexCache();
- mirror::ClassLoader* class_loader = declaring_class->GetClassLoader();
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_method = ResolveMethod(dex_file, method_idx, dex_cache, class_loader, referrer, type);
}
@@ -81,12 +86,13 @@ inline mirror::ArtMethod* ClassLinker::ResolveMethod(uint32_t method_idx,
inline mirror::ArtField* ClassLinker::ResolveField(uint32_t field_idx,
const mirror::ArtMethod* referrer,
bool is_static) {
+ mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::ArtField* resolved_field =
- referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx);
+ declaring_class->GetDexCache()->GetResolvedField(field_idx);
if (UNLIKELY(resolved_field == NULL)) {
- mirror::Class* declaring_class = referrer->GetDeclaringClass();
- mirror::DexCache* dex_cache = declaring_class->GetDexCache();
- mirror::ClassLoader* class_loader = declaring_class->GetClassLoader();
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static);
}
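
Each resolver above now pins the DexCache and ClassLoader in SirtRefs before entering the slow path, because Resolve* can allocate and a moving collector may relocate what a raw mirror pointer named. The idiom in isolation, with a hypothetical wrapper name (the SirtRef usage and the ResolveType overload are taken from the hunk):

    // Sketch of the rooting idiom used throughout this change.
    mirror::Class* ResolveTypeRooted(ClassLinker* linker, const DexFile& dex_file,
                                     uint16_t type_idx, mirror::Class* declaring_class) {
      Thread* self = Thread::Current();
      // SirtRef registers the reference in the thread's stack indirect reference
      // table, so the GC can update it if ResolveType triggers a moving collection.
      SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
      SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
      return linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
    }
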
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 184e5d4be9..cfe3bf4c0f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -170,20 +170,6 @@ const char* ClassLinker::class_roots_descriptors_[] = {
"[Ljava/lang/StackTraceElement;",
};
-ClassLinker* ClassLinker::CreateFromCompiler(const std::vector<const DexFile*>& boot_class_path,
- InternTable* intern_table) {
- CHECK_NE(boot_class_path.size(), 0U);
- UniquePtr<ClassLinker> class_linker(new ClassLinker(intern_table));
- class_linker->InitFromCompiler(boot_class_path);
- return class_linker.release();
-}
-
-ClassLinker* ClassLinker::CreateFromImage(InternTable* intern_table) {
- UniquePtr<ClassLinker> class_linker(new ClassLinker(intern_table));
- class_linker->InitFromImage();
- return class_linker.release();
-}
-
ClassLinker::ClassLinker(InternTable* intern_table)
// dex_lock_ is recursive as it may be used in stack dumping.
: dex_lock_("ClassLinker dex lock", kDefaultMutexLevel),
@@ -211,14 +197,16 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
// java_lang_Class comes first, it's needed for AllocClass
Thread* self = Thread::Current();
gc::Heap* heap = Runtime::Current()->GetHeap();
- SirtRef<mirror::Class>
- java_lang_Class(self,
- down_cast<mirror::Class*>(heap->AllocObject(self, NULL,
- sizeof(mirror::ClassClass))));
+ // The GC can't handle an object with a null class since we can't get the size of this object.
+ heap->IncrementDisableGC(self);
+ SirtRef<mirror::Class> java_lang_Class(
+ self, down_cast<mirror::Class*>(
+ heap->AllocNonMovableObject(self, NULL, sizeof(mirror::ClassClass))));
CHECK(java_lang_Class.get() != NULL);
mirror::Class::SetClassClass(java_lang_Class.get());
java_lang_Class->SetClass(java_lang_Class.get());
java_lang_Class->SetClassSize(sizeof(mirror::ClassClass));
+ heap->DecrementDisableGC(self);
// AllocClass(mirror::Class*) can now be used
// Class[] is used for reflection support.
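
AllocNonMovableObject puts java.lang.Class itself in a space the semi-space collector will not move, and the Increment/DecrementDisableGC bracket keeps a collection from running while the freshly allocated object still has a null class word (per the comment in the hunk). If the bracket were needed in more places, an RAII guard would keep it exception-safe; a hypothetical sketch built only on the two calls shown above:

    // Hypothetical helper, not part of the patch.
    class ScopedDisableGC {
     public:
      explicit ScopedDisableGC(Thread* self) : self_(self) {
        Runtime::Current()->GetHeap()->IncrementDisableGC(self_);
      }
      ~ScopedDisableGC() {
        Runtime::Current()->GetHeap()->DecrementDisableGC(self_);
      }
     private:
      Thread* const self_;
    };
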
@@ -401,7 +389,7 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
array_iftable_->SetInterface(1, java_io_Serializable);
// Sanity check Class[] and Object[]'s interfaces.
- ClassHelper kh(class_array_class.get(), this);
+ ClassHelper kh(class_array_class.get());
CHECK_EQ(java_lang_Cloneable, kh.GetDirectInterface(0));
CHECK_EQ(java_io_Serializable, kh.GetDirectInterface(1));
kh.ChangeClass(object_array_class.get());
@@ -487,7 +475,7 @@ void ClassLinker::FinishInit() {
FindSystemClass("Ljava/lang/ref/FinalizerReference;");
mirror::ArtField* pendingNext = java_lang_ref_Reference->GetInstanceField(0);
- FieldHelper fh(pendingNext, this);
+ FieldHelper fh(pendingNext);
CHECK_STREQ(fh.GetName(), "pendingNext");
CHECK_STREQ(fh.GetTypeDescriptor(), "Ljava/lang/ref/Reference;");
@@ -1043,6 +1031,7 @@ void ClassLinker::InitFromImage() {
VLOG(startup) << "ClassLinker::InitFromImage entering";
CHECK(!init_done_);
+ Thread* self = Thread::Current();
gc::Heap* heap = Runtime::Current()->GetHeap();
gc::space::ImageSpace* space = heap->GetImageSpace();
dex_cache_image_class_lookup_required_ = true;
@@ -1059,9 +1048,10 @@ void ClassLinker::InitFromImage() {
mirror::ObjectArray<mirror::DexCache>* dex_caches =
dex_caches_object->AsObjectArray<mirror::DexCache>();
- mirror::ObjectArray<mirror::Class>* class_roots =
- space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->AsObjectArray<mirror::Class>();
- class_roots_ = class_roots;
+ SirtRef<mirror::ObjectArray<mirror::Class> > class_roots(
+ self,
+ space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->AsObjectArray<mirror::Class>());
+ class_roots_ = class_roots.get();
// Special case of setting up the String class early so that we can test arbitrary objects
// as being Strings or not
@@ -1069,7 +1059,6 @@ void ClassLinker::InitFromImage() {
CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
- Thread* self = Thread::Current();
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
SirtRef<mirror::DexCache> dex_cache(self, dex_caches->Get(i));
const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
@@ -1096,13 +1085,12 @@ void ClassLinker::InitFromImage() {
// Set entry point to interpreter if in InterpretOnly mode.
if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- heap->FlushAllocStack();
- heap->GetLiveBitmap()->Walk(InitFromImageInterpretOnlyCallback, this);
+ heap->VisitObjects(InitFromImageInterpretOnlyCallback, this);
}
// reinit class_roots_
mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass));
- class_roots_ = class_roots;
+ class_roots_ = class_roots.get();
// reinit array_iftable_ from any array class instance, they should be ==
array_iftable_ = GetClassRoot(kObjectArrayClass)->GetIfTable();
@@ -1192,7 +1180,6 @@ void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* ar
}
}
-
ClassLinker::~ClassLinker() {
mirror::Class::ResetClass();
mirror::String::ResetClass();
@@ -1214,10 +1201,10 @@ ClassLinker::~ClassLinker() {
mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
gc::Heap* heap = Runtime::Current()->GetHeap();
- mirror::Class* dex_cache_class = GetClassRoot(kJavaLangDexCache);
- SirtRef<mirror::DexCache> dex_cache(self,
- down_cast<mirror::DexCache*>(heap->AllocObject(self, dex_cache_class,
- dex_cache_class->GetObjectSize())));
+ SirtRef<mirror::Class> dex_cache_class(self, GetClassRoot(kJavaLangDexCache));
+ SirtRef<mirror::DexCache> dex_cache(
+ self, down_cast<mirror::DexCache*>(
+ heap->AllocObject(self, dex_cache_class.get(), dex_cache_class->GetObjectSize())));
if (dex_cache.get() == NULL) {
return NULL;
}
@@ -1253,13 +1240,8 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
return NULL;
}
- dex_cache->Init(&dex_file,
- location.get(),
- strings.get(),
- types.get(),
- methods.get(),
- fields.get(),
- initialized_static_storage.get());
+ dex_cache->Init(&dex_file, location.get(), strings.get(), types.get(), methods.get(),
+ fields.get(), initialized_static_storage.get());
return dex_cache.get();
}
@@ -1267,7 +1249,7 @@ mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Cl
size_t class_size) {
DCHECK_GE(class_size, sizeof(mirror::Class));
gc::Heap* heap = Runtime::Current()->GetHeap();
- mirror::Object* k = heap->AllocObject(self, java_lang_Class, class_size);
+ mirror::Object* k = heap->AllocNonMovableObject(self, java_lang_Class, class_size);
if (UNLIKELY(k == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return NULL;
@@ -1285,18 +1267,19 @@ mirror::Class* ClassLinker::AllocClass(Thread* self, size_t class_size) {
}
mirror::ArtField* ClassLinker::AllocArtField(Thread* self) {
- return down_cast<mirror::ArtField*>(GetClassRoot(kJavaLangReflectArtField)->AllocObject(self));
+ return down_cast<mirror::ArtField*>(
+ GetClassRoot(kJavaLangReflectArtField)->Alloc<false, true>(self));
}
mirror::ArtMethod* ClassLinker::AllocArtMethod(Thread* self) {
- return down_cast<mirror::ArtMethod*>(GetClassRoot(kJavaLangReflectArtMethod)->AllocObject(self));
+ return down_cast<mirror::ArtMethod*>(
+ GetClassRoot(kJavaLangReflectArtMethod)->Alloc<false, true>(self));
}
-mirror::ObjectArray<mirror::StackTraceElement>* ClassLinker::AllocStackTraceElementArray(Thread* self,
- size_t length) {
- return mirror::ObjectArray<mirror::StackTraceElement>::Alloc(self,
- GetClassRoot(kJavaLangStackTraceElementArrayClass),
- length);
+mirror::ObjectArray<mirror::StackTraceElement>* ClassLinker::AllocStackTraceElementArray(
+ Thread* self, size_t length) {
+ return mirror::ObjectArray<mirror::StackTraceElement>::Alloc(
+ self, GetClassRoot(kJavaLangStackTraceElementArrayClass), length);
}
static mirror::Class* EnsureResolved(Thread* self, mirror::Class* klass)
@@ -1332,10 +1315,12 @@ bool ClassLinker::IsInBootClassPath(const char* descriptor) {
}
mirror::Class* ClassLinker::FindSystemClass(const char* descriptor) {
- return FindClass(descriptor, NULL);
+ SirtRef<mirror::ClassLoader> class_loader(Thread::Current(), nullptr);
+ return FindClass(descriptor, class_loader);
}
-mirror::Class* ClassLinker::FindClass(const char* descriptor, mirror::ClassLoader* class_loader) {
+mirror::Class* ClassLinker::FindClass(const char* descriptor,
+ SirtRef<mirror::ClassLoader>& class_loader) {
DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
Thread* self = Thread::Current();
DCHECK(self != NULL);
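
FindClass now takes the loader as a SirtRef& instead of a raw pointer, so every caller must have rooted it before the lookup; FindSystemClass above is the degenerate null-loader case. A caller-side sketch (the wrapper name is hypothetical, the FindClass signature is from this hunk):

    // Sketch of the new caller-side contract.
    mirror::Class* LookUpInLoader(ClassLinker* linker, const char* descriptor,
                                  mirror::ClassLoader* raw_loader) {
      Thread* self = Thread::Current();
      SirtRef<mirror::ClassLoader> class_loader(self, raw_loader);  // root before any allocation
      return linker->FindClass(descriptor, class_loader);
    }
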
@@ -1346,20 +1331,19 @@ mirror::Class* ClassLinker::FindClass(const char* descriptor, mirror::ClassLoade
return FindPrimitiveClass(descriptor[0]);
}
// Find the class in the loaded classes table.
- mirror::Class* klass = LookupClass(descriptor, class_loader);
+ mirror::Class* klass = LookupClass(descriptor, class_loader.get());
if (klass != NULL) {
return EnsureResolved(self, klass);
}
// Class is not yet loaded.
if (descriptor[0] == '[') {
return CreateArrayClass(descriptor, class_loader);
-
- } else if (class_loader == NULL) {
+ } else if (class_loader.get() == nullptr) {
DexFile::ClassPathEntry pair = DexFile::FindInClassPath(descriptor, boot_class_path_);
if (pair.second != NULL) {
- return DefineClass(descriptor, NULL, *pair.first, *pair.second);
+ SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ return DefineClass(descriptor, class_loader, *pair.first, *pair.second);
}
-
} else if (Runtime::Current()->UseCompileTimeClassPath()) {
// First try the boot class path, we check the descriptor first to avoid an unnecessary
// throw of a NoClassDefFoundError.
@@ -1372,7 +1356,8 @@ mirror::Class* ClassLinker::FindClass(const char* descriptor, mirror::ClassLoade
const std::vector<const DexFile*>* class_path;
{
ScopedObjectAccessUnchecked soa(self);
- ScopedLocalRef<jobject> jclass_loader(soa.Env(), soa.AddLocalReference<jobject>(class_loader));
+ ScopedLocalRef<jobject> jclass_loader(soa.Env(),
+ soa.AddLocalReference<jobject>(class_loader.get()));
class_path = &Runtime::Current()->GetCompileTimeClassPath(jclass_loader.get());
}
@@ -1384,7 +1369,7 @@ mirror::Class* ClassLinker::FindClass(const char* descriptor, mirror::ClassLoade
} else {
ScopedObjectAccessUnchecked soa(self->GetJniEnv());
ScopedLocalRef<jobject> class_loader_object(soa.Env(),
- soa.AddLocalReference<jobject>(class_loader));
+ soa.AddLocalReference<jobject>(class_loader.get()));
std::string class_name_string(DescriptorToDot(descriptor));
ScopedLocalRef<jobject> result(soa.Env(), NULL);
{
@@ -1418,7 +1403,7 @@ mirror::Class* ClassLinker::FindClass(const char* descriptor, mirror::ClassLoade
}
mirror::Class* ClassLinker::DefineClass(const char* descriptor,
- mirror::ClassLoader* class_loader,
+ SirtRef<mirror::ClassLoader>& class_loader,
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def) {
Thread* self = Thread::Current();
@@ -1449,7 +1434,7 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
return NULL;
}
klass->SetDexCache(FindDexCache(dex_file));
- LoadClass(dex_file, dex_class_def, klass, class_loader);
+ LoadClass(dex_file, dex_class_def, klass, class_loader.get());
// Check for a pending exception during load
if (self->IsExceptionPending()) {
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -1457,14 +1442,12 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
}
ObjectLock lock(self, klass.get());
klass->SetClinitThreadId(self->GetTid());
- {
- // Add the newly loaded class to the loaded classes table.
- mirror::Class* existing = InsertClass(descriptor, klass.get(), Hash(descriptor));
- if (existing != NULL) {
- // We failed to insert because we raced with another thread. Calling EnsureResolved may cause
- // this thread to block.
- return EnsureResolved(self, existing);
- }
+ // Add the newly loaded class to the loaded classes table.
+ mirror::Class* existing = InsertClass(descriptor, klass.get(), Hash(descriptor));
+ if (existing != NULL) {
+ // We failed to insert because we raced with another thread. Calling EnsureResolved may cause
+ // this thread to block.
+ return EnsureResolved(self, existing);
}
// Finish loading (if necessary) by finding parents
CHECK(!klass->IsLoaded());
@@ -1476,7 +1459,9 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
CHECK(klass->IsLoaded());
// Link the class (if necessary)
CHECK(!klass->IsResolved());
- if (!LinkClass(klass, NULL, self)) {
+ // TODO: Use fast jobjects?
+ SirtRef<mirror::ObjectArray<mirror::Class> > interfaces(self, nullptr);
+ if (!LinkClass(self, klass, interfaces)) {
// Linking failed.
klass->SetStatus(mirror::Class::kStatusError, self);
return NULL;
@@ -2083,7 +2068,7 @@ mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_cl
//
// Returns NULL with an exception raised on failure.
mirror::Class* ClassLinker::CreateArrayClass(const char* descriptor,
- mirror::ClassLoader* class_loader) {
+ SirtRef<mirror::ClassLoader>& class_loader) {
// Identify the underlying component type
CHECK_EQ('[', descriptor[0]);
mirror::Class* component_type = FindClass(descriptor + 1, class_loader);
@@ -2109,7 +2094,7 @@ mirror::Class* ClassLinker::CreateArrayClass(const char* descriptor,
// because we effectively do this lookup again when we add the new
// class to the hash table --- necessary because of possible races with
// other threads.)
- if (class_loader != component_type->GetClassLoader()) {
+ if (class_loader.get() != component_type->GetClassLoader()) {
mirror::Class* new_class = LookupClass(descriptor, component_type->GetClassLoader());
if (new_class != NULL) {
return new_class;
@@ -2266,11 +2251,10 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k
bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) {
size_t hash = Hash(descriptor);
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- ClassHelper kh;
for (auto it = class_table_.lower_bound(hash), end = class_table_.end(); it != end && it->first == hash;
++it) {
mirror::Class* klass = it->second;
- kh.ChangeClass(klass);
+ ClassHelper kh(klass);
if ((klass->GetClassLoader() == class_loader) &&
(strcmp(descriptor, kh.GetDescriptor()) == 0)) {
class_table_.erase(it);
@@ -2313,18 +2297,17 @@ mirror::Class* ClassLinker::LookupClass(const char* descriptor,
mirror::Class* ClassLinker::LookupClassFromTableLocked(const char* descriptor,
const mirror::ClassLoader* class_loader,
size_t hash) {
- ClassHelper kh(NULL, this);
auto end = class_table_.end();
for (auto it = class_table_.lower_bound(hash); it != end && it->first == hash; ++it) {
mirror::Class* klass = it->second;
- kh.ChangeClass(klass);
+ ClassHelper kh(klass);
if ((klass->GetClassLoader() == class_loader) &&
(strcmp(descriptor, kh.GetDescriptor()) == 0)) {
if (kIsDebugBuild) {
// Check for duplicates in the table.
for (++it; it != end && it->first == hash; ++it) {
mirror::Class* klass2 = it->second;
- kh.ChangeClass(klass2);
+ ClassHelper kh(klass2);
CHECK(!((klass2->GetClassLoader() == class_loader) &&
(strcmp(descriptor, kh.GetDescriptor()) == 0)))
<< PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " "
@@ -2354,14 +2337,13 @@ void ClassLinker::MoveImageClassesToClassTable() {
const char* old_no_suspend_cause =
self->StartAssertNoThreadSuspension("Moving image classes to class table");
mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
- ClassHelper kh(NULL, this);
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
mirror::DexCache* dex_cache = dex_caches->Get(i);
mirror::ObjectArray<mirror::Class>* types = dex_cache->GetResolvedTypes();
for (int32_t j = 0; j < types->GetLength(); j++) {
mirror::Class* klass = types->Get(j);
if (klass != NULL) {
- kh.ChangeClass(klass);
+ ClassHelper kh(klass);
DCHECK(klass->GetClassLoader() == NULL);
const char* descriptor = kh.GetDescriptor();
size_t hash = Hash(descriptor);
@@ -2429,11 +2411,10 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Clas
}
size_t hash = Hash(descriptor);
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- ClassHelper kh(NULL, this);
for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
it != end && it->first == hash; ++it) {
mirror::Class* klass = it->second;
- kh.ChangeClass(klass);
+ ClassHelper kh(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0) {
result.push_back(klass);
}
@@ -2687,12 +2668,10 @@ static void CheckProxyConstructor(mirror::ArtMethod* constructor);
static void CheckProxyMethod(mirror::ArtMethod* method,
SirtRef<mirror::ArtMethod>& prototype);
-mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name,
- mirror::ObjectArray<mirror::Class>* interfaces,
- mirror::ClassLoader* loader,
- mirror::ObjectArray<mirror::ArtMethod>* methods,
- mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >* throws) {
- Thread* self = Thread::Current();
+mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccess& soa, jstring name,
+ jobjectArray interfaces, jobject loader,
+ jobjectArray methods, jobjectArray throws) {
+ Thread* self = soa.Self();
SirtRef<mirror::Class> klass(self, AllocClass(self, GetClassRoot(kJavaLangClass),
sizeof(mirror::SynthesizedProxyClass)));
if (klass.get() == NULL) {
@@ -2702,9 +2681,9 @@ mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name,
DCHECK(klass->GetClass() != NULL);
klass->SetObjectSize(sizeof(mirror::Proxy));
klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal);
- klass->SetClassLoader(loader);
+ klass->SetClassLoader(soa.Decode<mirror::ClassLoader*>(loader));
DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
- klass->SetName(name);
+ klass->SetName(soa.Decode<mirror::String*>(name));
mirror::Class* proxy_class = GetClassRoot(kJavaLangReflectProxy);
klass->SetDexCache(proxy_class->GetDexCache());
klass->SetStatus(mirror::Class::kStatusIdx, self);
@@ -2742,8 +2721,7 @@ mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name,
// Proxies have 1 direct method, the constructor
{
- mirror::ObjectArray<mirror::ArtMethod>* directs =
- AllocArtMethodArray(self, 1);
+ mirror::ObjectArray<mirror::ArtMethod>* directs = AllocArtMethodArray(self, 1);
if (UNLIKELY(directs == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return NULL;
@@ -2757,11 +2735,11 @@ mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name,
klass->SetDirectMethod(0, constructor);
}
- // Create virtual method using specified prototypes
- size_t num_virtual_methods = methods->GetLength();
+ // Create virtual method using specified prototypes.
+ size_t num_virtual_methods =
+ soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods)->GetLength();
{
- mirror::ObjectArray<mirror::ArtMethod>* virtuals =
- AllocArtMethodArray(self, num_virtual_methods);
+ mirror::ObjectArray<mirror::ArtMethod>* virtuals = AllocArtMethodArray(self, num_virtual_methods);
if (UNLIKELY(virtuals == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return NULL;
@@ -2769,7 +2747,9 @@ mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name,
klass->SetVirtualMethods(virtuals);
}
for (size_t i = 0; i < num_virtual_methods; ++i) {
- SirtRef<mirror::ArtMethod> prototype(self, methods->Get(i));
+ mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
+ soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
+ SirtRef<mirror::ArtMethod> prototype(self, decoded_methods->Get(i));
mirror::ArtMethod* clone = CreateProxyMethod(self, klass, prototype);
if (UNLIKELY(clone == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -2785,13 +2765,15 @@ mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name,
{
ObjectLock lock(self, klass.get()); // Must hold lock on object when resolved.
// Link the fields and virtual methods, creating vtable and iftables
- if (!LinkClass(klass, interfaces, self)) {
+ SirtRef<mirror::ObjectArray<mirror::Class> > sirt_interfaces(
+ self, soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
+ if (!LinkClass(self, klass, sirt_interfaces)) {
klass->SetStatus(mirror::Class::kStatusError, self);
return NULL;
}
- interfaces_sfield->SetObject(klass.get(), interfaces);
- throws_sfield->SetObject(klass.get(), throws);
+ interfaces_sfield->SetObject(klass.get(), soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
+ throws_sfield->SetObject(klass.get(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
klass->SetStatus(mirror::Class::kStatusInitialized, self);
}
@@ -2800,22 +2782,25 @@ mirror::Class* ClassLinker::CreateProxyClass(mirror::String* name,
CHECK(klass->GetIFields() == NULL);
CheckProxyConstructor(klass->GetDirectMethod(0));
for (size_t i = 0; i < num_virtual_methods; ++i) {
- SirtRef<mirror::ArtMethod> prototype(self, methods->Get(i));
+ mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
+ soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
+ SirtRef<mirror::ArtMethod> prototype(self, decoded_methods->Get(i));
CheckProxyMethod(klass->GetVirtualMethod(i), prototype);
}
+ mirror::String* decoded_name = soa.Decode<mirror::String*>(name);
std::string interfaces_field_name(StringPrintf("java.lang.Class[] %s.interfaces",
- name->ToModifiedUtf8().c_str()));
+ decoded_name->ToModifiedUtf8().c_str()));
CHECK_EQ(PrettyField(klass->GetStaticField(0)), interfaces_field_name);
std::string throws_field_name(StringPrintf("java.lang.Class[][] %s.throws",
- name->ToModifiedUtf8().c_str()));
+ decoded_name->ToModifiedUtf8().c_str()));
CHECK_EQ(PrettyField(klass->GetStaticField(1)), throws_field_name);
mirror::SynthesizedProxyClass* synth_proxy_class =
down_cast<mirror::SynthesizedProxyClass*>(klass.get());
- CHECK_EQ(synth_proxy_class->GetInterfaces(), interfaces);
- CHECK_EQ(synth_proxy_class->GetThrows(), throws);
+ CHECK_EQ(synth_proxy_class->GetInterfaces(), soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
+ CHECK_EQ(synth_proxy_class->GetThrows(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
}
std::string descriptor(GetDescriptorForProxy(klass.get()));
mirror::Class* existing = InsertClass(descriptor.c_str(), klass.get(), Hash(descriptor.c_str()));
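
CreateProxyClass now receives JNI handles plus a ScopedObjectAccess and re-decodes each handle at its point of use instead of holding mirror pointers across allocation sites; the handles stay valid even if the collector moves the underlying objects. The decode-at-use idiom on its own, with a hypothetical function name:

    // Sketch of the decode-at-use idiom; only soa.Decode and ToModifiedUtf8 are from the hunk.
    void PrintProxyName(ScopedObjectAccess& soa, jstring name) {
      // Decode returns the current mirror object behind the handle; decode again
      // after any call that can allocate rather than caching the raw pointer.
      mirror::String* decoded_name = soa.Decode<mirror::String*>(name);
      LOG(INFO) << decoded_name->ToModifiedUtf8();
    }
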
@@ -2977,6 +2962,10 @@ static bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics,
return true;
}
+bool ClassLinker::IsInitialized() const {
+ return init_done_;
+}
+
bool ClassLinker::InitializeClass(mirror::Class* klass, bool can_init_statics,
bool can_init_parents) {
// see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
@@ -3084,7 +3073,9 @@ bool ClassLinker::InitializeClass(mirror::Class* klass, bool can_init_statics,
const DexFile::ClassDef* dex_class_def = kh.GetClassDef();
CHECK(dex_class_def != NULL);
const DexFile& dex_file = kh.GetDexFile();
- EncodedStaticFieldValueIterator it(dex_file, kh.GetDexCache(), klass->GetClassLoader(),
+ SirtRef<mirror::ClassLoader> class_loader(self, klass->GetClassLoader());
+ SirtRef<mirror::DexCache> dex_cache(self, kh.GetDexCache());
+ EncodedStaticFieldValueIterator it(dex_file, &dex_cache, &class_loader,
this, *dex_class_def);
if (it.HasNext()) {
CHECK(can_init_statics);
@@ -3196,12 +3187,11 @@ bool ClassLinker::ValidateSuperClassDescriptors(const mirror::Class* klass) {
}
}
}
- mirror::IfTable* iftable = klass->GetIfTable();
for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- mirror::Class* interface = iftable->GetInterface(i);
+ mirror::Class* interface = klass->GetIfTable()->GetInterface(i);
if (klass->GetClassLoader() != interface->GetClassLoader()) {
for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) {
- const mirror::ArtMethod* method = iftable->GetMethodArray(i)->Get(j);
+ const mirror::ArtMethod* method = klass->GetIfTable()->GetMethodArray(i)->Get(j);
if (!IsSameMethodSignatureInDifferentClassContexts(method, interface,
method->GetDeclaringClass())) {
ThrowLinkageError(klass, "Class %s method %s resolves differently in interface %s",
@@ -3259,11 +3249,14 @@ bool ClassLinker::IsSameDescriptorInDifferentClassContexts(const char* descripto
if (klass1 == klass2) {
return true;
}
- mirror::Class* found1 = FindClass(descriptor, klass1->GetClassLoader());
+ Thread* self = Thread::Current();
+ SirtRef<mirror::ClassLoader> class_loader1(self, klass1->GetClassLoader());
+ mirror::Class* found1 = FindClass(descriptor, class_loader1);
if (found1 == NULL) {
Thread::Current()->ClearException();
}
- mirror::Class* found2 = FindClass(descriptor, klass2->GetClassLoader());
+ SirtRef<mirror::ClassLoader> class_loader2(self, klass2->GetClassLoader());
+ mirror::Class* found2 = FindClass(descriptor, class_loader2);
if (found2 == NULL) {
Thread::Current()->ClearException();
}
@@ -3285,17 +3278,20 @@ bool ClassLinker::EnsureInitialized(mirror::Class* c, bool can_init_fields, bool
}
void ClassLinker::ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
- mirror::Class* c, SafeMap<uint32_t, mirror::ArtField*>& field_map) {
- mirror::ClassLoader* cl = c->GetClassLoader();
+ mirror::Class* c,
+ SafeMap<uint32_t, mirror::ArtField*>& field_map) {
const byte* class_data = dex_file.GetClassData(dex_class_def);
ClassDataItemIterator it(dex_file, class_data);
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, c->GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, c->GetClassLoader());
for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
- field_map.Put(i, ResolveField(dex_file, it.GetMemberIndex(), c->GetDexCache(), cl, true));
+ field_map.Put(i, ResolveField(dex_file, it.GetMemberIndex(), dex_cache, class_loader, true));
}
}
-bool ClassLinker::LinkClass(SirtRef<mirror::Class>& klass,
- mirror::ObjectArray<mirror::Class>* interfaces, Thread* self) {
+bool ClassLinker::LinkClass(Thread* self, SirtRef<mirror::Class>& klass,
+ SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces) {
CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
if (!LinkSuperClass(klass)) {
return false;
@@ -3419,7 +3415,7 @@ bool ClassLinker::LinkSuperClass(SirtRef<mirror::Class>& klass) {
// Populate the class vtable and itable. Compute return type indices.
bool ClassLinker::LinkMethods(SirtRef<mirror::Class>& klass,
- mirror::ObjectArray<mirror::Class>* interfaces) {
+ SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces) {
if (klass->IsInterface()) {
// No vtable.
size_t count = klass->NumVirtualMethods();
@@ -3453,15 +3449,13 @@ bool ClassLinker::LinkVirtualMethods(SirtRef<mirror::Class>& klass) {
return false;
}
// See if any of our virtual methods override the superclass.
- MethodHelper local_mh(NULL, this);
- MethodHelper super_mh(NULL, this);
for (size_t i = 0; i < klass->NumVirtualMethods(); ++i) {
mirror::ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
- local_mh.ChangeMethod(local_method);
+ MethodHelper local_mh(local_method);
size_t j = 0;
for (; j < actual_count; ++j) {
mirror::ArtMethod* super_method = vtable->Get(j);
- super_mh.ChangeMethod(super_method);
+ MethodHelper super_mh(super_method);
if (local_mh.HasSameNameAndSignature(&super_mh)) {
if (klass->CanAccessMember(super_method->GetDeclaringClass(), super_method->GetAccessFlags())) {
if (super_method->IsFinal()) {
@@ -3525,7 +3519,7 @@ bool ClassLinker::LinkVirtualMethods(SirtRef<mirror::Class>& klass) {
}
bool ClassLinker::LinkInterfaceMethods(SirtRef<mirror::Class>& klass,
- mirror::ObjectArray<mirror::Class>* interfaces) {
+ SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces) {
// Set the imt table to be all conflicts by default.
klass->SetImTable(Runtime::Current()->GetDefaultImt());
size_t super_ifcount;
@@ -3535,11 +3529,13 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef<mirror::Class>& klass,
super_ifcount = 0;
}
size_t ifcount = super_ifcount;
- ClassHelper kh(klass.get(), this);
- uint32_t num_interfaces = interfaces == NULL ? kh.NumDirectInterfaces() : interfaces->GetLength();
+ ClassHelper kh(klass.get());
+ uint32_t num_interfaces =
+ interfaces.get() == nullptr ? kh.NumDirectInterfaces() : interfaces->GetLength();
ifcount += num_interfaces;
for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i);
+ mirror::Class* interface =
+ interfaces.get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
ifcount += interface->GetIfTableCount();
}
if (ifcount == 0) {
@@ -3580,7 +3576,8 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef<mirror::Class>& klass,
// Flatten the interface inheritance hierarchy.
size_t idx = super_ifcount;
for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface = interfaces == NULL ? kh.GetDirectInterface(i) : interfaces->Get(i);
+ mirror::Class* interface =
+ interfaces.get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
DCHECK(interface != NULL);
if (!interface->IsInterface()) {
ClassHelper ih(interface);
@@ -3643,20 +3640,21 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef<mirror::Class>& klass,
return false;
}
std::vector<mirror::ArtMethod*> miranda_list;
- MethodHelper vtable_mh(NULL, this);
- MethodHelper interface_mh(NULL, this);
+ MethodHelper vtable_mh(NULL);
+ MethodHelper interface_mh(NULL);
for (size_t i = 0; i < ifcount; ++i) {
mirror::Class* interface = iftable->GetInterface(i);
size_t num_methods = interface->NumVirtualMethods();
if (num_methods > 0) {
- mirror::ObjectArray<mirror::ArtMethod>* method_array =
- AllocArtMethodArray(self, num_methods);
- if (UNLIKELY(method_array == NULL)) {
+ SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
+ method_array(self, AllocArtMethodArray(self, num_methods));
+ if (UNLIKELY(method_array.get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
- iftable->SetMethodArray(i, method_array);
- mirror::ObjectArray<mirror::ArtMethod>* vtable = klass->GetVTableDuringLinking();
+ iftable->SetMethodArray(i, method_array.get());
+ SirtRef<mirror::ObjectArray<mirror::ArtMethod> > vtable(self,
+ klass->GetVTableDuringLinking());
for (size_t j = 0; j < num_methods; ++j) {
mirror::ArtMethod* interface_method = interface->GetVirtualMethod(j);
interface_mh.ChangeMethod(interface_method);
@@ -3709,10 +3707,7 @@ bool ClassLinker::LinkInterfaceMethods(SirtRef<mirror::Class>& klass,
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
-#ifdef MOVING_GARBAGE_COLLECTOR
// TODO: If a method moves then the miranda_list may hold stale references.
- UNIMPLEMENTED(FATAL);
-#endif
miranda_list.push_back(miranda_method.get());
}
method_array->Set(j, miranda_method.get());
@@ -3791,17 +3786,16 @@ bool ClassLinker::LinkStaticFields(SirtRef<mirror::Class>& klass) {
}
struct LinkFieldsComparator {
- explicit LinkFieldsComparator(FieldHelper* fh)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : fh_(fh) {}
+ explicit LinkFieldsComparator() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ }
// No thread safety analysis as will be called from STL. Checked lock held in constructor.
bool operator()(const mirror::ArtField* field1, const mirror::ArtField* field2)
NO_THREAD_SAFETY_ANALYSIS {
// First come reference fields, then 64-bit, and finally 32-bit
- fh_->ChangeField(field1);
- Primitive::Type type1 = fh_->GetTypeAsPrimitiveType();
- fh_->ChangeField(field2);
- Primitive::Type type2 = fh_->GetTypeAsPrimitiveType();
+ FieldHelper fh1(field1);
+ Primitive::Type type1 = fh1.GetTypeAsPrimitiveType();
+ FieldHelper fh2(field2);
+ Primitive::Type type2 = fh2.GetTypeAsPrimitiveType();
bool isPrimitive1 = type1 != Primitive::kPrimNot;
bool isPrimitive2 = type2 != Primitive::kPrimNot;
bool is64bit1 = isPrimitive1 && (type1 == Primitive::kPrimLong || type1 == Primitive::kPrimDouble);
@@ -3813,14 +3807,10 @@ struct LinkFieldsComparator {
}
// same basic group? then sort by string.
- fh_->ChangeField(field1);
- const char* name1 = fh_->GetName();
- fh_->ChangeField(field2);
- const char* name2 = fh_->GetName();
+ const char* name1 = fh1.GetName();
+ const char* name2 = fh2.GetName();
return strcmp(name1, name2) < 0;
}
-
- FieldHelper* fh_;
};
bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
@@ -3855,17 +3845,15 @@ bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
CHECK(f != NULL);
grouped_and_sorted_fields.push_back(f);
}
- FieldHelper fh(NULL, this);
- std::sort(grouped_and_sorted_fields.begin(),
- grouped_and_sorted_fields.end(),
- LinkFieldsComparator(&fh));
+ std::sort(grouped_and_sorted_fields.begin(), grouped_and_sorted_fields.end(),
+ LinkFieldsComparator());
// References should be at the front.
size_t current_field = 0;
size_t num_reference_fields = 0;
for (; current_field < num_fields; current_field++) {
mirror::ArtField* field = grouped_and_sorted_fields.front();
- fh.ChangeField(field);
+ FieldHelper fh(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
bool isPrimitive = type != Primitive::kPrimNot;
if (isPrimitive) {
@@ -3884,7 +3872,7 @@ bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
if (current_field != num_fields && !IsAligned<8>(field_offset.Uint32Value())) {
for (size_t i = 0; i < grouped_and_sorted_fields.size(); i++) {
mirror::ArtField* field = grouped_and_sorted_fields[i];
- fh.ChangeField(field);
+ FieldHelper fh(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
CHECK(type != Primitive::kPrimNot); // should only be working on primitive types
if (type == Primitive::kPrimLong || type == Primitive::kPrimDouble) {
@@ -3906,7 +3894,7 @@ bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
while (!grouped_and_sorted_fields.empty()) {
mirror::ArtField* field = grouped_and_sorted_fields.front();
grouped_and_sorted_fields.pop_front();
- fh.ChangeField(field);
+ FieldHelper fh(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
CHECK(type != Primitive::kPrimNot); // should only be working on primitive types
fields->Set(current_field, field);
@@ -3920,11 +3908,11 @@ bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
// We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it.
if (!is_static &&
- (strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.get(), this).GetDescriptor()) == 0)) {
+ (strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.get()).GetDescriptor()) == 0)) {
// We know there are no non-reference fields in the Reference classes, and we know
// that 'referent' is alphabetically last, so this is easy...
CHECK_EQ(num_reference_fields, num_fields);
- fh.ChangeField(fields->Get(num_fields - 1));
+ FieldHelper fh(fields->Get(num_fields - 1));
CHECK_STREQ(fh.GetName(), "referent");
--num_reference_fields;
}
@@ -3942,10 +3930,10 @@ bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
<< " offset=" << field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset()),
false);
}
- fh.ChangeField(field);
+ FieldHelper fh(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
bool is_primitive = type != Primitive::kPrimNot;
- if ((strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.get(), this).GetDescriptor()) == 0)
+ if ((strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.get()).GetDescriptor()) == 0)
&& (strcmp("referent", fh.GetName()) == 0)) {
is_primitive = true; // We lied above, so we have to expect a lie here.
}
@@ -3970,7 +3958,7 @@ bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
if (!klass->IsVariableSize()) {
- DCHECK_GE(size, sizeof(mirror::Object)) << ClassHelper(klass.get(), this).GetDescriptor();
+ DCHECK_GE(size, sizeof(mirror::Object)) << ClassHelper(klass.get()).GetDescriptor();
size_t previous_size = klass->GetObjectSize();
if (previous_size != 0) {
// Make sure that we didn't originally have an incorrect size.
@@ -4034,9 +4022,9 @@ void ClassLinker::CreateReferenceOffsets(SirtRef<mirror::Class>& klass, bool is_
}
}
-mirror::String* ClassLinker::ResolveString(const DexFile& dex_file,
- uint32_t string_idx, mirror::DexCache* dex_cache) {
- DCHECK(dex_cache != NULL);
+mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t string_idx,
+ SirtRef<mirror::DexCache>& dex_cache) {
+ DCHECK(dex_cache.get() != nullptr);
mirror::String* resolved = dex_cache->GetResolvedString(string_idx);
if (resolved != NULL) {
return resolved;
@@ -4048,11 +4036,18 @@ mirror::String* ClassLinker::ResolveString(const DexFile& dex_file,
return string;
}
-mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
- uint16_t type_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader) {
- DCHECK(dex_cache != NULL);
+mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
+ const mirror::Class* referrer) {
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, referrer->GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, referrer->GetClassLoader());
+ return ResolveType(dex_file, type_idx, dex_cache, class_loader);
+}
+
+mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader) {
+ DCHECK(dex_cache.get() != NULL);
mirror::Class* resolved = dex_cache->GetResolvedType(type_idx);
if (resolved == NULL) {
const char* descriptor = dex_file.StringByTypeIdx(type_idx);
@@ -4082,11 +4077,11 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
uint32_t method_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
const mirror::ArtMethod* referrer,
InvokeType type) {
- DCHECK(dex_cache != NULL);
+ DCHECK(dex_cache.get() != NULL);
// Check for hit in the dex cache.
mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
if (resolved != NULL && !resolved->IsRuntimeMethod()) {
@@ -4104,15 +4099,15 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
switch (type) {
case kDirect: // Fall-through.
case kStatic:
- resolved = klass->FindDirectMethod(dex_cache, method_idx);
+ resolved = klass->FindDirectMethod(dex_cache.get(), method_idx);
break;
case kInterface:
- resolved = klass->FindInterfaceMethod(dex_cache, method_idx);
+ resolved = klass->FindInterfaceMethod(dex_cache.get(), method_idx);
DCHECK(resolved == NULL || resolved->GetDeclaringClass()->IsInterface());
break;
case kSuper: // Fall-through.
case kVirtual:
- resolved = klass->FindVirtualMethod(dex_cache, method_idx);
+ resolved = klass->FindVirtualMethod(dex_cache.get(), method_idx);
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << type;
@@ -4227,12 +4222,11 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
}
}
-mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file,
- uint32_t field_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
bool is_static) {
- DCHECK(dex_cache != NULL);
+ DCHECK(dex_cache.get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
if (resolved != NULL) {
return resolved;
@@ -4245,9 +4239,9 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file,
}
if (is_static) {
- resolved = klass->FindStaticField(dex_cache, field_idx);
+ resolved = klass->FindStaticField(dex_cache.get(), field_idx);
} else {
- resolved = klass->FindInstanceField(dex_cache, field_idx);
+ resolved = klass->FindInstanceField(dex_cache.get(), field_idx);
}
if (resolved == NULL) {
@@ -4269,9 +4263,9 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file,
mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
uint32_t field_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader) {
- DCHECK(dex_cache != NULL);
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader) {
+ DCHECK(dex_cache.get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
if (resolved != NULL) {
return resolved;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 473370d90f..4e2cc063b3 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -25,6 +25,7 @@
#include "base/mutex.h"
#include "dex_file.h"
#include "gtest/gtest.h"
+#include "jni.h"
#include "root_visitor.h"
#include "oat_file.h"
@@ -45,6 +46,7 @@ namespace mirror {
class InternTable;
class ObjectLock;
+class ScopedObjectAccess;
template<class T> class SirtRef;
typedef bool (ClassVisitor)(mirror::Class* c, void* arg);
@@ -56,29 +58,31 @@ class ClassLinker {
// (non-marker) interfaces.
static constexpr size_t kImtSize = 64;
- // Creates the class linker by bootstrapping from dex files.
- static ClassLinker* CreateFromCompiler(const std::vector<const DexFile*>& boot_class_path,
- InternTable* intern_table)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ explicit ClassLinker(InternTable* intern_table);
+ ~ClassLinker();
- // Creates the class linker from an image.
- static ClassLinker* CreateFromImage(InternTable* intern_table)
+ // Initialize class linker by bootstrapping from dex files
+ void InitFromCompiler(const std::vector<const DexFile*>& boot_class_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ~ClassLinker();
+ // Initialize class linker from one or more images.
+ void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsInBootClassPath(const char* descriptor);
// Finds a class by its descriptor, loading it if necessary.
// If class_loader is null, searches boot_class_path_.
- mirror::Class* FindClass(const char* descriptor, mirror::ClassLoader* class_loader)
+ mirror::Class* FindClass(const char* descriptor, SirtRef<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* FindSystemClass(const char* descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Returns true if the class linker is initialized.
+ bool IsInitialized() const;
+
// Define a new class based on a ClassDef from a DexFile
- mirror::Class* DefineClass(const char* descriptor, mirror::ClassLoader* class_loader,
+ mirror::Class* DefineClass(const char* descriptor, SirtRef<mirror::ClassLoader>& class_loader,
const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -122,7 +126,7 @@ class ClassLinker {
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache.
mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
- mirror::DexCache* dex_cache)
+ SirtRef<mirror::DexCache>& dex_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
@@ -130,12 +134,7 @@ class ClassLinker {
// target DexCache and ClassLoader to use for resolution.
mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
const mirror::Class* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveType(dex_file,
- type_idx,
- referrer->GetDexCache(),
- referrer->GetClassLoader());
- }
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
@@ -150,10 +149,9 @@ class ClassLinker {
// result in DexCache. The ClassLoader is used to search for the
// type, since it may be referenced from but not contained within
// the given DexFile.
- mirror::Class* ResolveType(const DexFile& dex_file,
- uint16_t type_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader)
+ mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a method with a given ID from the DexFile, storing the
@@ -163,8 +161,8 @@ class ClassLinker {
// virtual method.
mirror::ArtMethod* ResolveMethod(const DexFile& dex_file,
uint32_t method_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
const mirror::ArtMethod* referrer,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -184,8 +182,8 @@ class ClassLinker {
// field.
mirror::ArtField* ResolveField(const DexFile& dex_file,
uint32_t field_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -195,8 +193,8 @@ class ClassLinker {
// field resolution semantics are followed.
mirror::ArtField* ResolveFieldJLS(const DexFile& dex_file,
uint32_t field_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader)
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get shorty from method index without resolution. Used to do handlerization.
@@ -314,10 +312,8 @@ class ClassLinker {
void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::ArtMethod* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Class* CreateProxyClass(mirror::String* name, mirror::ObjectArray<mirror::Class>* interfaces,
- mirror::ClassLoader* loader,
- mirror::ObjectArray<mirror::ArtMethod>* methods,
- mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >* throws)
+ mirror::Class* CreateProxyClass(ScopedObjectAccess& soa, jstring name, jobjectArray interfaces,
+ jobject loader, jobjectArray methods, jobjectArray throws)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
std::string GetDescriptorForProxy(const mirror::Class* proxy_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -364,18 +360,13 @@ class ClassLinker {
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- private:
- explicit ClassLinker(InternTable*);
+ // Special code to allocate an ArtMethod; use this instead of class->AllocObject.
+ mirror::ArtMethod* AllocArtMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
const OatFile::OatMethod GetOatMethodFor(const mirror::ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Initialize class linker by bootstraping from dex files
- void InitFromCompiler(const std::vector<const DexFile*>& boot_class_path)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Initialize class linker from one or more images.
- void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
OatFile& GetImageOatFile(gc::space::ImageSpace* space)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -393,7 +384,6 @@ class ClassLinker {
mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ArtField* AllocArtField(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* AllocArtMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -401,7 +391,8 @@ class ClassLinker {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Class* CreateArrayClass(const char* descriptor, mirror::ClassLoader* class_loader)
+ mirror::Class* CreateArrayClass(const char* descriptor,
+ SirtRef<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AppendToBootClassPath(const DexFile& dex_file)
@@ -458,8 +449,8 @@ class ClassLinker {
const mirror::Class* klass2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkClass(SirtRef<mirror::Class>& klass, mirror::ObjectArray<mirror::Class>* interfaces,
- Thread* self)
+ bool LinkClass(Thread* self, SirtRef<mirror::Class>& klass,
+ SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkSuperClass(SirtRef<mirror::Class>& klass)
@@ -468,14 +459,15 @@ class ClassLinker {
bool LoadSuperAndInterfaces(SirtRef<mirror::Class>& klass, const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkMethods(SirtRef<mirror::Class>& klass, mirror::ObjectArray<mirror::Class>* interfaces)
+ bool LinkMethods(SirtRef<mirror::Class>& klass,
+ SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkVirtualMethods(SirtRef<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkInterfaceMethods(SirtRef<mirror::Class>& klass,
- mirror::ObjectArray<mirror::Class>* interfaces)
+ SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkStaticFields(SirtRef<mirror::Class>& klass)
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index a52b680260..b8bc474b10 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -95,7 +95,8 @@ class ClassLinkerTest : public CommonTest {
const std::string& component_type,
mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Class* array = class_linker_->FindClass(array_descriptor.c_str(), class_loader);
+ SirtRef<mirror::ClassLoader> loader(Thread::Current(), class_loader);
+ mirror::Class* array = class_linker_->FindClass(array_descriptor.c_str(), loader);
ClassHelper array_component_ch(array->GetComponentType());
EXPECT_STREQ(component_type.c_str(), array_component_ch.GetDescriptor());
EXPECT_EQ(class_loader, array->GetClassLoader());
@@ -647,12 +648,12 @@ TEST_F(ClassLinkerTest, FindClassNested) {
ScopedObjectAccess soa(Thread::Current());
SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Nested")));
- mirror::Class* outer = class_linker_->FindClass("LNested;", class_loader.get());
+ mirror::Class* outer = class_linker_->FindClass("LNested;", class_loader);
ASSERT_TRUE(outer != NULL);
EXPECT_EQ(0U, outer->NumVirtualMethods());
EXPECT_EQ(1U, outer->NumDirectMethods());
- mirror::Class* inner = class_linker_->FindClass("LNested$Inner;", class_loader.get());
+ mirror::Class* inner = class_linker_->FindClass("LNested$Inner;", class_loader);
ASSERT_TRUE(inner != NULL);
EXPECT_EQ(0U, inner->NumVirtualMethods());
EXPECT_EQ(1U, inner->NumDirectMethods());
@@ -711,7 +712,7 @@ TEST_F(ClassLinkerTest, FindClass) {
SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass")));
AssertNonExistentClass("LMyClass;");
- mirror::Class* MyClass = class_linker_->FindClass("LMyClass;", class_loader.get());
+ mirror::Class* MyClass = class_linker_->FindClass("LMyClass;", class_loader);
kh.ChangeClass(MyClass);
ASSERT_TRUE(MyClass != NULL);
ASSERT_TRUE(MyClass->GetClass() != NULL);
@@ -809,29 +810,30 @@ TEST_F(ClassLinkerTest, ValidateBoxedTypes) {
// Validate that the "value" field is always the 0th field in each of java.lang's box classes.
// This lets UnboxPrimitive avoid searching for the field by name at runtime.
ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
mirror::Class* c;
- c = class_linker_->FindClass("Ljava/lang/Boolean;", NULL);
+ c = class_linker_->FindClass("Ljava/lang/Boolean;", class_loader);
FieldHelper fh(c->GetIFields()->Get(0));
EXPECT_STREQ("value", fh.GetName());
- c = class_linker_->FindClass("Ljava/lang/Byte;", NULL);
+ c = class_linker_->FindClass("Ljava/lang/Byte;", class_loader);
fh.ChangeField(c->GetIFields()->Get(0));
EXPECT_STREQ("value", fh.GetName());
- c = class_linker_->FindClass("Ljava/lang/Character;", NULL);
+ c = class_linker_->FindClass("Ljava/lang/Character;", class_loader);
fh.ChangeField(c->GetIFields()->Get(0));
EXPECT_STREQ("value", fh.GetName());
- c = class_linker_->FindClass("Ljava/lang/Double;", NULL);
+ c = class_linker_->FindClass("Ljava/lang/Double;", class_loader);
fh.ChangeField(c->GetIFields()->Get(0));
EXPECT_STREQ("value", fh.GetName());
- c = class_linker_->FindClass("Ljava/lang/Float;", NULL);
+ c = class_linker_->FindClass("Ljava/lang/Float;", class_loader);
fh.ChangeField(c->GetIFields()->Get(0));
EXPECT_STREQ("value", fh.GetName());
- c = class_linker_->FindClass("Ljava/lang/Integer;", NULL);
+ c = class_linker_->FindClass("Ljava/lang/Integer;", class_loader);
fh.ChangeField(c->GetIFields()->Get(0));
EXPECT_STREQ("value", fh.GetName());
- c = class_linker_->FindClass("Ljava/lang/Long;", NULL);
+ c = class_linker_->FindClass("Ljava/lang/Long;", class_loader);
fh.ChangeField(c->GetIFields()->Get(0));
EXPECT_STREQ("value", fh.GetName());
- c = class_linker_->FindClass("Ljava/lang/Short;", NULL);
+ c = class_linker_->FindClass("Ljava/lang/Short;", class_loader);
fh.ChangeField(c->GetIFields()->Get(0));
EXPECT_STREQ("value", fh.GetName());
}
@@ -840,8 +842,8 @@ TEST_F(ClassLinkerTest, TwoClassLoadersOneClass) {
ScopedObjectAccess soa(Thread::Current());
SirtRef<mirror::ClassLoader> class_loader_1(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass")));
SirtRef<mirror::ClassLoader> class_loader_2(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass")));
- mirror::Class* MyClass_1 = class_linker_->FindClass("LMyClass;", class_loader_1.get());
- mirror::Class* MyClass_2 = class_linker_->FindClass("LMyClass;", class_loader_2.get());
+ mirror::Class* MyClass_1 = class_linker_->FindClass("LMyClass;", class_loader_1);
+ mirror::Class* MyClass_2 = class_linker_->FindClass("LMyClass;", class_loader_2);
EXPECT_TRUE(MyClass_1 != NULL);
EXPECT_TRUE(MyClass_2 != NULL);
EXPECT_NE(MyClass_1, MyClass_2);
@@ -850,7 +852,7 @@ TEST_F(ClassLinkerTest, TwoClassLoadersOneClass) {
TEST_F(ClassLinkerTest, StaticFields) {
ScopedObjectAccess soa(Thread::Current());
SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Statics")));
- mirror::Class* statics = class_linker_->FindClass("LStatics;", class_loader.get());
+ mirror::Class* statics = class_linker_->FindClass("LStatics;", class_loader);
class_linker_->EnsureInitialized(statics, true, true);
// Static final primitives that are initialized by a compile-time constant
@@ -932,11 +934,11 @@ TEST_F(ClassLinkerTest, StaticFields) {
TEST_F(ClassLinkerTest, Interfaces) {
ScopedObjectAccess soa(Thread::Current());
SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Interfaces")));
- mirror::Class* I = class_linker_->FindClass("LInterfaces$I;", class_loader.get());
- mirror::Class* J = class_linker_->FindClass("LInterfaces$J;", class_loader.get());
- mirror::Class* K = class_linker_->FindClass("LInterfaces$K;", class_loader.get());
- mirror::Class* A = class_linker_->FindClass("LInterfaces$A;", class_loader.get());
- mirror::Class* B = class_linker_->FindClass("LInterfaces$B;", class_loader.get());
+ mirror::Class* I = class_linker_->FindClass("LInterfaces$I;", class_loader);
+ mirror::Class* J = class_linker_->FindClass("LInterfaces$J;", class_loader);
+ mirror::Class* K = class_linker_->FindClass("LInterfaces$K;", class_loader);
+ mirror::Class* A = class_linker_->FindClass("LInterfaces$A;", class_loader);
+ mirror::Class* B = class_linker_->FindClass("LInterfaces$B;", class_loader);
EXPECT_TRUE(I->IsAssignableFrom(A));
EXPECT_TRUE(J->IsAssignableFrom(A));
EXPECT_TRUE(J->IsAssignableFrom(K));
@@ -995,8 +997,7 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) {
SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(jclass_loader)[0];
CHECK(dex_file != NULL);
-
- mirror::Class* klass = class_linker_->FindClass("LStaticsFromCode;", class_loader.get());
+ mirror::Class* klass = class_linker_->FindClass("LStaticsFromCode;", class_loader);
mirror::ArtMethod* clinit = klass->FindClassInitializer();
mirror::ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;");
const DexFile::StringId* string_id = dex_file->FindStringId("LStaticsFromCode;");
@@ -1049,10 +1050,9 @@ TEST_F(ClassLinkerTest, FinalizableBit) {
TEST_F(ClassLinkerTest, ClassRootDescriptors) {
ScopedObjectAccess soa(Thread::Current());
- ClassHelper kh;
for (int i = 0; i < ClassLinker::kClassRootsMax; i++) {
mirror::Class* klass = class_linker_->GetClassRoot(ClassLinker::ClassRoot(i));
- kh.ChangeClass(klass);
+ ClassHelper kh(klass);
EXPECT_TRUE(kh.GetDescriptor() != NULL);
EXPECT_STREQ(kh.GetDescriptor(),
class_linker_->GetClassRootDescriptor(ClassLinker::ClassRoot(i))) << " i = " << i;
diff --git a/runtime/common_test.h b/runtime/common_test.h
index 643ed1d89d..7cc29a1e58 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -569,7 +569,8 @@ class CommonTest : public testing::Test {
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
- mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader);
+ SirtRef<mirror::ClassLoader> loader(Thread::Current(), class_loader);
+ mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), loader);
CHECK(klass != NULL) << "Class not found " << class_name;
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
CompileMethod(klass->GetDirectMethod(i));
@@ -587,10 +588,8 @@ class CommonTest : public testing::Test {
MakeExecutable(method);
}
- void CompileDirectMethod(mirror::ClassLoader* class_loader,
- const char* class_name,
- const char* method_name,
- const char* signature)
+ void CompileDirectMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+ const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader);
@@ -601,10 +600,8 @@ class CommonTest : public testing::Test {
CompileMethod(method);
}
- void CompileVirtualMethod(mirror::ClassLoader* class_loader,
- const char* class_name,
- const char* method_name,
- const char* signature)
+ void CompileVirtualMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+ const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
mirror::Class* klass = class_linker_->FindClass(class_descriptor.c_str(), class_loader);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 0eecd28831..f537709261 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1118,7 +1118,8 @@ JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t
if (c == NULL) {
return status;
}
- new_array = gRegistry->Add(mirror::Array::Alloc(Thread::Current(), c, length));
+ new_array = gRegistry->Add(
+ mirror::Array::Alloc<kMovingCollector, true>(Thread::Current(), c, length));
return JDWP::ERR_NONE;
}
@@ -1133,38 +1134,26 @@ bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id)
static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifdef MOVING_GARBAGE_COLLECTOR
- UNIMPLEMENTED(FATAL);
-#else
+ CHECK(!kMovingFields);
return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
-#endif
}
static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifdef MOVING_GARBAGE_COLLECTOR
- UNIMPLEMENTED(FATAL);
-#else
+ CHECK(!kMovingMethods);
return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
-#endif
}
static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifdef MOVING_GARBAGE_COLLECTOR
- UNIMPLEMENTED(FATAL);
-#else
+ CHECK(!kMovingFields);
return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
-#endif
}
static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifdef MOVING_GARBAGE_COLLECTOR
- UNIMPLEMENTED(FATAL);
-#else
+ CHECK(!kMovingMethods);
return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
-#endif
}
static void SetLocation(JDWP::JdwpLocation& location, mirror::ArtMethod* m, uint32_t dex_pc)
@@ -2079,7 +2068,7 @@ void Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl
CHECK_EQ(width_, sizeof(JDWP::ObjectId));
mirror::Object* o = reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
VLOG(jdwp) << "get array local " << reg << " = " << o;
- if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
LOG(FATAL) << "Register " << reg << " expected to hold array: " << o;
}
JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
@@ -2095,7 +2084,7 @@ void Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int sl
CHECK_EQ(width_, sizeof(JDWP::ObjectId));
mirror::Object* o = reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
VLOG(jdwp) << "get object local " << reg << " = " << o;
- if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
LOG(FATAL) << "Register " << reg << " expected to hold object: " << o;
}
tag_ = TagFromObject(o);
@@ -3372,7 +3361,7 @@ class HeapChunkContext {
return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
}
- if (!Runtime::Current()->GetHeap()->IsHeapAddress(c)) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
}
@@ -3752,7 +3741,6 @@ jbyteArray Dbg::GetRecentAllocations() {
count = gAllocRecordCount;
idx = HeadIndex();
- ClassHelper kh;
while (count--) {
// For each entry:
// (4b) total allocation size
@@ -3761,7 +3749,7 @@ jbyteArray Dbg::GetRecentAllocations() {
// (1b) stack depth
AllocRecord* record = &recent_allocation_records_[idx];
size_t stack_depth = record->GetDepth();
- kh.ChangeClass(record->type);
+ ClassHelper kh(record->type);
size_t allocated_object_class_name_index = class_names.IndexOf(kh.GetDescriptor());
JDWP::Append4BE(bytes, record->byte_count);
JDWP::Append2BE(bytes, record->thin_lock_id);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index a897cce2e0..a02823eb90 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -37,6 +37,7 @@
#include "os.h"
#include "safe_map.h"
#include "ScopedFd.h"
+#include "sirt_ref.h"
#include "thread.h"
#include "UniquePtr.h"
#include "utf-inl.h"
@@ -963,12 +964,14 @@ static uint64_t ReadUnsignedLong(const byte* ptr, int zwidth, bool fill_on_right
}
EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ SirtRef<mirror::DexCache>* dex_cache,
+ SirtRef<mirror::ClassLoader>* class_loader,
ClassLinker* linker,
const DexFile::ClassDef& class_def)
: dex_file_(dex_file), dex_cache_(dex_cache), class_loader_(class_loader), linker_(linker),
array_size_(), pos_(-1), type_(kByte) {
+ DCHECK(dex_cache != nullptr);
+ DCHECK(class_loader != nullptr);
ptr_ = dex_file.GetEncodedStaticFieldValuesArray(class_def);
if (ptr_ == NULL) {
array_size_ = 0;
@@ -1051,12 +1054,15 @@ void EncodedStaticFieldValueIterator::ReadValueToField(mirror::ArtField* field)
case kDouble: field->SetDouble(field->GetDeclaringClass(), jval_.d); break;
case kNull: field->SetObject(field->GetDeclaringClass(), NULL); break;
case kString: {
- mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, dex_cache_);
+ CHECK(!kMovingFields);
+ mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, *dex_cache_);
field->SetObject(field->GetDeclaringClass(), resolved);
break;
}
case kType: {
- mirror::Class* resolved = linker_->ResolveType(dex_file_, jval_.i, dex_cache_, class_loader_);
+ CHECK(!kMovingFields);
+ mirror::Class* resolved = linker_->ResolveType(dex_file_, jval_.i, *dex_cache_,
+ *class_loader_);
field->SetObject(field->GetDeclaringClass(), resolved);
break;
}
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index a9c24e66c1..51ab8d81a8 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -43,6 +43,8 @@ namespace mirror {
} // namespace mirror
class ClassLinker;
class Signature;
+template <typename T>
+class SirtRef;
class StringPiece;
class ZipArchive;
@@ -1152,8 +1154,8 @@ class ClassDataItemIterator {
class EncodedStaticFieldValueIterator {
public:
- EncodedStaticFieldValueIterator(const DexFile& dex_file, mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ EncodedStaticFieldValueIterator(const DexFile& dex_file, SirtRef<mirror::DexCache>* dex_cache,
+ SirtRef<mirror::ClassLoader>* class_loader,
ClassLinker* linker, const DexFile::ClassDef& class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -1187,8 +1189,8 @@ class EncodedStaticFieldValueIterator {
static const byte kEncodedValueArgShift = 5;
const DexFile& dex_file_;
- mirror::DexCache* dex_cache_; // Dex cache to resolve literal objects.
- mirror::ClassLoader* class_loader_; // ClassLoader to resolve types.
+ SirtRef<mirror::DexCache>* const dex_cache_; // Dex cache to resolve literal objects.
+ SirtRef<mirror::ClassLoader>* const class_loader_; // ClassLoader to resolve types.
ClassLinker* linker_; // Linker to resolve literal objects.
size_t array_size_; // Size of array.
size_t pos_; // Current position.
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 24ab1ce9da..d7bbe6475a 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -82,7 +82,7 @@ mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod*
if (UNLIKELY(!CheckFilledNewArrayAlloc(type_idx, referrer, component_count, self, access_check, &klass))) {
return NULL;
}
- return mirror::Array::AllocUninstrumented(self, klass, component_count);
+ return mirror::Array::Alloc<kMovingCollector, false>(self, klass, component_count);
}
// Helper function to allocate array for FILLED_NEW_ARRAY.
@@ -93,7 +93,7 @@ mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror:
if (UNLIKELY(!CheckFilledNewArrayAlloc(type_idx, referrer, component_count, self, access_check, &klass))) {
return NULL;
}
- return mirror::Array::AllocInstrumented(self, klass, component_count);
+ return mirror::Array::Alloc<kMovingCollector, true>(self, klass, component_count);
}
void ThrowStackOverflowError(Thread* self) {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 7ce50c5dfb..3b58a8da30 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -88,7 +88,7 @@ static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::Art
if (UNLIKELY(!CheckObjectAlloc(type_idx, method, self, access_check, &klass))) {
return NULL;
}
- return klass->AllocObjectUninstrumented(self);
+ return klass->Alloc<kMovingCollector, false>(self);
}
static inline mirror::Object* AllocObjectFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
@@ -99,7 +99,7 @@ static inline mirror::Object* AllocObjectFromCodeInstrumented(uint32_t type_idx,
if (UNLIKELY(!CheckObjectAlloc(type_idx, method, self, access_check, &klass))) {
return NULL;
}
- return klass->AllocObjectInstrumented(self);
+ return klass->Alloc<kMovingCollector, true>(self);
}
static inline bool CheckArrayAlloc(uint32_t type_idx, mirror::ArtMethod* method,
@@ -142,7 +142,7 @@ static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::ArtMe
if (UNLIKELY(!CheckArrayAlloc(type_idx, method, component_count, access_check, &klass))) {
return NULL;
}
- return mirror::Array::AllocUninstrumented(self, klass, component_count);
+ return mirror::Array::Alloc<kMovingCollector, false>(self, klass, component_count);
}
static inline mirror::Array* AllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
@@ -153,7 +153,7 @@ static inline mirror::Array* AllocArrayFromCodeInstrumented(uint32_t type_idx, m
if (UNLIKELY(!CheckArrayAlloc(type_idx, method, component_count, access_check, &klass))) {
return NULL;
}
- return mirror::Array::AllocInstrumented(self, klass, component_count);
+ return mirror::Array::Alloc<kMovingCollector, true>(self, klass, component_count);
}
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 2102ab1bea..540abb3ef9 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -29,9 +29,15 @@ extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self, mirror::
"Null reference used for synchronization (monitor-enter)");
return -1; // Failure.
} else {
- obj->MonitorEnter(self); // May block
- DCHECK(self->HoldsLock(obj));
- DCHECK(!self->IsExceptionPending());
+ if (kIsDebugBuild) {
+ // GC may move the obj, need Sirt for the following DCHECKs.
+ SirtRef<mirror::Object> sirt_obj(self, obj);
+ obj->MonitorEnter(self); // May block
+ CHECK(self->HoldsLock(sirt_obj.get()));
+ CHECK(!self->IsExceptionPending());
+ } else {
+ obj->MonitorEnter(self); // May block
+ }
return 0; // Success.
// Only possible exception is NPE and is handled before entry
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 01d3549985..8ba08ee604 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -416,10 +416,10 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
// Read object references held in arguments from quick frames and place them in JNI local references,
// so they don't get garbage collected.
-class RememberFoGcArgumentVisitor : public QuickArgumentVisitor {
+class RememberForGcArgumentVisitor : public QuickArgumentVisitor {
public:
- RememberFoGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
+ RememberForGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
+ uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -441,7 +441,7 @@ class RememberFoGcArgumentVisitor : public QuickArgumentVisitor {
private:
ScopedObjectAccessUnchecked* soa_;
std::vector<std::pair<jobject, mirror::Object**> > references_;
- DISALLOW_COPY_AND_ASSIGN(RememberFoGcArgumentVisitor);
+ DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};
// Lazily resolve a method for quick. Called by stub code.
@@ -531,7 +531,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
uint32_t shorty_len;
const char* shorty =
dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
- RememberFoGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
+ RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
visitor.VisitArguments();
thread->EndAssertNoThreadSuspension(old_cause);
// Resolve method filling in dex cache.
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index a5f999784d..e9a6e4fa49 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -39,7 +39,7 @@ class ExceptionTest : public CommonTest {
ScopedObjectAccess soa(Thread::Current());
SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
soa.Decode<mirror::ClassLoader*>(LoadDex("ExceptionHandle")));
- my_klass_ = class_linker_->FindClass("LExceptionHandle;", class_loader.get());
+ my_klass_ = class_linker_->FindClass("LExceptionHandle;", class_loader);
ASSERT_TRUE(my_klass_ != NULL);
class_linker_->EnsureInitialized(my_klass_, true, true);
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 7cbe94d3d2..faa198a370 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -82,7 +82,7 @@ class ModUnionUpdateObjectReferencesVisitor {
if (ref != nullptr) {
Object* new_ref = visitor_(ref, arg_);
if (new_ref != ref) {
- obj->SetFieldObject(offset, ref, false, true);
+ obj->SetFieldObject(offset, new_ref, true);
}
}
}
@@ -154,7 +154,7 @@ class ModUnionReferenceVisitor {
// We don't have an early exit since we use the visitor pattern, an early
// exit should significantly speed this up.
AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
- collector::MarkSweep::VisitObjectReferences(obj, visitor);
+ collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
}
private:
ModUnionTableReferenceCache* const mod_union_table_;
@@ -206,7 +206,7 @@ class ModUnionCheckReferences {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
DCHECK(obj != NULL);
CheckReferenceVisitor visitor(mod_union_table_, references_);
- collector::MarkSweep::VisitObjectReferences(obj, visitor);
+ collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
}
private:
@@ -334,7 +334,7 @@ void ModUnionTableCardCache::Dump(std::ostream& os) {
for (const byte* card_addr : cleared_cards_) {
auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
auto end = start + CardTable::kCardSize;
- os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
+ os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
}
os << "]";
}
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 6691cadbd4..178910347c 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -36,6 +36,7 @@ namespace collector {
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
: heap_(heap),
name_(name),
+ clear_soft_references_(false),
verbose_(VLOG_IS_ON(heap)),
duration_ns_(0),
timings_(name_.c_str(), true, verbose_),
@@ -60,11 +61,18 @@ void GarbageCollector::ResetCumulativeStatistics() {
total_freed_bytes_ = 0;
}
-void GarbageCollector::Run() {
+void GarbageCollector::Run(bool clear_soft_references) {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
uint64_t start_time = NanoTime();
pause_times_.clear();
duration_ns_ = 0;
+ clear_soft_references_ = clear_soft_references;
+
+ // Reset stats.
+ freed_bytes_ = 0;
+ freed_large_object_bytes_ = 0;
+ freed_objects_ = 0;
+ freed_large_objects_ = 0;
InitializePhase();
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 0f566c954b..6111c2fbf2 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -46,7 +46,7 @@ class GarbageCollector {
virtual GcType GetGcType() const = 0;
// Run the garbage collector.
- void Run();
+ void Run(bool clear_soft_references);
Heap* GetHeap() const {
return heap_;
@@ -78,6 +78,34 @@ class GarbageCollector {
// this is the allocation space, for full GC then we swap the zygote bitmaps too.
void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ size_t GetFreedBytes() const {
+ return freed_bytes_;
+ }
+
+ size_t GetFreedLargeObjectBytes() const {
+ return freed_large_object_bytes_;
+ }
+
+ size_t GetFreedObjects() const {
+ return freed_objects_;
+ }
+
+ size_t GetFreedLargeObjects() const {
+ return freed_large_objects_;
+ }
+
+ uint64_t GetTotalPausedTimeNs() const {
+ return total_paused_time_ns_;
+ }
+
+ uint64_t GetTotalFreedBytes() const {
+ return total_freed_bytes_;
+ }
+
+ uint64_t GetTotalFreedObjects() const {
+ return total_freed_objects_;
+ }
+
protected:
// The initial phase. Done without mutators paused.
virtual void InitializePhase() = 0;
@@ -98,6 +126,8 @@ class GarbageCollector {
std::string name_;
+ bool clear_soft_references_;
+
const bool verbose_;
uint64_t duration_ns_;
@@ -109,6 +139,12 @@ class GarbageCollector {
uint64_t total_freed_objects_;
uint64_t total_freed_bytes_;
+ // Single GC statistics.
+ AtomicInteger freed_bytes_;
+ AtomicInteger freed_large_object_bytes_;
+ AtomicInteger freed_objects_;
+ AtomicInteger freed_large_objects_;
+
CumulativeLogger cumulative_timings_;
std::vector<uint64_t> pause_times_;
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 270c9efde9..7a51553992 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -44,8 +44,7 @@ inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& v
if (klass->IsObjectArrayClass()) {
VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
}
- } else if (UNLIKELY(klass == java_lang_Class_)) {
- DCHECK_EQ(klass->GetClass(), java_lang_Class_);
+ } else if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) {
if (kCountScannedTypes) {
++class_count_;
}
@@ -56,7 +55,7 @@ inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& v
}
VisitOtherReferences(klass, obj, visitor);
if (UNLIKELY(klass->IsReferenceClass())) {
- DelayReferenceReferent(klass, const_cast<mirror::Object*>(obj));
+ DelayReferenceReferent(klass, obj);
}
}
}
@@ -68,11 +67,10 @@ inline void MarkSweep::VisitObjectReferences(mirror::Object* obj, const Visitor&
Locks::mutator_lock_) {
DCHECK(obj != NULL);
DCHECK(obj->GetClass() != NULL);
-
mirror::Class* klass = obj->GetClass();
DCHECK(klass != NULL);
if (visit_class) {
- visitor(obj, klass, MemberOffset(0), false);
+ visitor(obj, klass, mirror::Object::ClassOffset(), false);
}
if (klass == mirror::Class::GetJavaLangClass()) {
DCHECK_EQ(klass->GetClass(), mirror::Class::GetJavaLangClass());
@@ -90,8 +88,7 @@ inline void MarkSweep::VisitObjectReferences(mirror::Object* obj, const Visitor&
}
template <typename Visitor>
-inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass,
- mirror::Object* obj,
+inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
DCHECK(obj != NULL);
@@ -119,11 +116,6 @@ inline void MarkSweep::VisitFieldsReferences(mirror::Object* obj, uint32_t ref_o
bool is_static, const Visitor& visitor) {
if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
// Found a reference offset bitmap. Mark the specified offsets.
-#ifndef MOVING_COLLECTOR
- // Clear the class bit since we mark the class as part of marking the classlinker roots.
- DCHECK_EQ(mirror::Object::ClassOffset().Uint32Value(), 0U);
- ref_offsets &= (1U << (sizeof(ref_offsets) * 8 - 1)) - 1;
-#endif
while (ref_offsets != 0) {
size_t right_shift = CLZ(ref_offsets);
MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 2c69c77187..11e911cf46 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -93,6 +93,8 @@ void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
}
// Add the space to the immune region.
+ // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
+ // callbacks.
if (immune_begin_ == NULL) {
DCHECK(immune_end_ == NULL);
SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
@@ -108,14 +110,14 @@ void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
}
// If previous space was immune, then extend the immune region. Relies on continuous spaces
// being sorted by Heap::AddContinuousSpace.
- if (prev_space != NULL && IsImmuneSpace(prev_space)) {
+ if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
}
}
}
-bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) {
+bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
return
immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
immune_end_ >= reinterpret_cast<Object*>(space->End());
@@ -135,10 +137,9 @@ void MarkSweep::BindBitmaps() {
MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
: GarbageCollector(heap,
- name_prefix + (name_prefix.empty() ? "" : " ") +
+ name_prefix +
(is_concurrent ? "concurrent mark sweep": "mark sweep")),
current_mark_bitmap_(NULL),
- java_lang_Class_(NULL),
mark_stack_(NULL),
immune_begin_(NULL),
immune_end_(NULL),
@@ -150,8 +151,7 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
gc_barrier_(new Barrier(0)),
large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
- is_concurrent_(is_concurrent),
- clear_soft_references_(false) {
+ is_concurrent_(is_concurrent) {
}
void MarkSweep::InitializePhase() {
@@ -165,10 +165,6 @@ void MarkSweep::InitializePhase() {
finalizer_reference_list_ = nullptr;
phantom_reference_list_ = nullptr;
cleared_reference_list_ = nullptr;
- freed_bytes_ = 0;
- freed_large_object_bytes_ = 0;
- freed_objects_ = 0;
- freed_large_objects_ = 0;
class_count_ = 0;
array_count_ = 0;
other_count_ = 0;
@@ -179,8 +175,6 @@ void MarkSweep::InitializePhase() {
work_chunks_created_ = 0;
work_chunks_deleted_ = 0;
reference_count_ = 0;
- java_lang_Class_ = Class::GetJavaLangClass();
- CHECK(java_lang_Class_ != nullptr);
FindDefaultMarkBitmap();
@@ -294,8 +288,7 @@ void MarkSweep::MarkReachableObjects() {
// knowing that new allocations won't be marked as live.
timings_.StartSplit("MarkStackAsLive");
accounting::ObjectStack* live_stack = heap_->GetLiveStack();
- heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
- heap_->large_object_space_->GetLiveObjects(), live_stack);
+ heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
timings_.EndSplit();
// Recursively mark all the non-image bits set in the mark bitmap.
@@ -371,8 +364,10 @@ void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
void MarkSweep::FindDefaultMarkBitmap() {
base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
- current_mark_bitmap_ = space->GetMarkBitmap();
+ accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+ if (bitmap != nullptr &&
+ space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
+ current_mark_bitmap_ = bitmap;
CHECK(current_mark_bitmap_ != NULL);
return;
}
@@ -613,10 +608,8 @@ void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
CHECK(space->IsDlMallocSpace());
space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
+ accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
- alloc_space->temp_bitmap_.reset(mark_bitmap);
- alloc_space->mark_bitmap_.reset(live_bitmap);
}
class ScanObjectVisitor {
@@ -625,7 +618,7 @@ class ScanObjectVisitor {
: mark_sweep_(mark_sweep) {}
// TODO: Fixme when annotalysis works with visitors.
- void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -814,6 +807,9 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
mark_stack_size / mark_stack_tasks + 1);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->GetMarkBitmap() == nullptr) {
+ continue;
+ }
byte* card_begin = space->Begin();
byte* card_end = space->End();
// Align up the end address. For example, the image space's end
@@ -856,24 +852,26 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
timings_.EndSplit();
} else {
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- // Image spaces are handled properly since live == marked for them.
- switch (space->GetGcRetentionPolicy()) {
- case space::kGcRetentionPolicyNeverCollect:
- timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
- "ScanGrayImageSpaceObjects");
- break;
- case space::kGcRetentionPolicyFullCollect:
- timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
- "ScanGrayZygoteSpaceObjects");
- break;
- case space::kGcRetentionPolicyAlwaysCollect:
- timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
- "ScanGrayAllocSpaceObjects");
- break;
- }
- ScanObjectVisitor visitor(this);
- card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
- timings_.EndSplit();
+ if (space->GetMarkBitmap() != nullptr) {
+ // Image spaces are handled properly since live == marked for them.
+ switch (space->GetGcRetentionPolicy()) {
+ case space::kGcRetentionPolicyNeverCollect:
+ timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
+ "ScanGrayImageSpaceObjects");
+ break;
+ case space::kGcRetentionPolicyFullCollect:
+ timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
+ "ScanGrayZygoteSpaceObjects");
+ break;
+ case space::kGcRetentionPolicyAlwaysCollect:
+ timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
+ "ScanGrayAllocSpaceObjects");
+ break;
+ }
+ ScanObjectVisitor visitor(this);
+ card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
+ timings_.EndSplit();
+ }
}
}
}
@@ -954,9 +952,8 @@ void MarkSweep::RecursiveMark() {
if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
(!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
current_mark_bitmap_ = space->GetMarkBitmap();
- if (current_mark_bitmap_ == NULL) {
- GetHeap()->DumpSpaces();
- LOG(FATAL) << "invalid bitmap";
+ if (current_mark_bitmap_ == nullptr) {
+ continue;
}
if (parallel) {
// We will use the mark stack in the future.
@@ -1121,7 +1118,7 @@ void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
}
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
- space::DlMallocSpace* space = heap_->GetAllocSpace();
+ space::DlMallocSpace* space = heap_->GetNonMovingSpace();
timings_.StartSplit("SweepArray");
// Newly allocated objects MUST be in the alloc space and those are the only objects which we are
// going to free.
@@ -1207,8 +1204,11 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
scc.mark_sweep = this;
scc.self = Thread::Current();
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (!space->IsDlMallocSpace()) {
+ continue;
+ }
// We always sweep always-collect spaces.
- bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
+ bool sweep_space = space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
if (!partial && !sweep_space) {
// We sweep full-collect spaces when the GC isn't a partial GC (i.e. it is a full GC).
sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
@@ -1370,9 +1370,9 @@ class MarkObjectVisitor {
// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
-void MarkSweep::ScanObject(const Object* obj) {
+void MarkSweep::ScanObject(Object* obj) {
MarkObjectVisitor visitor(this);
- ScanObjectVisit(const_cast<Object*>(obj), visitor);
+ ScanObjectVisit(obj, visitor);
}
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
@@ -1406,12 +1406,12 @@ void MarkSweep::ProcessMarkStack(bool paused) {
} else {
// TODO: Tune this.
static const size_t kFifoSize = 4;
- BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
+ BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
for (;;) {
- const Object* obj = NULL;
+ Object* obj = NULL;
if (kUseMarkStackPrefetch) {
while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
- const Object* obj = mark_stack_->PopBack();
+ Object* obj = mark_stack_->PopBack();
DCHECK(obj != NULL);
__builtin_prefetch(obj);
prefetch_fifo.push_back(obj);
@@ -1603,9 +1603,6 @@ void MarkSweep::FinishPhase() {
timings_.NewSplit("PostGcVerification");
heap->PostGcVerification(this);
- timings_.NewSplit("GrowForUtilization");
- heap->GrowForUtilization(GetGcType(), GetDurationNs());
-
timings_.NewSplit("RequestHeapTrim");
heap->RequestHeapTrim();
@@ -1651,8 +1648,10 @@ void MarkSweep::FinishPhase() {
// Clear all of the spaces' mark bitmaps.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
- space->GetMarkBitmap()->Clear();
+ accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+ if (bitmap != nullptr &&
+ space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
+ bitmap->Clear();
}
}
mark_stack_->Reset();
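For reference, the prefetch pattern being retyped in ProcessMarkStack above can be summarized by this stand-alone sketch (plain C++ with stand-in types, not ART code; std::deque stands in for BoundedFifoPowerOfTwo):

#include <cstddef>
#include <deque>
#include <vector>

struct Obj {};                // stand-in for mirror::Object
void Scan(Obj* /*obj*/) {}    // stand-in for MarkSweep::ScanObject

// Drain 'mark_stack', prefetching each popped object a few iterations before
// it is scanned, mirroring the kUseMarkStackPrefetch path.
void DrainWithPrefetch(std::vector<Obj*>& mark_stack) {
  static constexpr size_t kFifoSize = 4;
  std::deque<Obj*> prefetch_fifo;
  for (;;) {
    // Top up the FIFO, issuing a prefetch for every newly popped object.
    while (!mark_stack.empty() && prefetch_fifo.size() < kFifoSize) {
      Obj* obj = mark_stack.back();
      mark_stack.pop_back();
      __builtin_prefetch(obj);
      prefetch_fifo.push_back(obj);
    }
    if (prefetch_fifo.empty()) {
      break;
    }
    Obj* obj = prefetch_fifo.front();
    prefetch_fifo.pop_front();
    Scan(obj);  // By now the object's header is likely already in cache.
  }
}

The FIFO depth of four matches kFifoSize in the patch; the point is simply to start the memory fetch a few objects ahead of the scan.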
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 3bc014aa1e..cc5841244d 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -114,7 +114,7 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsImmuneSpace(const space::ContinuousSpace* space)
+ bool IsImmuneSpace(const space::ContinuousSpace* space) const;
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
@@ -140,6 +140,7 @@ class MarkSweep : public GarbageCollector {
void ProcessReferences(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Update and mark references from immune spaces.
virtual void UpdateAndMarkModUnion()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -158,7 +159,7 @@ class MarkSweep : public GarbageCollector {
}
// Blackens an object.
- void ScanObject(const mirror::Object* obj)
+ void ScanObject(mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -167,38 +168,6 @@ class MarkSweep : public GarbageCollector {
void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor)
NO_THREAD_SAFETY_ANALYSIS;
- size_t GetFreedBytes() const {
- return freed_bytes_;
- }
-
- size_t GetFreedLargeObjectBytes() const {
- return freed_large_object_bytes_;
- }
-
- size_t GetFreedObjects() const {
- return freed_objects_;
- }
-
- size_t GetFreedLargeObjects() const {
- return freed_large_objects_;
- }
-
- uint64_t GetTotalTimeNs() const {
- return total_time_ns_;
- }
-
- uint64_t GetTotalPausedTimeNs() const {
- return total_paused_time_ns_;
- }
-
- uint64_t GetTotalFreedObjects() const {
- return total_freed_objects_;
- }
-
- uint64_t GetTotalFreedBytes() const {
- return total_freed_bytes_;
- }
-
// Everything inside the immune range is assumed to be marked.
void SetImmuneRange(mirror::Object* begin, mirror::Object* end);
@@ -216,8 +185,7 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
template <typename Visitor>
- static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor,
- bool visit_class = false)
+ static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor, bool visit_class)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
@@ -395,9 +363,6 @@ class MarkSweep : public GarbageCollector {
// object.
accounting::SpaceBitmap* current_mark_bitmap_;
- // Cache java.lang.Class for optimization.
- mirror::Class* java_lang_Class_;
-
accounting::ObjectStack* mark_stack_;
// Immune range, every object inside the immune range is assumed to be marked.
@@ -412,14 +377,6 @@ class MarkSweep : public GarbageCollector {
// Parallel finger.
AtomicInteger atomic_finger_;
- // Number of non large object bytes freed in this collection.
- AtomicInteger freed_bytes_;
- // Number of large object bytes freed.
- AtomicInteger freed_large_object_bytes_;
- // Number of objects freed in this collection.
- AtomicInteger freed_objects_;
- // Number of freed large objects.
- AtomicInteger freed_large_objects_;
// Number of classes scanned, if kCountScannedTypes.
AtomicInteger class_count_;
// Number of arrays scanned, if kCountScannedTypes.
@@ -443,8 +400,6 @@ class MarkSweep : public GarbageCollector {
const bool is_concurrent_;
- bool clear_soft_references_;
-
private:
friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
friend class CardScanTask;
diff --git a/runtime/gc/collector/partial_mark_sweep.cc b/runtime/gc/collector/partial_mark_sweep.cc
index 29367ce0bf..8ec28f3174 100644
--- a/runtime/gc/collector/partial_mark_sweep.cc
+++ b/runtime/gc/collector/partial_mark_sweep.cc
@@ -26,7 +26,7 @@ namespace gc {
namespace collector {
PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
- : MarkSweep(heap, is_concurrent, name_prefix + (name_prefix.empty() ? "" : " ") + "partial") {
+ : MarkSweep(heap, is_concurrent, name_prefix.empty() ? "partial " : name_prefix) {
cumulative_timings_.SetName(GetName());
}
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
new file mode 100644
index 0000000000..3b8f7c395d
--- /dev/null
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
+#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
+
+namespace art {
+namespace gc {
+namespace collector {
+
+inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object* obj) const {
+ DCHECK(from_space_->HasAddress(obj));
+ LockWord lock_word = obj->GetLockWord();
+ if (lock_word.GetState() != LockWord::kForwardingAddress) {
+ return nullptr;
+ }
+ return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
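The helper above works because, once a from-space object has been copied, its lock word is reused to hold the forwarding address. A toy round trip of that encoding (the 2-bit state field and 30-bit payload here are illustrative assumptions, not ART's actual LockWord layout):

#include <cassert>
#include <cstdint>

// Toy lock word: the top two bits hold the state, the remaining bits a payload.
struct ToyLockWord {
  enum State { kUnlocked = 0, kThinLocked = 1, kForwardingAddress = 2 };
  uint32_t value;

  State GetState() const { return static_cast<State>(value >> 30); }
  uint32_t Payload() const { return value & ((1u << 30) - 1); }
  static ToyLockWord FromForwardingAddress(uint32_t addr) {
    return ToyLockWord{(static_cast<uint32_t>(kForwardingAddress) << 30) | addr};
  }
};

// Mirrors the inline helper above: a zero result means "not forwarded yet".
uint32_t GetForwardingAddress(ToyLockWord word) {
  return word.GetState() == ToyLockWord::kForwardingAddress ? word.Payload() : 0;
}

int main() {
  ToyLockWord w = ToyLockWord::FromForwardingAddress(0x1234);
  assert(GetForwardingAddress(w) == 0x1234);
  return 0;
}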
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
new file mode 100644
index 0000000000..d833631da9
--- /dev/null
+++ b/runtime/gc/collector/semi_space.cc
@@ -0,0 +1,799 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "semi_space.h"
+
+#include <functional>
+#include <numeric>
+#include <climits>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex-inl.h"
+#include "base/timing_logger.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/mod_union_table.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/space/bump_pointer_space.h"
+#include "gc/space/bump_pointer_space-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "indirect_reference_table.h"
+#include "intern_table.h"
+#include "jni_internal.h"
+#include "mark_sweep-inl.h"
+#include "monitor.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
+#include "runtime.h"
+#include "semi_space-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+#include "verifier/method_verifier.h"
+
+using ::art::mirror::Class;
+using ::art::mirror::Object;
+
+namespace art {
+namespace gc {
+namespace collector {
+
+static constexpr bool kProtectFromSpace = true;
+static constexpr bool kResetFromSpace = true;
+
+// TODO: Unduplicate logic.
+void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
+ // Bind live to mark bitmap if necessary.
+ if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
+ BindLiveToMarkBitmap(space);
+ }
+ // Add the space to the immune region.
+ if (immune_begin_ == nullptr) {
+ DCHECK(immune_end_ == nullptr);
+ immune_begin_ = reinterpret_cast<Object*>(space->Begin());
+ immune_end_ = reinterpret_cast<Object*>(space->End());
+ } else {
+ const space::ContinuousSpace* prev_space = nullptr;
+ // Find out if the previous space is immune.
+ for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
+ if (cur_space == space) {
+ break;
+ }
+ prev_space = cur_space;
+ }
+ // If previous space was immune, then extend the immune region. Relies on continuous spaces
+ // being sorted by Heap::AddContinuousSpace.
+ if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
+ immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
+ immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
+ }
+ }
+}
+
+void SemiSpace::BindBitmaps() {
+ timings_.StartSplit("BindBitmaps");
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ // Mark all of the spaces we never collect as immune.
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
+ || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+ ImmuneSpace(space);
+ }
+ }
+ timings_.EndSplit();
+}
+
+SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
+ : GarbageCollector(heap,
+ name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
+ mark_stack_(nullptr),
+ immune_begin_(nullptr),
+ immune_end_(nullptr),
+ to_space_(nullptr),
+ from_space_(nullptr),
+ soft_reference_list_(nullptr),
+ weak_reference_list_(nullptr),
+ finalizer_reference_list_(nullptr),
+ phantom_reference_list_(nullptr),
+ cleared_reference_list_(nullptr),
+ self_(nullptr) {
+}
+
+void SemiSpace::InitializePhase() {
+ timings_.Reset();
+ base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+ mark_stack_ = heap_->mark_stack_.get();
+ DCHECK(mark_stack_ != nullptr);
+ immune_begin_ = nullptr;
+ immune_end_ = nullptr;
+ soft_reference_list_ = nullptr;
+ weak_reference_list_ = nullptr;
+ finalizer_reference_list_ = nullptr;
+ phantom_reference_list_ = nullptr;
+ cleared_reference_list_ = nullptr;
+ self_ = Thread::Current();
+ // Do any pre GC verification.
+ timings_.NewSplit("PreGcVerification");
+ heap_->PreGcVerification(this);
+}
+
+void SemiSpace::ProcessReferences(Thread* self) {
+ base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
+ &finalizer_reference_list_, &phantom_reference_list_);
+}
+
+void SemiSpace::MarkingPhase() {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+ // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
+ // wrong space.
+ heap_->SwapSemiSpaces();
+ // Assume the cleared space is already empty.
+ BindBitmaps();
+ // Process dirty cards and add dirty cards to mod-union tables.
+ heap_->ProcessCards(timings_);
+ // Need to do this before the checkpoint since we don't want any threads to add references to
+ // the live stack during the recursive mark.
+ timings_.NewSplit("SwapStacks");
+ heap_->SwapStacks();
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ MarkRoots();
+ // Mark roots of immune spaces.
+ UpdateAndMarkModUnion();
+ // Recursively mark remaining objects.
+ MarkReachableObjects();
+}
+
+bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
+ return
+ immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
+ immune_end_ >= reinterpret_cast<Object*>(space->End());
+}
+
+void SemiSpace::UpdateAndMarkModUnion() {
+ for (auto& space : heap_->GetContinuousSpaces()) {
+ // If the space is immune then we need to mark the references to other spaces.
+ if (IsImmuneSpace(space)) {
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ CHECK(table != nullptr);
+ // TODO: Improve naming.
+ base::TimingLogger::ScopedSplit split(
+ space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+ "UpdateAndMarkImageModUnionTable",
+ &timings_);
+ table->UpdateAndMarkReferences(MarkRootCallback, this);
+ }
+ }
+}
+
+void SemiSpace::MarkReachableObjects() {
+ timings_.StartSplit("MarkStackAsLive");
+ accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+ heap_->MarkAllocStackAsLive(live_stack);
+ live_stack->Reset();
+ timings_.EndSplit();
+ // Recursively process the mark stack.
+ ProcessMarkStack(true);
+}
+
+void SemiSpace::ReclaimPhase() {
+ base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+ Thread* self = Thread::Current();
+ ProcessReferences(self);
+ {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ SweepSystemWeaks();
+ }
+ // Record freed memory.
+ int from_bytes = from_space_->GetBytesAllocated();
+ int to_bytes = to_space_->GetBytesAllocated();
+ int from_objects = from_space_->GetObjectsAllocated();
+ int to_objects = to_space_->GetObjectsAllocated();
+ int freed_bytes = from_bytes - to_bytes;
+ int freed_objects = from_objects - to_objects;
+ CHECK_GE(freed_bytes, 0);
+ freed_bytes_.fetch_add(freed_bytes);
+ freed_objects_.fetch_add(freed_objects);
+ heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));
+
+ timings_.StartSplit("PreSweepingGcVerification");
+ heap_->PreSweepingGcVerification(this);
+ timings_.EndSplit();
+
+ {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ // Reclaim unmarked objects.
+ Sweep(false);
+ // Swap the live and mark bitmaps for each space that we modified. This is an optimization
+ // that enables us to not clear the live bits inside of the sweep. Only swaps unbound bitmaps.
+ timings_.StartSplit("SwapBitmaps");
+ SwapBitmaps();
+ timings_.EndSplit();
+ // Unbind the live and mark bitmaps.
+ UnBindBitmaps();
+ }
+ // Release the memory used by the from space.
+ if (kResetFromSpace) {
+ // Clearing from space.
+ from_space_->Clear();
+ }
+ // Protect the from space.
+ VLOG(heap)
+ << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
+ << reinterpret_cast<void*>(from_space_->Limit());
+ if (kProtectFromSpace) {
+ mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
+ } else {
+ mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
+ }
+}
+
+void SemiSpace::ResizeMarkStack(size_t new_size) {
+ std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+ CHECK_LE(mark_stack_->Size(), new_size);
+ mark_stack_->Resize(new_size);
+ for (const auto& obj : temp) {
+ mark_stack_->PushBack(obj);
+ }
+}
+
+inline void SemiSpace::MarkStackPush(Object* obj) {
+ if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+ ResizeMarkStack(mark_stack_->Capacity() * 2);
+ }
+ // The object must be pushed on to the mark stack.
+ mark_stack_->PushBack(obj);
+}
+
+// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
+bool SemiSpace::MarkLargeObject(const Object* obj) {
+ // TODO: support >1 discontinuous space.
+ space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
+ if (UNLIKELY(!large_objects->Test(obj))) {
+ large_objects->Set(obj);
+ return true;
+ }
+ return false;
+}
+
+// Used to mark and copy objects. Any newly-marked objects that are in the from-space get moved
+// to the to-space and have their forwarding address updated. Newly-marked objects are pushed
+// onto the mark stack.
+Object* SemiSpace::MarkObject(Object* obj) {
+ Object* ret = obj;
+ if (obj != nullptr && !IsImmune(obj)) {
+ if (from_space_->HasAddress(obj)) {
+ mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+ // If the object has already been moved, return the new forward address.
+ if (!to_space_->HasAddress(forward_address)) {
+ // Otherwise, we need to move the object and add it to the markstack for processing.
+ size_t object_size = obj->SizeOf();
+ size_t dummy = 0;
+ forward_address = to_space_->Alloc(self_, object_size, &dummy);
+ // Copy over the object and add it to the mark stack since we still need to update its
+ // references.
+ memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
+ // Make sure to only update the forwarding address AFTER you copy the object so that the
+ // monitor word doesn't get stomped over.
+ COMPILE_ASSERT(sizeof(uint32_t) == sizeof(mirror::Object*),
+ monitor_size_must_be_same_as_object);
+ obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)));
+ MarkStackPush(forward_address);
+ }
+ ret = forward_address;
+ // TODO: Do we need this if in the else statement?
+ } else {
+ accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+ if (LIKELY(object_bitmap != nullptr)) {
+ // This object was not previously marked.
+ if (!object_bitmap->Test(obj)) {
+ object_bitmap->Set(obj);
+ MarkStackPush(obj);
+ }
+ } else {
+ DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
+ if (MarkLargeObject(obj)) {
+ MarkStackPush(obj);
+ }
+ }
+ }
+ }
+ return ret;
+}
+
+Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
+ DCHECK(root != nullptr);
+ DCHECK(arg != nullptr);
+ return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
+}
+
+// Marks all objects in the root set.
+void SemiSpace::MarkRoots() {
+ timings_.StartSplit("MarkRoots");
+ // TODO: Visit image roots as well?
+ Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
+ timings_.EndSplit();
+}
+
+void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
+ CHECK(space->IsDlMallocSpace());
+ space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+ accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
+ GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+}
+
+mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
+ if (from_space_->HasAddress(obj)) {
+ LOG(FATAL) << "Shouldn't happen!";
+ return GetForwardingAddressInFromSpace(obj);
+ }
+ return obj;
+}
+
+mirror::Object* SemiSpace::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+ return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
+}
+
+void SemiSpace::SweepSystemWeaks() {
+ timings_.StartSplit("SweepSystemWeaks");
+ Runtime::Current()->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
+ timings_.EndSplit();
+}
+
+struct SweepCallbackContext {
+ SemiSpace* mark_sweep;
+ space::AllocSpace* space;
+ Thread* self;
+};
+
+void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+ SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+ SemiSpace* gc = context->mark_sweep;
+ Heap* heap = gc->GetHeap();
+ space::AllocSpace* space = context->space;
+ Thread* self = context->self;
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
+ size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
+ heap->RecordFree(num_ptrs, freed_bytes);
+ gc->freed_objects_.fetch_add(num_ptrs);
+ gc->freed_bytes_.fetch_add(freed_bytes);
+}
+
+void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+ SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
+ Heap* heap = context->mark_sweep->GetHeap();
+ // We don't free any actual memory to avoid dirtying the shared zygote pages.
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ Object* obj = static_cast<Object*>(ptrs[i]);
+ heap->GetLiveBitmap()->Clear(obj);
+ heap->GetCardTable()->MarkCard(obj);
+ }
+}
+
+void SemiSpace::Sweep(bool swap_bitmaps) {
+ DCHECK(mark_stack_->IsEmpty());
+ base::TimingLogger::ScopedSplit split("Sweep", &timings_);
+
+ const bool partial = (GetGcType() == kGcTypePartial);
+ SweepCallbackContext scc;
+ scc.mark_sweep = this;
+ scc.self = Thread::Current();
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (!space->IsDlMallocSpace()) {
+ continue;
+ }
+ // We always sweep always-collect spaces.
+ bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
+ if (!partial && !sweep_space) {
+ // We sweep full-collect spaces when the GC isn't a partial GC (i.e. it is a full GC).
+ sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
+ }
+ if (sweep_space && space->IsDlMallocSpace()) {
+ uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+ uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+ scc.space = space->AsDlMallocSpace();
+ accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ if (swap_bitmaps) {
+ std::swap(live_bitmap, mark_bitmap);
+ }
+ if (!space->IsZygoteSpace()) {
+ base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
+ // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
+ accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+ &SweepCallback, reinterpret_cast<void*>(&scc));
+ } else {
+ base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
+ // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
+ // memory.
+ accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+ &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
+ }
+ }
+ }
+
+ SweepLargeObjects(swap_bitmaps);
+}
+
+void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
+ base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
+ // Sweep large objects
+ space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+ accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+ if (swap_bitmaps) {
+ std::swap(large_live_objects, large_mark_objects);
+ }
+ // O(n*log(n)) but hopefully there are not too many large objects.
+ size_t freed_objects = 0;
+ size_t freed_bytes = 0;
+ Thread* self = Thread::Current();
+ for (const Object* obj : large_live_objects->GetObjects()) {
+ if (!large_mark_objects->Test(obj)) {
+ freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
+ ++freed_objects;
+ }
+ }
+ freed_large_objects_.fetch_add(freed_objects);
+ freed_large_object_bytes_.fetch_add(freed_bytes);
+ GetHeap()->RecordFree(freed_objects, freed_bytes);
+}
+
+// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
+ DCHECK(klass != nullptr);
+ DCHECK(klass->IsReferenceClass());
+ DCHECK(obj != nullptr);
+ Object* referent = heap_->GetReferenceReferent(obj);
+ if (referent != nullptr) {
+ Object* forward_address = GetMarkedForwardAddress(referent);
+ if (forward_address == nullptr) {
+ Thread* self = Thread::Current();
+ // TODO: Remove these locks, and use atomic stacks for storing references?
+ // We need to check that the references haven't already been enqueued since we can end up
+ // scanning the same reference multiple times due to dirty cards.
+ if (klass->IsSoftReferenceClass()) {
+ MutexLock mu(self, *heap_->GetSoftRefQueueLock());
+ if (!heap_->IsEnqueued(obj)) {
+ heap_->EnqueuePendingReference(obj, &soft_reference_list_);
+ }
+ } else if (klass->IsWeakReferenceClass()) {
+ MutexLock mu(self, *heap_->GetWeakRefQueueLock());
+ if (!heap_->IsEnqueued(obj)) {
+ heap_->EnqueuePendingReference(obj, &weak_reference_list_);
+ }
+ } else if (klass->IsFinalizerReferenceClass()) {
+ MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
+ if (!heap_->IsEnqueued(obj)) {
+ heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
+ }
+ } else if (klass->IsPhantomReferenceClass()) {
+ MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
+ if (!heap_->IsEnqueued(obj)) {
+ heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
+ }
+ } else {
+ LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+ << klass->GetAccessFlags();
+ }
+ } else if (referent != forward_address) {
+ heap_->SetReferenceReferent(obj, forward_address);
+ }
+ }
+}
+
+// Visit all of the references of an object and update.
+void SemiSpace::ScanObject(Object* obj) {
+ DCHECK(obj != NULL);
+ DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
+ MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
+ bool /* is_static */) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ mirror::Object* new_address = MarkObject(ref);
+ if (new_address != ref) {
+ DCHECK(new_address != nullptr);
+ obj->SetFieldObject(offset, new_address, false);
+ }
+ }, kMovingClasses);
+ mirror::Class* klass = obj->GetClass();
+ if (UNLIKELY(klass->IsReferenceClass())) {
+ DelayReferenceReferent(klass, obj);
+ }
+}
+
+// Scan anything that's on the mark stack.
+void SemiSpace::ProcessMarkStack(bool paused) {
+ timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
+ while (!mark_stack_->IsEmpty()) {
+ ScanObject(mark_stack_->PopBack());
+ }
+ timings_.EndSplit();
+}
+
+// Walks the reference list marking any references subject to the
+// reference clearing policy. References with a black referent are
+// removed from the list. References with white referents biased
+// toward saving are blackened and also removed from the list.
+void SemiSpace::PreserveSomeSoftReferences(Object** list) {
+ DCHECK(list != NULL);
+ Object* clear = NULL;
+ size_t counter = 0;
+ DCHECK(mark_stack_->IsEmpty());
+ timings_.StartSplit("PreserveSomeSoftReferences");
+ while (*list != NULL) {
+ Object* ref = heap_->DequeuePendingReference(list);
+ Object* referent = heap_->GetReferenceReferent(ref);
+ if (referent == NULL) {
+ // Referent was cleared by the user during marking.
+ continue;
+ }
+ Object* forward_address = GetMarkedForwardAddress(referent);
+ bool is_marked = forward_address != nullptr;
+ if (!is_marked && ((++counter) & 1)) {
+ // Referent is white and biased toward saving, mark it.
+ forward_address = MarkObject(referent);
+ if (referent != forward_address) {
+ // Update the referent if we moved it.
+ heap_->SetReferenceReferent(ref, forward_address);
+ }
+ } else {
+ if (!is_marked) {
+ // Referent is white, queue it for clearing.
+ heap_->EnqueuePendingReference(ref, &clear);
+ } else if (referent != forward_address) {
+ CHECK(forward_address != nullptr);
+ heap_->SetReferenceReferent(ref, forward_address);
+ }
+ }
+ }
+ *list = clear;
+ timings_.EndSplit();
+ // Restart the mark with the newly black references added to the root set.
+ ProcessMarkStack(true);
+}
+
+inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ // All immune objects are assumed marked.
+ if (IsImmune(obj)) {
+ return obj;
+ }
+ if (from_space_->HasAddress(obj)) {
+ mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
+ // If the object is forwarded then it MUST be marked.
+ if (to_space_->HasAddress(forwarding_address)) {
+ return forwarding_address;
+ }
+ // Must not be marked, so return null.
+ return nullptr;
+ } else if (to_space_->HasAddress(obj)) {
+ // Already forwarded, must be marked.
+ return obj;
+ }
+ return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
+}
+
+// Unlink the reference list, clearing references with white
+// referents. Cleared references registered to a reference queue are
+// scheduled for appending by the heap worker thread.
+void SemiSpace::ClearWhiteReferences(Object** list) {
+ DCHECK(list != NULL);
+ while (*list != NULL) {
+ Object* ref = heap_->DequeuePendingReference(list);
+ Object* referent = heap_->GetReferenceReferent(ref);
+ if (referent != nullptr) {
+ Object* forward_address = GetMarkedForwardAddress(referent);
+ if (forward_address == nullptr) {
+ // Referent is white, clear it.
+ heap_->ClearReferenceReferent(ref);
+ if (heap_->IsEnqueuable(ref)) {
+ heap_->EnqueueReference(ref, &cleared_reference_list_);
+ }
+ } else if (referent != forward_address) {
+ heap_->SetReferenceReferent(ref, forward_address);
+ }
+ }
+ }
+ DCHECK(*list == NULL);
+}
+
+// Enqueues finalizer references with white referents. White
+// referents are blackened, moved to the zombie field, and the
+// referent field is cleared.
+void SemiSpace::EnqueueFinalizerReferences(Object** list) {
+ // *list = NULL;
+ // return;
+ DCHECK(list != NULL);
+ timings_.StartSplit("EnqueueFinalizerReferences");
+ MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
+ bool has_enqueued = false;
+ while (*list != NULL) {
+ Object* ref = heap_->DequeuePendingReference(list);
+ Object* referent = heap_->GetReferenceReferent(ref);
+ if (referent != nullptr) {
+ Object* forward_address = GetMarkedForwardAddress(referent);
+ // Not marked.
+ if (forward_address == nullptr) {
+ forward_address = MarkObject(referent);
+ // If the referent is non-null the reference must be enqueuable.
+ DCHECK(heap_->IsEnqueuable(ref));
+ // Move the referent to the zombie field.
+ ref->SetFieldObject(zombie_offset, forward_address, false);
+ heap_->ClearReferenceReferent(ref);
+ heap_->EnqueueReference(ref, &cleared_reference_list_);
+ has_enqueued = true;
+ } else if (referent != forward_address) {
+ heap_->SetReferenceReferent(ref, forward_address);
+ }
+ }
+ }
+ timings_.EndSplit();
+ if (has_enqueued) {
+ ProcessMarkStack(true);
+ }
+ DCHECK(*list == NULL);
+}
+
+// Process reference class instances and schedule finalizations.
+void SemiSpace::ProcessReferences(Object** soft_references, bool clear_soft,
+ Object** weak_references,
+ Object** finalizer_references,
+ Object** phantom_references) {
+ CHECK(soft_references != NULL);
+ CHECK(weak_references != NULL);
+ CHECK(finalizer_references != NULL);
+ CHECK(phantom_references != NULL);
+ CHECK(mark_stack_->IsEmpty());
+
+ // Unless we are in the zygote or required to clear soft references
+ // with white referents, preserve some white referents.
+ if (!clear_soft && !Runtime::Current()->IsZygote()) {
+ PreserveSomeSoftReferences(soft_references);
+ }
+
+ timings_.StartSplit("ProcessReferences");
+ // Clear all remaining soft and weak references with white
+ // referents.
+ ClearWhiteReferences(soft_references);
+ ClearWhiteReferences(weak_references);
+ timings_.EndSplit();
+
+ // Preserve all white objects with finalize methods and schedule
+ // them for finalization.
+ EnqueueFinalizerReferences(finalizer_references);
+
+ timings_.StartSplit("ProcessReferences");
+ // Clear all f-reachable soft and weak references with white
+ // referents.
+ ClearWhiteReferences(soft_references);
+ ClearWhiteReferences(weak_references);
+
+ // Clear all phantom references with white referents.
+ ClearWhiteReferences(phantom_references);
+
+ // At this point all reference lists should be empty.
+ DCHECK(*soft_references == NULL);
+ DCHECK(*weak_references == NULL);
+ DCHECK(*finalizer_references == NULL);
+ DCHECK(*phantom_references == NULL);
+ timings_.EndSplit();
+}
+
+void SemiSpace::UnBindBitmaps() {
+ base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->IsDlMallocSpace()) {
+ space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+ if (alloc_space->HasBoundBitmaps()) {
+ alloc_space->UnBindBitmaps();
+ heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
+ alloc_space->GetMarkBitmap());
+ }
+ }
+ }
+}
+
+void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
+ DCHECK(to_space != nullptr);
+ to_space_ = to_space;
+}
+
+void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
+ DCHECK(from_space != nullptr);
+ from_space_ = from_space;
+}
+
+void SemiSpace::FinishPhase() {
+ base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+ // Can't enqueue references if we hold the mutator lock.
+ Object* cleared_references = GetClearedReferences();
+ Heap* heap = GetHeap();
+ timings_.NewSplit("EnqueueClearedReferences");
+ heap->EnqueueClearedReferences(&cleared_references);
+
+ timings_.NewSplit("PostGcVerification");
+ heap->PostGcVerification(this);
+
+ // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
+ // further action is taken by the heap.
+ to_space_ = nullptr;
+ from_space_ = nullptr;
+
+ // Update the cumulative statistics
+ total_time_ns_ += GetDurationNs();
+ total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
+ std::plus<uint64_t>());
+ total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
+ total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
+
+ // Ensure that the mark stack is empty.
+ CHECK(mark_stack_->IsEmpty());
+
+ // Update the cumulative loggers.
+ cumulative_timings_.Start();
+ cumulative_timings_.AddLogger(timings_);
+ cumulative_timings_.End();
+
+ // Clear all of the spaces' mark bitmaps.
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+ if (bitmap != nullptr &&
+ space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
+ bitmap->Clear();
+ }
+ }
+ mark_stack_->Reset();
+
+ // Reset the marked large objects.
+ space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
+ large_objects->GetMarkObjects()->Clear();
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
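The core of the file above is MarkObject's copying path: allocate room in the to-space, memcpy the object, install the forwarding address in the old copy, then push the new copy so its references get updated later. A stripped-down sketch of that sequence for fixed-size objects (toy types, single-threaded, no ART APIs; the real collector stores the forwarding pointer in the lock word and allocates via to_space_->Alloc as shown above):

#include <cstddef>
#include <cstring>
#include <vector>

struct ToyObject {
  ToyObject* forward = nullptr;  // stand-in for the lock-word forwarding address
  int payload[4] = {};
};

// A fixed-capacity space that hands out objects by bumping an index.
struct ToySpace {
  std::vector<ToyObject> slots;
  size_t used = 0;
  explicit ToySpace(size_t capacity) : slots(capacity) {}
  bool Contains(const ToyObject* p) const {
    return p >= slots.data() && p < slots.data() + slots.size();
  }
  ToyObject* Alloc() { return &slots[used++]; }  // No bounds check in this sketch.
};

// Copy 'obj' into 'to_space' if it lives in 'from_space' and has not been copied
// yet; always return the object's current (possibly forwarded) location.
ToyObject* Forward(ToyObject* obj, ToySpace& from_space, ToySpace& to_space,
                   std::vector<ToyObject*>& mark_stack) {
  if (!from_space.Contains(obj)) {
    return obj;  // Immune or non-moving objects stay where they are.
  }
  if (obj->forward == nullptr) {
    ToyObject* copy = to_space.Alloc();
    std::memcpy(copy, obj, sizeof(ToyObject));  // ToyObject is trivially copyable.
    obj->forward = copy;         // Install the forwarding pointer after the copy.
    mark_stack.push_back(copy);  // The copy still needs its references updated.
  }
  return obj->forward;
}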
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
new file mode 100644
index 0000000000..13d519559a
--- /dev/null
+++ b/runtime/gc/collector/semi_space.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
+#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
+
+#include "atomic_integer.h"
+#include "barrier.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "garbage_collector.h"
+#include "offsets.h"
+#include "root_visitor.h"
+#include "UniquePtr.h"
+
+namespace art {
+
+namespace mirror {
+ class Class;
+ class Object;
+ template<class T> class ObjectArray;
+} // namespace mirror
+
+class StackVisitor;
+class Thread;
+
+namespace gc {
+
+namespace accounting {
+ template <typename T> class AtomicStack;
+ class MarkIfReachesAllocspaceVisitor;
+ class ModUnionClearCardVisitor;
+ class ModUnionVisitor;
+ class ModUnionTableBitmap;
+ class MarkStackChunk;
+ typedef AtomicStack<mirror::Object*> ObjectStack;
+ class SpaceBitmap;
+} // namespace accounting
+
+namespace space {
+ class BumpPointerSpace;
+ class ContinuousMemMapAllocSpace;
+ class ContinuousSpace;
+} // namespace space
+
+class Heap;
+
+namespace collector {
+
+class SemiSpace : public GarbageCollector {
+ public:
+ explicit SemiSpace(Heap* heap, const std::string& name_prefix = "");
+
+ ~SemiSpace() {}
+
+ virtual void InitializePhase();
+ virtual bool IsConcurrent() const {
+ return false;
+ }
+ virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void MarkReachableObjects()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ virtual GcType GetGcType() const {
+ return kGcTypePartial;
+ }
+
+ // Sets which space we will be copying objects to.
+ void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);
+
+ // Set the space where we copy objects from.
+ void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);
+
+ // Initializes internal structures.
+ void Init();
+
+ // Find the default mark bitmap.
+ void FindDefaultMarkBitmap();
+
+ // Returns the new address of the object.
+ mirror::Object* MarkObject(mirror::Object* object)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ void ScanObject(mirror::Object* obj)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Marks the root set at the start of a garbage collection.
+ void MarkRoots()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Make a space immune; immune spaces have all live objects marked - that is, the mark and
+ // live bitmaps are bound together.
+ void ImmuneSpace(space::ContinuousSpace* space)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
+ // the image. Mark that portion of the heap as immune.
+ virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void BindLiveToMarkBitmap(space::ContinuousSpace* space)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ void UnBindBitmaps()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ void ProcessReferences(Thread* self)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Sweeps unmarked objects to complete the garbage collection.
+ virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Sweeps unmarked objects in the large object space.
+ void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Sweep only pointers within an array. WARNING: Trashes objects.
+ void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ mirror::Object* GetClearedReferences() {
+ return cleared_reference_list_;
+ }
+
+ // TODO: enable thread safety analysis when in use by multiple worker threads.
+ template <typename MarkVisitor>
+ void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS;
+
+ void SweepSystemWeaks()
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ template <typename Visitor>
+ static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ protected:
+ // Returns null if the object is not marked, otherwise returns the forwarding address (same as
+ // object for non-movable things).
+ mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;
+
+ static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Marks a large object, returning true if the object was not previously marked.
+ bool MarkLargeObject(const mirror::Object* obj)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Special sweep for zygote that just marks objects / dirties cards.
+ static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Expand mark stack to 2x its current size.
+ void ResizeMarkStack(size_t new_size);
+
+ // Returns how many threads we should use for the current GC phase, based on whether we are
+ // paused and whether or not we care about pauses.
+ size_t GetThreadCount(bool paused) const;
+
+ // Returns true if an object is inside of the immune region (assumed to be marked).
+ bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
+ return obj >= immune_begin_ && obj < immune_end_;
+ }
+
+ bool IsImmuneSpace(const space::ContinuousSpace* space) const;
+
+ static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
+ const StackVisitor *visitor);
+
+ void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
+ NO_THREAD_SAFETY_ANALYSIS;
+
+ template <typename Visitor>
+ static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Visit the header, static field references, and interface pointers of a class object.
+ template <typename Visitor>
+ static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ template <typename Visitor>
+ static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ template <typename Visitor>
+ static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Visit all of the references in an object array.
+ template <typename Visitor>
+ static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Visits the header and field references of a data object.
+ template <typename Visitor>
+ static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ return VisitInstanceFieldsReferences(klass, obj, visitor);
+ }
+
+ // Push an object onto the mark stack.
+ inline void MarkStackPush(mirror::Object* obj);
+
+ void UpdateAndMarkModUnion()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Schedules an unmarked object for reference processing.
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Recursively blackens objects on the mark stack.
+ void ProcessMarkStack(bool paused)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ void EnqueueFinalizerReferences(mirror::Object** ref)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ void PreserveSomeSoftReferences(mirror::Object** ref)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ void ClearWhiteReferences(mirror::Object** list)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
+ mirror::Object** weak_references,
+ mirror::Object** finalizer_references,
+ mirror::Object** phantom_references)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;
+
+ mirror::Object* GetForwardingAddress(mirror::Object* obj);
+
+ // Mark stack holding objects that still need their references scanned.
+ accounting::ObjectStack* mark_stack_;
+
+ // Immune range, every object inside the immune range is assumed to be marked.
+ mirror::Object* immune_begin_;
+ mirror::Object* immune_end_;
+
+ // Destination and source spaces.
+ space::ContinuousMemMapAllocSpace* to_space_;
+ space::ContinuousMemMapAllocSpace* from_space_;
+
+ mirror::Object* soft_reference_list_;
+ mirror::Object* weak_reference_list_;
+ mirror::Object* finalizer_reference_list_;
+ mirror::Object* phantom_reference_list_;
+ mirror::Object* cleared_reference_list_;
+
+ Thread* self_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SemiSpace);
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
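The class above only exposes the knobs; the phase sequencing is owned by the GarbageCollector base class and the heap, neither of which appears in this hunk. A hypothetical configuration helper (the function name and the sequencing comments are assumptions; only SetFromSpace/SetToSpace come from the header above):

#include "gc/collector/semi_space.h"  // assumes an ART source-tree include path

// Hypothetical helper: point the collector at its source and destination spaces
// before the base collector drives InitializePhase / MarkingPhase / ReclaimPhase /
// FinishPhase as declared above.
void ConfigureSemiSpace(art::gc::collector::SemiSpace* collector,
                        art::gc::space::ContinuousMemMapAllocSpace* from,
                        art::gc::space::ContinuousMemMapAllocSpace* to) {
  collector->SetFromSpace(from);  // Objects are evacuated out of this space...
  collector->SetToSpace(to);      // ...and copied into this one.
}

Since FinishPhase above nulls both space pointers, this wiring has to be redone before every collection.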
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 9f0bf33387..b27b8dfb46 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -26,7 +26,7 @@ namespace collector {
StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
: PartialMarkSweep(heap, is_concurrent,
- name_prefix + (name_prefix.empty() ? "" : " ") + "sticky") {
+ name_prefix.empty() ? "sticky " : name_prefix) {
cumulative_timings_.SetName(GetName());
}
@@ -38,7 +38,8 @@ void StickyMarkSweep::BindBitmaps() {
// know what was allocated since the last GC. A side-effect of binding the allocation space mark
// and live bitmap is that marking the objects will place them in the live bitmap.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
+ if (space->IsDlMallocSpace() &&
+ space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
BindLiveToMarkBitmap(space);
}
}
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 8bee00f0b8..b6758777af 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -31,10 +31,6 @@ class StickyMarkSweep : public PartialMarkSweep {
return kGcTypeSticky;
}
- // Don't need to do anything special here since we scan all the cards which may have references
- // to the newly allocated objects.
- virtual void UpdateAndMarkModUnion() { }
-
explicit StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
~StickyMarkSweep() {}
@@ -53,6 +49,10 @@ class StickyMarkSweep : public PartialMarkSweep {
void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ // Don't need to do anything special here since we scan all the cards which may have references
+ // to the newly allocated objects.
+ virtual void UpdateAndMarkModUnion() { }
+
private:
DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
};
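The premise behind the BindBitmaps change above (and behind MarkSweep::SweepArray earlier in this patch) is that once the allocation space's live and mark bitmaps are bound, everything that survived an earlier GC already counts as marked, so only objects allocated since the last GC are candidates for freeing. A small illustrative sketch of that sweep decision, with plain sets standing in for the bitmaps and the allocation stack (not ART code):

#include <unordered_set>
#include <vector>

using ObjId = unsigned;

// 'bound_live_mark' holds survivors of earlier GCs (treated as marked).
// 'marked_since_last_gc' holds the newly allocated objects the sticky mark
// phase reached. Anything else on the allocation stack is reclaimable.
std::vector<ObjId> StickySweepAllocationStack(
    const std::unordered_set<ObjId>& bound_live_mark,
    const std::unordered_set<ObjId>& marked_since_last_gc,
    const std::vector<ObjId>& allocation_stack) {
  std::vector<ObjId> reclaimable;
  for (ObjId obj : allocation_stack) {
    if (bound_live_mark.count(obj) == 0 && marked_since_last_gc.count(obj) == 0) {
      reclaimable.push_back(obj);
    }
  }
  return reclaimable;
}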
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 873eadc46a..1d3c0d8777 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -20,6 +20,7 @@
#include "heap.h"
#include "debugger.h"
+#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "object_utils.h"
@@ -30,8 +31,9 @@
namespace art {
namespace gc {
-inline mirror::Object* Heap::AllocObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count) {
- DebugCheckPreconditionsForAllobObject(c, byte_count);
+inline mirror::Object* Heap::AllocNonMovableObjectUninstrumented(Thread* self, mirror::Class* c,
+ size_t byte_count) {
+ DebugCheckPreconditionsForAllocObject(c, byte_count);
mirror::Object* obj;
size_t bytes_allocated;
AllocationTimer alloc_timer(this, &obj);
@@ -39,7 +41,7 @@ inline mirror::Object* Heap::AllocObjectUninstrumented(Thread* self, mirror::Cla
&obj, &bytes_allocated);
if (LIKELY(!large_object_allocation)) {
// Non-large object allocation.
- obj = AllocateUninstrumented(self, alloc_space_, byte_count, &bytes_allocated);
+ obj = AllocateUninstrumented(self, non_moving_space_, byte_count, &bytes_allocated);
// Ensure that we did not allocate into a zygote space.
DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
}
@@ -53,10 +55,45 @@ inline mirror::Object* Heap::AllocObjectUninstrumented(Thread* self, mirror::Cla
if (kDesiredHeapVerification > kNoHeapVerification) {
VerifyObject(obj);
}
- return obj;
+ } else {
+ ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
}
- ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
- return NULL;
+ if (kIsDebugBuild) {
+ self->VerifyStack();
+ }
+ return obj;
+}
+
+inline mirror::Object* Heap::AllocMovableObjectUninstrumented(Thread* self, mirror::Class* c,
+ size_t byte_count) {
+ DebugCheckPreconditionsForAllocObject(c, byte_count);
+ mirror::Object* obj;
+ AllocationTimer alloc_timer(this, &obj);
+ byte_count = (byte_count + 7) & ~7;
+ if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, false))) {
+ CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
+ if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, true))) {
+ CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
+ }
+ }
+ obj = bump_pointer_space_->AllocNonvirtual(byte_count);
+ if (LIKELY(obj != NULL)) {
+ obj->SetClass(c);
+ DCHECK(!obj->IsClass());
+ // Record the allocation after the SetClass so that the atomic add acts as a fence guarding
+ // the SetClass; we do not want the class to appear NULL in another thread.
+ num_bytes_allocated_.fetch_add(byte_count);
+ DCHECK(!Dbg::IsAllocTrackingEnabled());
+ if (kDesiredHeapVerification > kNoHeapVerification) {
+ VerifyObject(obj);
+ }
+ } else {
+ ThrowOutOfMemoryError(self, byte_count, false);
+ }
+ if (kIsDebugBuild) {
+ self->VerifyStack();
+ }
+ return obj;
}
inline size_t Heap::RecordAllocationUninstrumented(size_t size, mirror::Object* obj) {
@@ -124,7 +161,7 @@ inline bool Heap::TryAllocLargeObjectUninstrumented(Thread* self, mirror::Class*
return large_object_allocation;
}
-inline void Heap::DebugCheckPreconditionsForAllobObject(mirror::Class* c, size_t byte_count) {
+inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
(c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
strlen(ClassHelper(c).GetDescriptor()) == 0);
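The movable-object path added above rounds each request up to an 8-byte multiple ((byte_count + 7) & ~7) and then bumps a pointer in the bump pointer space. A minimal stand-alone sketch of that allocation discipline (toy type, single-threaded, no OOM handling; in ART the bump itself is done atomically):

#include <cstddef>
#include <cstdint>

// Round a size up to the next multiple of 8, as in (byte_count + 7) & ~7.
inline size_t RoundUp8(size_t byte_count) {
  return (byte_count + 7) & ~static_cast<size_t>(7);
}

struct ToyBumpPointerSpace {
  uint8_t* begin;
  uint8_t* end;
  uint8_t* pos;  // Next free byte.

  void* Alloc(size_t byte_count) {
    byte_count = RoundUp8(byte_count);
    if (pos + byte_count > end) {
      return nullptr;  // Caller would trigger a GC and retry, or throw OOME.
    }
    void* result = pos;
    pos += byte_count;
    return result;
  }
};

A 20-byte request, for example, rounds up to 24 bytes, so every object stays 8-byte aligned, and releasing the whole toy space is just resetting 'pos' back to 'begin'.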
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index de3ab0eb9d..69ca6202f9 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -30,11 +30,14 @@
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
+#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
+#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
@@ -70,9 +73,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
bool concurrent_gc, size_t parallel_gc_threads, size_t conc_gc_threads,
bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
bool ignore_max_footprint)
- : alloc_space_(NULL),
- card_table_(NULL),
- concurrent_gc_(concurrent_gc),
+ : non_moving_space_(NULL),
+ concurrent_gc_(!kMovingCollector && concurrent_gc),
parallel_gc_threads_(parallel_gc_threads),
conc_gc_threads_(conc_gc_threads),
low_memory_mode_(low_memory_mode),
@@ -92,6 +94,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
max_allowed_footprint_(initial_size),
native_footprint_gc_watermark_(initial_size),
native_footprint_limit_(2 * initial_size),
+ native_need_to_run_finalization_(false),
activity_thread_class_(NULL),
application_thread_class_(NULL),
activity_thread_(NULL),
@@ -122,7 +125,9 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
* searching.
*/
max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
- : (kDesiredHeapVerification > kNoHeapVerification) ? KB : MB),
+ : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
+ bump_pointer_space_(nullptr),
+ temp_space_(nullptr),
reference_referent_offset_(0),
reference_queue_offset_(0),
reference_queueNext_offset_(0),
@@ -134,6 +139,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
total_wait_time_(0),
total_allocation_time_(0),
verify_object_mode_(kHeapVerificationNotPermitted),
+ gc_disable_count_(0),
running_on_valgrind_(RUNNING_ON_VALGRIND) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
@@ -147,7 +153,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
if (!image_file_name.empty()) {
space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
CHECK(image_space != NULL) << "Failed to create space for " << image_file_name;
- AddContinuousSpace(image_space);
+ AddSpace(image_space);
// Oat files referenced by image files immediately follow them in memory, ensure alloc space
// isn't going to get in the middle
byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
@@ -159,13 +165,28 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
}
- alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? "zygote space" : "alloc space",
- initial_size,
- growth_limit, capacity,
- requested_alloc_space_begin);
- CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
- alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
- AddContinuousSpace(alloc_space_);
+ const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
+ non_moving_space_ = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
+ requested_alloc_space_begin);
+
+ if (kMovingCollector) {
+ // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
+ // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
+ // issues.
+ const size_t bump_pointer_space_size = std::min(non_moving_space_->Capacity(), 128 * MB);
+ bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
+ bump_pointer_space_size, nullptr);
+ CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
+ AddSpace(bump_pointer_space_);
+ temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
+ nullptr);
+ CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
+ AddSpace(temp_space_);
+ }
+
+ CHECK(non_moving_space_ != NULL) << "Failed to create non-moving space";
+ non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
+ AddSpace(non_moving_space_);
// Allocate the large object space.
const bool kUseFreeListSpaceForLOS = false;
@@ -175,22 +196,23 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
}
CHECK(large_object_space_ != NULL) << "Failed to create large object space";
- AddDiscontinuousSpace(large_object_space_);
+ AddSpace(large_object_space_);
// Compute heap capacity. Continuous spaces are sorted in order of Begin().
+ CHECK(!continuous_spaces_.empty());
+ // Relies on the spaces being sorted.
byte* heap_begin = continuous_spaces_.front()->Begin();
- size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin();
- if (continuous_spaces_.back()->IsDlMallocSpace()) {
- heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
- }
+ byte* heap_end = continuous_spaces_.back()->Limit();
+ size_t heap_capacity = heap_end - heap_begin;
// Allocate the card table.
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
CHECK(card_table_.get() != NULL) << "Failed to create card table";
+ // Use a card cache mod-union table for now, since it makes it easier for us to update the
+ // references to the copying spaces.
accounting::ModUnionTable* mod_union_table =
- new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
- GetImageSpace());
+ new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
AddModUnionTable(mod_union_table);
@@ -223,19 +245,23 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
if (ignore_max_footprint_) {
SetIdealFootprint(std::numeric_limits<size_t>::max());
- concurrent_start_bytes_ = max_allowed_footprint_;
+ concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
+ CHECK_NE(max_allowed_footprint_, 0U);
// Create our garbage collectors.
- for (size_t i = 0; i < 2; ++i) {
- const bool concurrent = i != 0;
- mark_sweep_collectors_.push_back(new collector::MarkSweep(this, concurrent));
- mark_sweep_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
- mark_sweep_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+ if (!kMovingCollector) {
+ for (size_t i = 0; i < 2; ++i) {
+ const bool concurrent = i != 0;
+ garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
+ garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
+ garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+ }
+ } else {
+ semi_space_collector_ = new collector::SemiSpace(this);
+ garbage_collectors_.push_back(semi_space_collector_);
}
- CHECK_NE(max_allowed_footprint_, 0U);
-
if (running_on_valgrind_) {
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
}
@@ -245,6 +271,41 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
}
+bool Heap::IsCompilingBoot() const {
+ for (const auto& space : continuous_spaces_) {
+ if (space->IsImageSpace()) {
+ return false;
+ } else if (space->IsZygoteSpace()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Heap::HasImageSpace() const {
+ for (const auto& space : continuous_spaces_) {
+ if (space->IsImageSpace()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void Heap::IncrementDisableGC(Thread* self) {
+ // Need to do this while holding the lock to prevent races where the GC is about to run or
+ // already running when we attempt to disable it.
+ ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+ MutexLock mu(self, *gc_complete_lock_);
+ WaitForGcToCompleteLocked(self);
+ ++gc_disable_count_;
+}
+
+void Heap::DecrementDisableGC(Thread* self) {
+ MutexLock mu(self, *gc_complete_lock_);
+ CHECK_GE(gc_disable_count_, 0U);
+ --gc_disable_count_;
+}
+
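IncrementDisableGC waits for any in-flight collection and then bumps a counter that CollectGarbageInternal checks before running; DecrementDisableGC undoes it. A hypothetical RAII wrapper (not part of this patch) shows how a caller such as GetPrimitiveArrayCritical could bracket a region in which objects must not move:

// Hypothetical helper, assuming the Heap and Thread types from this patch.
class ScopedDisableGC {
 public:
  ScopedDisableGC(gc::Heap* heap, Thread* self) : heap_(heap), self_(self) {
    heap_->IncrementDisableGC(self_);   // blocks until any running GC finishes
  }
  ~ScopedDisableGC() {
    heap_->DecrementDisableGC(self_);   // re-enable moving collections
  }
 private:
  gc::Heap* const heap_;
  Thread* const self_;
};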
void Heap::CreateThreadPool() {
const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
if (num_threads != 0) {
@@ -252,12 +313,49 @@ void Heap::CreateThreadPool() {
}
}
+void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
+ // Visit objects in bump pointer space.
+ Thread* self = Thread::Current();
+ // TODO: Use reference block.
+ std::vector<SirtRef<mirror::Object>*> saved_refs;
+ if (bump_pointer_space_ != nullptr) {
+ // Need to put all these in sirts since the callback may trigger a GC. TODO: Use a better data
+ // structure.
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(bump_pointer_space_->Begin());
+ const mirror::Object* end = reinterpret_cast<const mirror::Object*>(
+ bump_pointer_space_->End());
+ while (obj < end) {
+ saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
+ obj = space::BumpPointerSpace::GetNextObject(obj);
+ }
+ }
+ // TODO: Switch to standard begin and end to use a range-based loop.
+ for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
+ it < end; ++it) {
+ mirror::Object* obj = *it;
+ // Objects in the allocation stack might be in a movable space.
+ saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
+ }
+ GetLiveBitmap()->Walk(callback, arg);
+ for (const auto& ref : saved_refs) {
+ callback(ref->get(), arg);
+ }
+ // Need to free the sirts in the reverse of the order in which they were allocated.
+ for (size_t i = saved_refs.size(); i != 0; --i) {
+ delete saved_refs[i - 1];
+ }
+}
+
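VisitObjects walks the bump pointer space linearly with BumpPointerSpace::GetNextObject, which relies on each object reporting its own size. A sketch of that walk with the ART-specific types replaced by a template parameter (8-byte object alignment is an assumption here):

#include <cstddef>
#include <cstdint>

// ObjectT must provide SizeOf(); Visitor is any callable taking ObjectT*.
template <typename ObjectT, typename Visitor>
void WalkBumpRegion(uint8_t* begin, uint8_t* end, Visitor&& visit) {
  uint8_t* pos = begin;
  while (pos < end) {
    ObjectT* obj = reinterpret_cast<ObjectT*>(pos);
    visit(obj);
    // Advance past the object, keeping the assumed 8-byte alignment.
    pos += (obj->SizeOf() + 7) & ~static_cast<size_t>(7);
  }
}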
+void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
+ MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(), stack);
+}
+
void Heap::DeleteThreadPool() {
thread_pool_.reset(nullptr);
}
static bool ReadStaticInt(JNIEnvExt* env, jclass clz, const char* name, int* out_value) {
- CHECK(out_value != NULL);
+ DCHECK(out_value != NULL);
jfieldID field = env->GetStaticFieldID(clz, name, "I");
if (field == NULL) {
env->ExceptionClear();
@@ -374,62 +472,71 @@ void Heap::ListenForProcessStateChange() {
}
}
-void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+void Heap::AddSpace(space::Space* space) {
DCHECK(space != NULL);
- DCHECK(space->GetLiveBitmap() != NULL);
- live_bitmap_->AddContinuousSpaceBitmap(space->GetLiveBitmap());
- DCHECK(space->GetMarkBitmap() != NULL);
- mark_bitmap_->AddContinuousSpaceBitmap(space->GetMarkBitmap());
- continuous_spaces_.push_back(space);
- if (space->IsDlMallocSpace() && !space->IsLargeObjectSpace()) {
- alloc_space_ = space->AsDlMallocSpace();
- }
-
- // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
- std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
- [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
- return a->Begin() < b->Begin();
- });
-
- // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
- // avoid redundant marking.
- bool seen_zygote = false, seen_alloc = false;
- for (const auto& space : continuous_spaces_) {
- if (space->IsImageSpace()) {
- DCHECK(!seen_zygote);
- DCHECK(!seen_alloc);
- } else if (space->IsZygoteSpace()) {
- DCHECK(!seen_alloc);
- seen_zygote = true;
- } else if (space->IsDlMallocSpace()) {
- seen_alloc = true;
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ if (space->IsContinuousSpace()) {
+ DCHECK(!space->IsDiscontinuousSpace());
+ space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
+ // Continuous spaces don't necessarily have bitmaps.
+ accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
+ if (live_bitmap != nullptr) {
+ DCHECK(mark_bitmap != nullptr);
+ live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
+ mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
+ }
+
+ continuous_spaces_.push_back(continuous_space);
+ if (continuous_space->IsDlMallocSpace()) {
+ non_moving_space_ = continuous_space->AsDlMallocSpace();
+ }
+
+ // Ensure that spaces remain sorted in increasing order of start address.
+ std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
+ [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
+ return a->Begin() < b->Begin();
+ });
+ // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
+ // avoid redundant marking.
+ bool seen_zygote = false, seen_alloc = false;
+ for (const auto& space : continuous_spaces_) {
+ if (space->IsImageSpace()) {
+ CHECK(!seen_zygote);
+ CHECK(!seen_alloc);
+ } else if (space->IsZygoteSpace()) {
+ CHECK(!seen_alloc);
+ seen_zygote = true;
+ } else if (space->IsDlMallocSpace()) {
+ seen_alloc = true;
+ }
}
+ } else {
+ DCHECK(space->IsDiscontinuousSpace());
+ space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
+ DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
+ live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
+ DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
+ mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
+ discontinuous_spaces_.push_back(discontinuous_space);
+ }
+ if (space->IsAllocSpace()) {
+ alloc_spaces_.push_back(space->AsAllocSpace());
}
}
void Heap::RegisterGCAllocation(size_t bytes) {
- if (this != NULL) {
+ if (this != nullptr) {
gc_memory_overhead_.fetch_add(bytes);
}
}
void Heap::RegisterGCDeAllocation(size_t bytes) {
- if (this != NULL) {
+ if (this != nullptr) {
gc_memory_overhead_.fetch_sub(bytes);
}
}
-void Heap::AddDiscontinuousSpace(space::DiscontinuousSpace* space) {
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- DCHECK(space != NULL);
- DCHECK(space->GetLiveObjects() != NULL);
- live_bitmap_->AddDiscontinuousObjectSet(space->GetLiveObjects());
- DCHECK(space->GetMarkObjects() != NULL);
- mark_bitmap_->AddDiscontinuousObjectSet(space->GetMarkObjects());
- discontinuous_spaces_.push_back(space);
-}
-
void Heap::DumpGcPerformanceInfo(std::ostream& os) {
// Dump cumulative timings.
os << "Dumping cumulative Gc timings\n";
@@ -437,7 +544,7 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
// Dump cumulative loggers for each GC type.
uint64_t total_paused_time = 0;
- for (const auto& collector : mark_sweep_collectors_) {
+ for (const auto& collector : garbage_collectors_) {
CumulativeLogger& logger = collector->GetCumulativeTimings();
if (logger.GetTotalNs() != 0) {
os << Dumpable<CumulativeLogger>(logger);
@@ -480,17 +587,14 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
}
Heap::~Heap() {
+ VLOG(heap) << "Starting ~Heap()";
if (kDumpGcPerformanceOnShutdown) {
DumpGcPerformanceInfo(LOG(INFO));
}
-
- STLDeleteElements(&mark_sweep_collectors_);
-
- // If we don't reset then the mark stack complains in it's destructor.
+ STLDeleteElements(&garbage_collectors_);
+ // If we don't reset then the mark stack complains in its destructor.
allocation_stack_->Reset();
live_stack_->Reset();
-
- VLOG(heap) << "~Heap()";
STLDeleteValues(&mod_union_tables_);
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
@@ -499,6 +603,7 @@ Heap::~Heap() {
delete weak_ref_queue_lock_;
delete finalizer_ref_queue_lock_;
delete phantom_ref_queue_lock_;
+ VLOG(heap) << "Finished ~Heap()";
}
space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
@@ -579,7 +684,7 @@ inline bool Heap::TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c
mirror::Object* obj = AllocateInstrumented(self, large_object_space_, byte_count, bytes_allocated);
// Make sure that our large object didn't get placed anywhere within the space interval or else
// it breaks the immune range.
- DCHECK(obj == NULL ||
+ DCHECK(obj == nullptr ||
reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
*obj_ptr = obj;
@@ -587,16 +692,59 @@ inline bool Heap::TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c
return large_object_allocation;
}
-mirror::Object* Heap::AllocObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count) {
- DebugCheckPreconditionsForAllobObject(c, byte_count);
+mirror::Object* Heap::AllocMovableObjectInstrumented(Thread* self, mirror::Class* c,
+ size_t byte_count) {
+ DebugCheckPreconditionsForAllocObject(c, byte_count);
+ mirror::Object* obj;
+ AllocationTimer alloc_timer(this, &obj);
+ byte_count = RoundUp(byte_count, 8);
+ if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, false))) {
+ CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
+ if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, true))) {
+ CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
+ }
+ }
+ obj = bump_pointer_space_->AllocNonvirtual(byte_count);
+ if (LIKELY(obj != NULL)) {
+ obj->SetClass(c);
+ DCHECK(!obj->IsClass());
+ // Record the allocation after the SetClass so that the atomic add also serves as the fence
+ // guarding the SetClass; we do not want the class to appear NULL in another thread.
+ num_bytes_allocated_.fetch_add(byte_count);
+ if (Runtime::Current()->HasStatsEnabled()) {
+ RuntimeStats* thread_stats = Thread::Current()->GetStats();
+ ++thread_stats->allocated_objects;
+ thread_stats->allocated_bytes += byte_count;
+ RuntimeStats* global_stats = Runtime::Current()->GetStats();
+ ++global_stats->allocated_objects;
+ global_stats->allocated_bytes += byte_count;
+ }
+ if (Dbg::IsAllocTrackingEnabled()) {
+ Dbg::RecordAllocation(c, byte_count);
+ }
+ if (kDesiredHeapVerification > kNoHeapVerification) {
+ VerifyObject(obj);
+ }
+ } else {
+ ThrowOutOfMemoryError(self, byte_count, false);
+ }
+ if (kIsDebugBuild) {
+ self->VerifyStack();
+ }
+ return obj;
+}
+
+mirror::Object* Heap::AllocNonMovableObjectInstrumented(Thread* self, mirror::Class* c,
+ size_t byte_count) {
+ DebugCheckPreconditionsForAllocObject(c, byte_count);
mirror::Object* obj;
size_t bytes_allocated;
AllocationTimer alloc_timer(this, &obj);
- bool large_object_allocation = TryAllocLargeObjectInstrumented(self, c, byte_count,
- &obj, &bytes_allocated);
+ bool large_object_allocation = TryAllocLargeObjectInstrumented(self, c, byte_count, &obj,
+ &bytes_allocated);
if (LIKELY(!large_object_allocation)) {
// Non-large object allocation.
- obj = AllocateInstrumented(self, alloc_space_, byte_count, &bytes_allocated);
+ obj = AllocateInstrumented(self, non_moving_space_, byte_count, &bytes_allocated);
// Ensure that we did not allocate into a zygote space.
DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
}
@@ -612,28 +760,66 @@ mirror::Object* Heap::AllocObjectInstrumented(Thread* self, mirror::Class* c, si
if (kDesiredHeapVerification > kNoHeapVerification) {
VerifyObject(obj);
}
- return obj;
+ } else {
+ ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
}
- ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
- return NULL;
+ if (kIsDebugBuild) {
+ self->VerifyStack();
+ }
+ return obj;
}
-bool Heap::IsHeapAddress(const mirror::Object* obj) {
- // Note: we deliberately don't take the lock here, and mustn't test anything that would
- // require taking the lock.
- if (obj == NULL) {
+void Heap::Trim() {
+ uint64_t start_ns = NanoTime();
+ // Trim the managed spaces.
+ uint64_t total_alloc_space_allocated = 0;
+ uint64_t total_alloc_space_size = 0;
+ uint64_t managed_reclaimed = 0;
+ for (const auto& space : continuous_spaces_) {
+ if (space->IsDlMallocSpace() && !space->IsZygoteSpace()) {
+ gc::space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+ total_alloc_space_size += alloc_space->Size();
+ managed_reclaimed += alloc_space->Trim();
+ }
+ }
+ total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
+ bump_pointer_space_->GetBytesAllocated();
+ const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
+ static_cast<float>(total_alloc_space_size);
+ uint64_t gc_heap_end_ns = NanoTime();
+ // Trim the native heap.
+ dlmalloc_trim(0);
+ size_t native_reclaimed = 0;
+ dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
+ uint64_t end_ns = NanoTime();
+ VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
+ << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
+ << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
+ << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
+ << "%.";
+}
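Trim first shrinks the dlmalloc spaces, then walks the native heap with dlmalloc_inspect_all and a madvise callback. DlmallocMadviseCallback is defined elsewhere in the runtime; a hedged sketch of what such a callback can look like, assuming dlmalloc's handler signature of (start, end, used_bytes, arg):

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Spans with used_bytes == 0 are entirely free, so their whole pages can be
// returned to the kernel; partially used spans are left alone.
static void MadviseFreeSpan(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes != 0) {
    return;
  }
  const uintptr_t page_mask = 4096 - 1;
  uintptr_t begin = (reinterpret_cast<uintptr_t>(start) + page_mask) & ~page_mask;
  uintptr_t limit = reinterpret_cast<uintptr_t>(end) & ~page_mask;
  if (limit > begin) {
    madvise(reinterpret_cast<void*>(begin), limit - begin, MADV_DONTNEED);
    *reinterpret_cast<size_t*>(arg) += limit - begin;   // bytes advised away
  }
}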
+
+bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
+ // Note: we deliberately don't take the lock here, and mustn't test anything that would require
+ // taking the lock.
+ if (obj == nullptr) {
return true;
}
- if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
- return false;
+ return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
+}
+
+bool Heap::IsHeapAddress(const mirror::Object* obj) const {
+ if (kMovingCollector && bump_pointer_space_->HasAddress(obj)) {
+ return true;
}
- return FindSpaceFromObject(obj, true) != NULL;
+ // TODO: This probably doesn't work for large objects.
+ return FindSpaceFromObject(obj, true) != nullptr;
}
bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
bool search_live_stack, bool sorted) {
// Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
- if (obj == NULL || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
+ if (obj == nullptr || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
return false;
}
space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
@@ -642,6 +828,8 @@ bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_
if (c_space->GetLiveBitmap()->Test(obj)) {
return true;
}
+ } else if (bump_pointer_space_->Contains(obj) || temp_space_->Contains(obj)) {
+ return true;
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
if (d_space != NULL) {
@@ -699,16 +887,20 @@ void Heap::VerifyObjectImpl(const mirror::Object* obj) {
VerifyObjectBody(obj);
}
-void Heap::DumpSpaces() {
+void Heap::DumpSpaces(std::ostream& stream) {
for (const auto& space : continuous_spaces_) {
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
- LOG(INFO) << space << " " << *space << "\n"
- << live_bitmap << " " << *live_bitmap << "\n"
- << mark_bitmap << " " << *mark_bitmap;
+ stream << space << " " << *space << "\n";
+ if (live_bitmap != nullptr) {
+ stream << live_bitmap << " " << *live_bitmap << "\n";
+ }
+ if (mark_bitmap != nullptr) {
+ stream << mark_bitmap << " " << *mark_bitmap << "\n";
+ }
}
for (const auto& space : discontinuous_spaces_) {
- LOG(INFO) << space << " " << *space << "\n";
+ stream << space << " " << *space << "\n";
}
}
@@ -735,7 +927,7 @@ void Heap::VerifyObjectBody(const mirror::Object* obj) {
const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
CHECK_EQ(c_c, c_c_c);
- if (verify_object_mode_ != kVerifyAllFast) {
+ if (verify_object_mode_ > kVerifyAllFast) {
// TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
// heap_bitmap_lock_.
if (!IsLiveObjectLocked(obj)) {
@@ -811,7 +1003,7 @@ inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::Allo
inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
bool grow, size_t* bytes_allocated) {
if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
- return NULL;
+ return nullptr;
}
if (LIKELY(!running_on_valgrind_)) {
return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
@@ -841,7 +1033,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* sp
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
- collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
+ collector::GcType last_gc = WaitForGcToComplete(self);
if (last_gc != collector::kGcTypeNone) {
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
@@ -857,9 +1049,10 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* sp
collector::GcType gc_type = static_cast<collector::GcType>(i);
switch (gc_type) {
case collector::kGcTypeSticky: {
- const size_t alloc_space_size = alloc_space_->Size();
+ const size_t alloc_space_size = non_moving_space_->Size();
run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
- alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_;
+ non_moving_space_->Capacity() - alloc_space_size >=
+ min_remaining_space_for_sticky_gc_;
break;
}
case collector::kGcTypePartial:
@@ -869,7 +1062,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* sp
run_gc = true;
break;
default:
- break;
+ LOG(FATAL) << "Invalid GC type";
}
if (run_gc) {
@@ -897,11 +1090,11 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* sp
// or the requested size is really big. Do another GC, collecting SoftReferences this time. The
// VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.
- // OLD-TODO: wait for the finalizers from the previous GC to finish
+ // TODO: Run finalization, but this can cause more allocations to occur.
VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
<< " allocation";
- // We don't need a WaitForConcurrentGcToComplete here either.
+ // We don't need a WaitForGcToComplete here either.
CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
return TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated);
}
@@ -914,51 +1107,24 @@ void Heap::SetTargetHeapUtilization(float target) {
size_t Heap::GetObjectsAllocated() const {
size_t total = 0;
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
- if (space->IsDlMallocSpace()) {
- total += space->AsDlMallocSpace()->GetObjectsAllocated();
- }
- }
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
- for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
- space::DiscontinuousSpace* space = *it;
- total += space->AsLargeObjectSpace()->GetObjectsAllocated();
+ for (space::AllocSpace* space : alloc_spaces_) {
+ total += space->GetObjectsAllocated();
}
return total;
}
size_t Heap::GetObjectsAllocatedEver() const {
size_t total = 0;
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
- if (space->IsDlMallocSpace()) {
- total += space->AsDlMallocSpace()->GetTotalObjectsAllocated();
- }
- }
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
- for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
- space::DiscontinuousSpace* space = *it;
- total += space->AsLargeObjectSpace()->GetTotalObjectsAllocated();
+ for (space::AllocSpace* space : alloc_spaces_) {
+ total += space->GetTotalObjectsAllocated();
}
return total;
}
size_t Heap::GetBytesAllocatedEver() const {
size_t total = 0;
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
- if (space->IsDlMallocSpace()) {
- total += space->AsDlMallocSpace()->GetTotalBytesAllocated();
- }
- }
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
- for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
- space::DiscontinuousSpace* space = *it;
- total += space->AsLargeObjectSpace()->GetTotalBytesAllocated();
+ for (space::AllocSpace* space : alloc_spaces_) {
+ total += space->GetTotalBytesAllocated();
}
return total;
}
@@ -1056,8 +1222,8 @@ class ReferringObjectsFinder {
// For bitmap Visit.
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
- collector::MarkSweep::VisitObjectReferences(obj, *this, true);
+ void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
+ collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
}
// For MarkSweep::VisitObjectReferences.
@@ -1093,56 +1259,69 @@ void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
void Heap::CollectGarbage(bool clear_soft_references) {
// Even if we waited for a GC we still need to do another GC since weaks allocated during the
// last GC will not have necessarily been cleared.
- Thread* self = Thread::Current();
- WaitForConcurrentGcToComplete(self);
CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
}
void Heap::PreZygoteFork() {
static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
- // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
- CollectGarbage(false);
Thread* self = Thread::Current();
MutexLock mu(self, zygote_creation_lock_);
-
// Try to see if we have any Zygote spaces.
if (have_zygote_space_) {
return;
}
-
- VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());
-
- {
- // Flush the alloc stack.
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- FlushAllocStack();
+ VLOG(heap) << "Starting PreZygoteFork";
+ // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
+ CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
+ // Trim the pages at the end of the non moving space.
+ non_moving_space_->Trim();
+ non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+ // Create a new bump pointer space which we will compact into.
+ if (semi_space_collector_ != nullptr) {
+ space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
+ non_moving_space_->Limit());
+ // Compact the bump pointer space to a new zygote bump pointer space.
+ temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+ Compact(&target_space, bump_pointer_space_);
+ CHECK_EQ(temp_space_->GetBytesAllocated(), 0U);
+ total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
+ total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
+ // Update the end and write out image.
+ non_moving_space_->SetEnd(target_space.End());
+ non_moving_space_->SetLimit(target_space.Limit());
+ accounting::SpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap();
+ // Record the allocations in the bitmap.
+ VLOG(heap) << "Recording zygote allocations";
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(target_space.Begin());
+ const mirror::Object* end = reinterpret_cast<const mirror::Object*>(target_space.End());
+ while (obj < end) {
+ bitmap->Set(obj);
+ obj = space::BumpPointerSpace::GetNextObject(obj);
+ }
}
-
- // Turns the current alloc space into a Zygote space and obtain the new alloc space composed
- // of the remaining available heap memory.
- space::DlMallocSpace* zygote_space = alloc_space_;
- alloc_space_ = zygote_space->CreateZygoteSpace("alloc space");
- alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
-
+ // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
+ // the remaining available heap memory.
+ space::DlMallocSpace* zygote_space = non_moving_space_;
+ non_moving_space_ = zygote_space->CreateZygoteSpace("alloc space");
+ non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
// Change the GC retention policy of the zygote space to only collect when full.
zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
- AddContinuousSpace(alloc_space_);
+ AddSpace(non_moving_space_);
have_zygote_space_ = true;
-
+ zygote_space->InvalidateMSpace();
// Create the zygote space mod union table.
accounting::ModUnionTable* mod_union_table =
new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
AddModUnionTable(mod_union_table);
-
// Reset the cumulative loggers since we now have a few additional timing phases.
- for (const auto& collector : mark_sweep_collectors_) {
+ for (const auto& collector : garbage_collectors_) {
collector->ResetCumulativeStatistics();
}
}
void Heap::FlushAllocStack() {
- MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
+ MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
allocation_stack_.get());
allocation_stack_->Reset();
}
@@ -1161,86 +1340,111 @@ void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetM
}
}
+const char* PrettyCause(GcCause cause) {
+ switch (cause) {
+ case kGcCauseForAlloc: return "Alloc";
+ case kGcCauseBackground: return "Background";
+ case kGcCauseExplicit: return "Explicit";
+ default:
+ LOG(FATAL) << "Unreachable";
+ }
+ return "";
+}
+
+void Heap::SwapSemiSpaces() {
+ // Swap the spaces so we allocate into the space which we just evacuated.
+ std::swap(bump_pointer_space_, temp_space_);
+}
-const char* gc_cause_and_type_strings[3][4] = {
- {"", "GC Alloc Sticky", "GC Alloc Partial", "GC Alloc Full"},
- {"", "GC Background Sticky", "GC Background Partial", "GC Background Full"},
- {"", "GC Explicit Sticky", "GC Explicit Partial", "GC Explicit Full"}};
+void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
+ space::ContinuousMemMapAllocSpace* source_space) {
+ CHECK(kMovingCollector);
+ CHECK_NE(target_space, source_space) << "In-place compaction unsupported";
+ if (target_space != source_space) {
+ semi_space_collector_->SetFromSpace(source_space);
+ semi_space_collector_->SetToSpace(target_space);
+ semi_space_collector_->Run(false);
+ }
+}
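Compact hands a from-space and a to-space to the SemiSpace collector, which copies every live object across and rewrites references to the copies. A minimal standalone sketch of that copying discipline (illustrative only; the real collector forwards objects through their lock words and takes its roots from threads, globals, and the mod-union tables):

#include <deque>
#include <unordered_map>
#include <vector>

struct Node {
  std::vector<Node*> refs;   // outgoing references to other objects
};

// Copies everything reachable from `roots` into fresh storage ("to-space") and
// rewrites all references to point at the copies.
std::vector<Node*> Evacuate(const std::vector<Node*>& roots) {
  std::unordered_map<Node*, Node*> forwarding;   // from-space object -> its copy
  std::deque<Node*> worklist;                    // copied but not yet scanned

  auto forward = [&](Node* obj) -> Node* {
    if (obj == nullptr) {
      return nullptr;
    }
    auto it = forwarding.find(obj);
    if (it != forwarding.end()) {
      return it->second;                         // already evacuated
    }
    Node* copy = new Node(*obj);                 // shallow copy into to-space
    forwarding.emplace(obj, copy);
    worklist.push_back(copy);
    return copy;
  };

  std::vector<Node*> new_roots;
  for (Node* root : roots) {
    new_roots.push_back(forward(root));
  }
  while (!worklist.empty()) {                    // Cheney-style breadth-first scan
    Node* obj = worklist.front();
    worklist.pop_front();
    for (Node*& slot : obj->refs) {
      slot = forward(slot);
    }
  }
  return new_roots;
}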
collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
bool clear_soft_references) {
Thread* self = Thread::Current();
-
+ Runtime* runtime = Runtime::Current();
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Locks::mutator_lock_->AssertNotHeld(self);
-
if (self->IsHandlingStackOverflow()) {
LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
}
-
- // Ensure there is only one GC at a time.
- bool start_collect = false;
- while (!start_collect) {
- {
- MutexLock mu(self, *gc_complete_lock_);
- if (!is_gc_running_) {
- is_gc_running_ = true;
- start_collect = true;
- }
- }
- if (!start_collect) {
- // TODO: timinglog this.
- WaitForConcurrentGcToComplete(self);
-
- // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
- // Not doing at the moment to ensure soft references are cleared.
+ {
+ gc_complete_lock_->AssertNotHeld(self);
+ MutexLock mu(self, *gc_complete_lock_);
+ // Ensure there is only one GC at a time.
+ WaitForGcToCompleteLocked(self);
+ // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
+ // Not doing at the moment to ensure soft references are cleared.
+ // GC can be disabled if someone has used GetPrimitiveArrayCritical.
+ if (gc_disable_count_ != 0) {
+ LOG(WARNING) << "Skipping GC due to disable count " << gc_disable_count_;
+ return collector::kGcTypeNone;
}
+ is_gc_running_ = true;
}
- gc_complete_lock_->AssertNotHeld(self);
- if (gc_cause == kGcCauseForAlloc && Runtime::Current()->HasStatsEnabled()) {
- ++Runtime::Current()->GetStats()->gc_for_alloc_count;
- ++Thread::Current()->GetStats()->gc_for_alloc_count;
+ if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
+ ++runtime->GetStats()->gc_for_alloc_count;
+ ++self->GetStats()->gc_for_alloc_count;
}
uint64_t gc_start_time_ns = NanoTime();
uint64_t gc_start_size = GetBytesAllocated();
// Approximate allocation rate in bytes / second.
- if (UNLIKELY(gc_start_time_ns == last_gc_time_ns_)) {
- LOG(WARNING) << "Timers are broken (gc_start_time == last_gc_time_).";
- }
uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
- if (ms_delta != 0) {
+ // Back to back GCs can cause 0 ms of wait time in between GC invocations.
+ if (LIKELY(ms_delta != 0)) {
allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
}
if (gc_type == collector::kGcTypeSticky &&
- alloc_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
+ non_moving_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
gc_type = collector::kGcTypePartial;
}
DCHECK_LT(gc_type, collector::kGcTypeMax);
DCHECK_NE(gc_type, collector::kGcTypeNone);
- DCHECK_LE(gc_cause, kGcCauseExplicit);
- ATRACE_BEGIN(gc_cause_and_type_strings[gc_cause][gc_type]);
-
- collector::MarkSweep* collector = NULL;
- for (const auto& cur_collector : mark_sweep_collectors_) {
- if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
+ collector::GarbageCollector* collector = nullptr;
+ if (kMovingCollector) {
+ gc_type = semi_space_collector_->GetGcType();
+ CHECK_EQ(temp_space_->GetObjectsAllocated(), 0U);
+ semi_space_collector_->SetFromSpace(bump_pointer_space_);
+ semi_space_collector_->SetToSpace(temp_space_);
+ mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
+ }
+ for (const auto& cur_collector : garbage_collectors_) {
+ if (cur_collector->IsConcurrent() == concurrent_gc_ &&
+ cur_collector->GetGcType() == gc_type) {
collector = cur_collector;
break;
}
}
+ if (kMovingCollector) {
+ gc_type = collector::kGcTypeFull;
+ }
CHECK(collector != NULL)
<< "Could not find garbage collector with concurrent=" << concurrent_gc_
<< " and type=" << gc_type;
- collector->clear_soft_references_ = clear_soft_references;
- collector->Run();
+ ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
+
+ collector->Run(clear_soft_references);
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
+
+ // Grow the heap so that we know when to perform the next GC.
+ GrowForUtilization(gc_type, collector->GetDurationNs());
+
if (care_about_pause_times_) {
const size_t duration = collector->GetDurationNs();
std::vector<uint64_t> pauses = collector->GetPauseTimes();
@@ -1252,7 +1456,6 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
was_slow = was_slow || pause > long_pause_log_threshold_;
}
}
-
if (was_slow) {
const size_t percent_free = GetPercentFree();
const size_t current_heap_size = GetBytesAllocated();
@@ -1327,7 +1530,6 @@ class VerifyReferenceVisitor {
accounting::CardTable* card_table = heap_->GetCardTable();
accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
accounting::ObjectStack* live_stack = heap_->live_stack_.get();
-
if (!failed_) {
// Print message on only on first failure to prevent spam.
LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
@@ -1337,7 +1539,7 @@ class VerifyReferenceVisitor {
byte* card_addr = card_table->CardFromAddr(obj);
LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
<< offset << "\n card value = " << static_cast<int>(*card_addr);
- if (heap_->IsHeapAddress(obj->GetClass())) {
+ if (heap_->IsValidObjectAddress(obj->GetClass())) {
LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
} else {
LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
@@ -1345,7 +1547,7 @@ class VerifyReferenceVisitor {
// Attempt to find the class inside the recently freed objects.
space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
- if (ref_space->IsDlMallocSpace()) {
+ if (ref_space != nullptr && ref_space->IsDlMallocSpace()) {
space::DlMallocSpace* space = ref_space->AsDlMallocSpace();
mirror::Class* ref_class = space->FindRecentFreedObject(ref);
if (ref_class != nullptr) {
@@ -1356,7 +1558,7 @@ class VerifyReferenceVisitor {
}
}
- if (ref->GetClass() != nullptr && heap_->IsHeapAddress(ref->GetClass()) &&
+ if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
ref->GetClass()->IsClass()) {
LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
} else {
@@ -1427,17 +1629,25 @@ class VerifyObjectVisitor {
public:
explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
- void operator()(const mirror::Object* obj) const
+ void operator()(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
VerifyReferenceVisitor visitor(heap_);
// The class doesn't count as a reference but we should verify it anyways.
- visitor(obj, obj->GetClass(), MemberOffset(0), false);
- collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
+ collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
+ if (obj->GetClass()->IsReferenceClass()) {
+ visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
+ }
failed_ = failed_ || visitor.Failed();
}
+ static void VisitCallback(mirror::Object* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
+ visitor->operator()(obj);
+ }
+
bool Failed() const {
return failed_;
}
@@ -1453,18 +1663,15 @@ bool Heap::VerifyHeapReferences() {
// Lets sort our allocation stacks so that we can efficiently binary search them.
allocation_stack_->Sort();
live_stack_->Sort();
- // Perform the verification.
VerifyObjectVisitor visitor(this);
- Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
- GetLiveBitmap()->Visit(visitor);
// Verify objects in the allocation stack since these will be objects which were:
// 1. Allocated prior to the GC (pre GC verification).
// 2. Allocated during the GC (pre sweep GC verification).
- for (mirror::Object** it = allocation_stack_->Begin(); it != allocation_stack_->End(); ++it) {
- visitor(*it);
- }
// We don't want to verify the objects in the live stack since they themselves may be
// pointing to dead objects if they are not reachable.
+ VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
+ // Verify the roots:
+ Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
if (visitor.Failed()) {
// Dump mod-union tables.
for (const auto& table_pair : mod_union_tables_) {
@@ -1557,7 +1764,7 @@ class VerifyLiveStackReferences {
void operator()(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
- collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
+ collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
}
bool Failed() const {
@@ -1610,10 +1817,14 @@ void Heap::ProcessCards(base::TimingLogger& timings) {
"ImageModUnionClearCards";
base::TimingLogger::ScopedSplit split(name, &timings);
table->ClearCards();
- } else {
+ } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
base::TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
// No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
// were dirty before the GC started.
+ // TODO: Don't need to use atomic.
+ // The races leave us with either an aged card or an unaged card. Since we checkpoint the
+ // roots first and then scan / update the mod union tables afterwards, we will always scan one
+ // of the two. If we end up with the unaged card, we scan it in the pause.
card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
}
}
@@ -1692,36 +1903,27 @@ void Heap::PostGcVerification(collector::GarbageCollector* gc) {
}
}
-collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
+collector::GcType Heap::WaitForGcToComplete(Thread* self) {
+ ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+ MutexLock mu(self, *gc_complete_lock_);
+ return WaitForGcToCompleteLocked(self);
+}
+
+collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
collector::GcType last_gc_type = collector::kGcTypeNone;
- if (concurrent_gc_) {
- ATRACE_BEGIN("GC: Wait For Concurrent");
- bool do_wait;
- uint64_t wait_start = NanoTime();
- {
- // Check if GC is running holding gc_complete_lock_.
- MutexLock mu(self, *gc_complete_lock_);
- do_wait = is_gc_running_;
- }
- if (do_wait) {
- uint64_t wait_time;
- // We must wait, change thread state then sleep on gc_complete_cond_;
- ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
- {
- MutexLock mu(self, *gc_complete_lock_);
- while (is_gc_running_) {
- gc_complete_cond_->Wait(self);
- }
- last_gc_type = last_gc_type_;
- wait_time = NanoTime() - wait_start;
- total_wait_time_ += wait_time;
- }
- if (wait_time > long_pause_log_threshold_) {
- LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
- }
- }
+ uint64_t wait_start = NanoTime();
+ while (is_gc_running_) {
+ ATRACE_BEGIN("GC: Wait For Completion");
+ // We must wait, change thread state then sleep on gc_complete_cond_;
+ gc_complete_cond_->Wait(self);
+ last_gc_type = last_gc_type_;
ATRACE_END();
}
+ uint64_t wait_time = NanoTime() - wait_start;
+ total_wait_time_ += wait_time;
+ if (wait_time > long_pause_log_threshold_) {
+ LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
+ }
return last_gc_type;
}
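The rewritten wait is the usual condition-variable pattern: hold the completion lock, loop while a GC is marked running, and re-check the predicate after every wakeup. The same shape with the standard library (the runtime uses its own Mutex and ConditionVariable types):

#include <condition_variable>
#include <mutex>

struct GcCompletion {
  std::mutex lock;
  std::condition_variable cond;
  bool gc_running = false;
};

void WaitForGcToComplete(GcCompletion& state) {
  std::unique_lock<std::mutex> guard(state.lock);
  while (state.gc_running) {        // loop guards against spurious wakeups
    state.cond.wait(guard);
  }
}

void FinishGc(GcCompletion& state) {
  {
    std::lock_guard<std::mutex> guard(state.lock);
    state.gc_running = false;
  }
  state.cond.notify_all();          // wake every waiter once the GC is done
}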
@@ -1744,6 +1946,23 @@ void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
max_allowed_footprint_ = max_allowed_footprint;
}
+bool Heap::IsMovableObject(const mirror::Object* obj) const {
+ if (kMovingCollector) {
+ DCHECK(!IsInTempSpace(obj));
+ if (bump_pointer_space_->HasAddress(obj)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Heap::IsInTempSpace(const mirror::Object* obj) const {
+ if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
+ return true;
+ }
+ return false;
+}
+
void Heap::UpdateMaxNativeFootprint() {
size_t native_size = native_bytes_allocated_;
// TODO: Tune the native heap utilization to be a value other than the java heap utilization.
@@ -1773,6 +1992,7 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
} else if (target_size < bytes_allocated + min_free_) {
target_size = bytes_allocated + min_free_;
}
+ native_need_to_run_finalization_ = true;
next_gc_type_ = collector::kGcTypeSticky;
} else {
// Based on how close the current heap size is to the target size, decide
@@ -1796,7 +2016,6 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
if (concurrent_gc_) {
// Calculate when to perform the next ConcurrentGC.
-
// Calculate the estimated GC duration.
double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
// Estimate how many remaining bytes we will have when we need to start the next GC.
@@ -1817,13 +2036,11 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
DCHECK_LE(max_allowed_footprint_, growth_limit_);
}
}
-
- UpdateMaxNativeFootprint();
}
void Heap::ClearGrowthLimit() {
growth_limit_ = capacity_;
- alloc_space_->ClearGrowthLimit();
+ non_moving_space_->ClearGrowthLimit();
}
void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
@@ -1843,6 +2060,12 @@ void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}
+void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
+ DCHECK(reference != NULL);
+ DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
+ reference->SetFieldObject(reference_referent_offset_, referent, true);
+}
+
mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
DCHECK(reference != NULL);
DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
@@ -1852,7 +2075,7 @@ mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
void Heap::ClearReferenceReferent(mirror::Object* reference) {
DCHECK(reference != NULL);
DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
- reference->SetFieldObject(reference_referent_offset_, NULL, true);
+ reference->SetFieldObject(reference_referent_offset_, nullptr, true);
}
// Returns true if the reference object has not yet been enqueued.
@@ -1924,19 +2147,41 @@ void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}
+void Heap::PrintReferenceQueue(std::ostream& os, mirror::Object** queue) {
+ os << "Refernece queue " << queue << "\n";
+ if (queue != nullptr) {
+ mirror::Object* list = *queue;
+ if (list != nullptr) {
+ mirror::Object* cur = list;
+ do {
+ mirror::Object* pending_next =
+ cur->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
+ os << "PendingNext=" << pending_next;
+ if (cur->GetClass()->IsFinalizerReferenceClass()) {
+ os << " Zombie=" <<
+ cur->GetFieldObject<mirror::Object*>(finalizer_reference_zombie_offset_, false);
+ }
+ os << "\n";
+ cur = pending_next;
+ } while (cur != list);
+ }
+ }
+}
+
void Heap::EnqueueClearedReferences(mirror::Object** cleared) {
- DCHECK(cleared != NULL);
- if (*cleared != NULL) {
+ DCHECK(cleared != nullptr);
+ mirror::Object* list = *cleared;
+ if (list != nullptr) {
// When a runtime isn't started there are no reference queues to care about so ignore.
if (LIKELY(Runtime::Current()->IsStarted())) {
ScopedObjectAccess soa(Thread::Current());
JValue result;
ArgArray arg_array(NULL, 0);
- arg_array.Append(reinterpret_cast<uint32_t>(*cleared));
+ arg_array.Append(reinterpret_cast<uint32_t>(list));
soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}
- *cleared = NULL;
+ *cleared = nullptr;
}
}
@@ -1944,42 +2189,27 @@ void Heap::RequestConcurrentGC(Thread* self) {
// Make sure that we can do a concurrent GC.
Runtime* runtime = Runtime::Current();
DCHECK(concurrent_gc_);
- if (runtime == NULL || !runtime->IsFinishedStarting() ||
- !runtime->IsConcurrentGcEnabled()) {
- return;
- }
- {
- MutexLock mu(self, *Locks::runtime_shutdown_lock_);
- if (runtime->IsShuttingDown()) {
- return;
- }
- }
- if (self->IsHandlingStackOverflow()) {
+ if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
+ self->IsHandlingStackOverflow()) {
return;
}
-
// We already have a request pending, no reason to start more until we update
// concurrent_start_bytes_.
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
-
JNIEnv* env = self->GetJniEnv();
- DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
- DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
+ DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
+ DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
WellKnownClasses::java_lang_Daemons_requestGC);
CHECK(!env->ExceptionCheck());
}
void Heap::ConcurrentGC(Thread* self) {
- {
- MutexLock mu(self, *Locks::runtime_shutdown_lock_);
- if (Runtime::Current()->IsShuttingDown()) {
- return;
- }
+ if (Runtime::Current()->IsShuttingDown(self)) {
+ return;
}
-
// Wait for any GCs currently running to finish.
- if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) {
+ if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false);
}
}
@@ -1998,26 +2228,18 @@ void Heap::RequestHeapTrim() {
// We could try mincore(2) but that's only a measure of how many pages we haven't given away,
// not how much use we're making of those pages.
uint64_t ms_time = MilliTime();
- // Note the large object space's bytes allocated is equal to its capacity.
- uint64_t los_bytes_allocated = large_object_space_->GetBytesAllocated();
- float utilization = static_cast<float>(GetBytesAllocated() - los_bytes_allocated) /
- (GetTotalMemory() - los_bytes_allocated);
- if ((utilization > 0.75f && !IsLowMemoryMode()) || ((ms_time - last_trim_time_ms_) < 2 * 1000)) {
- // Don't bother trimming the alloc space if it's more than 75% utilized and low memory mode is
- // not enabled, or if a heap trim occurred in the last two seconds.
+ // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds.
+ if (ms_time - last_trim_time_ms_ < 2 * 1000) {
return;
}
Thread* self = Thread::Current();
- {
- MutexLock mu(self, *Locks::runtime_shutdown_lock_);
- Runtime* runtime = Runtime::Current();
- if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown()) {
- // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
- // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
- // as we don't hold the lock while requesting the trim).
- return;
- }
+ Runtime* runtime = Runtime::Current();
+ if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
+ // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
+ // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
+ // as we don't hold the lock while requesting the trim).
+ return;
}
last_trim_time_ms_ = ms_time;
@@ -2034,50 +2256,55 @@ void Heap::RequestHeapTrim() {
}
}
-size_t Heap::Trim() {
- // Handle a requested heap trim on a thread outside of the main GC thread.
- return alloc_space_->Trim();
-}
-
bool Heap::IsGCRequestPending() const {
return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
}
+void Heap::RunFinalization(JNIEnv* env) {
+ // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
+ if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
+ CHECK(WellKnownClasses::java_lang_System != nullptr);
+ WellKnownClasses::java_lang_System_runFinalization =
+ CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
+ CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
+ }
+ env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
+ WellKnownClasses::java_lang_System_runFinalization);
+}
+
void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
+ Thread* self = ThreadForEnv(env);
+ if (native_need_to_run_finalization_) {
+ RunFinalization(env);
+ UpdateMaxNativeFootprint();
+ native_need_to_run_finalization_ = false;
+ }
// Total number of native bytes allocated.
native_bytes_allocated_.fetch_add(bytes);
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
// The second watermark is higher than the gc watermark. If you hit this it means you are
// allocating native objects faster than the GC can keep up with.
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
- // Can't do this in WellKnownClasses::Init since System is not properly set up at that
- // point.
- if (UNLIKELY(WellKnownClasses::java_lang_System_runFinalization == NULL)) {
- DCHECK(WellKnownClasses::java_lang_System != NULL);
- WellKnownClasses::java_lang_System_runFinalization =
- CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
- CHECK(WellKnownClasses::java_lang_System_runFinalization != NULL);
- }
- if (WaitForConcurrentGcToComplete(ThreadForEnv(env)) != collector::kGcTypeNone) {
- // Just finished a GC, attempt to run finalizers.
- env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
- WellKnownClasses::java_lang_System_runFinalization);
- CHECK(!env->ExceptionCheck());
- }
-
- // If we still are over the watermark, attempt a GC for alloc and run finalizers.
- if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
- CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
- env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
- WellKnownClasses::java_lang_System_runFinalization);
- CHECK(!env->ExceptionCheck());
- }
- // We have just run finalizers, update the native watermark since it is very likely that
- // finalizers released native managed allocations.
- UpdateMaxNativeFootprint();
- } else {
- if (!IsGCRequestPending()) {
- RequestConcurrentGC(ThreadForEnv(env));
+ if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
+ // Just finished a GC, attempt to run finalizers.
+ RunFinalization(env);
+ CHECK(!env->ExceptionCheck());
+ }
+ // If we still are over the watermark, attempt a GC for alloc and run finalizers.
+ if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+ CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
+ RunFinalization(env);
+ native_need_to_run_finalization_ = false;
+ CHECK(!env->ExceptionCheck());
+ }
+ // We have just run finalizers; update the native watermark since it is very likely that
+ // finalizers released native managed allocations.
+ UpdateMaxNativeFootprint();
+ } else if (!IsGCRequestPending()) {
+ if (concurrent_gc_) {
+ RequestConcurrentGC(self);
+ } else {
+ CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
}
}
}
@@ -2086,26 +2313,24 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
int expected_size, new_size;
do {
- expected_size = native_bytes_allocated_.load();
- new_size = expected_size - bytes;
- if (UNLIKELY(new_size < 0)) {
- ScopedObjectAccess soa(env);
- env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
- StringPrintf("Attempted to free %d native bytes with only %d native bytes "
- "registered as allocated", bytes, expected_size).c_str());
- break;
- }
+ expected_size = native_bytes_allocated_.load();
+ new_size = expected_size - bytes;
+ if (UNLIKELY(new_size < 0)) {
+ ScopedObjectAccess soa(env);
+ env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
+ StringPrintf("Attempted to free %d native bytes with only %d native bytes "
+ "registered as allocated", bytes, expected_size).c_str());
+ break;
+ }
} while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
}
int64_t Heap::GetTotalMemory() const {
int64_t ret = 0;
for (const auto& space : continuous_spaces_) {
- if (space->IsImageSpace()) {
- // Currently don't include the image space.
- } else if (space->IsDlMallocSpace()) {
- // Zygote or alloc space
- ret += space->AsDlMallocSpace()->GetFootprint();
+ // Currently don't include the image space.
+ if (!space->IsImageSpace()) {
+ ret += space->Size();
}
}
for (const auto& space : discontinuous_spaces_) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 91909e4f07..0fa000f18d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -31,6 +31,7 @@
#include "jni.h"
#include "locks.h"
#include "offsets.h"
+#include "root_visitor.h"
#include "safe_map.h"
#include "thread_pool.h"
@@ -57,16 +58,19 @@ namespace accounting {
namespace collector {
class GarbageCollector;
class MarkSweep;
+ class SemiSpace;
} // namespace collector
namespace space {
class AllocSpace;
+ class BumpPointerSpace;
class DiscontinuousSpace;
class DlMallocSpace;
class ImageSpace;
class LargeObjectSpace;
class Space;
class SpaceTest;
+ class ContinuousMemMapAllocSpace;
} // namespace space
class AgeCardVisitor {
@@ -101,13 +105,13 @@ enum HeapVerificationMode {
};
static constexpr HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;
-// If true, measure the total allocation time.
-static constexpr bool kMeasureAllocationTime = false;
-// Primitive arrays larger than this size are put in the large object space.
-static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;
-
class Heap {
public:
+ // If true, measure the total allocation time.
+ static constexpr bool kMeasureAllocationTime = false;
+ // Primitive arrays larger than this size are put in the large object space.
+ static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;
+
static constexpr size_t kDefaultInitialSize = 2 * MB;
static constexpr size_t kDefaultMaximumSize = 32 * MB;
static constexpr size_t kDefaultMaxFree = 2 * MB;
@@ -135,14 +139,47 @@ class Heap {
// Allocates and initializes storage for an object instance.
mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(!kMovingClasses);
return AllocObjectInstrumented(self, klass, num_bytes);
}
+ // Allocates and initializes storage for an object instance.
+ mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(!kMovingClasses);
+ return AllocNonMovableObjectInstrumented(self, klass, num_bytes);
+ }
mirror::Object* AllocObjectInstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(!kMovingClasses);
+ if (kMovingCollector) {
+ return AllocMovableObjectInstrumented(self, klass, num_bytes);
+ } else {
+ return AllocNonMovableObjectInstrumented(self, klass, num_bytes);
+ }
+ }
mirror::Object* AllocObjectUninstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(!kMovingClasses);
+ if (kMovingCollector) {
+ return AllocMovableObjectUninstrumented(self, klass, num_bytes);
+ } else {
+ return AllocNonMovableObjectUninstrumented(self, klass, num_bytes);
+ }
+ }
+ mirror::Object* AllocNonMovableObjectInstrumented(Thread* self, mirror::Class* klass,
+ size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* AllocNonMovableObjectUninstrumented(Thread* self, mirror::Class* klass,
+ size_t num_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DebugCheckPreconditionsForAllobObject(mirror::Class* c, size_t byte_count)
+ // Visit all of the live objects in the heap.
+ void VisitObjects(ObjectVisitorCallback callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ThrowOutOfMemoryError(size_t byte_count, bool large_object_allocation);
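A sketch of how a caller might choose between the two allocation paths; the helper is hypothetical and assumes the kMovingCollector/kMovingClasses flags added to runtime/globals.h by this patch.

// Hypothetical helper: hand out a stable address when the caller needs one,
// otherwise allocate normally (possibly into the bump pointer space).
mirror::Object* AllocForExample(Thread* self, mirror::Class* klass, size_t num_bytes,
                                bool needs_stable_address) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (needs_stable_address) {
    return heap->AllocNonMovableObject(self, klass, num_bytes);
  }
  return heap->AllocObject(self, klass, num_bytes);
}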
@@ -152,7 +189,7 @@ class Heap {
// The given reference is believed to be to an object in the Java heap, check the soundness of it.
void VerifyObjectImpl(const mirror::Object* o);
void VerifyObject(const mirror::Object* o) {
- if (o != NULL && this != NULL && verify_object_mode_ > kNoHeapVerification) {
+ if (o != nullptr && this != nullptr && verify_object_mode_ > kNoHeapVerification) {
VerifyObjectImpl(o);
}
}
@@ -169,7 +206,10 @@ class Heap {
// A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
// and doesn't abort on error, allowing the caller to report more
// meaningful diagnostics.
- bool IsHeapAddress(const mirror::Object* obj);
+ bool IsValidObjectAddress(const mirror::Object* obj) const;
+
+  // Returns true if the address passed in is a heap address; it does not need to be aligned.
+ bool IsHeapAddress(const mirror::Object* obj) const;
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
@@ -177,6 +217,17 @@ class Heap {
bool search_live_stack = true, bool sorted = false)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ // Returns true if there is any chance that the object (obj) will move.
+ bool IsMovableObject(const mirror::Object* obj) const;
+
+  // Returns true if an object is in the temp space; if this happens it is usually indicative of
+  // compaction-related errors.
+ bool IsInTempSpace(const mirror::Object* obj) const;
+
+ // Enables us to prevent GC until objects are released.
+ void IncrementDisableGC(Thread* self);
+ void DecrementDisableGC(Thread* self);
+
// Initiates an explicit garbage collection.
void CollectGarbage(bool clear_soft_references) LOCKS_EXCLUDED(Locks::mutator_lock_);
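IncrementDisableGC/DecrementDisableGC are called directly in jni_internal.cc below; a hypothetical RAII wrapper (not in this patch) shows the intended bracketing.

// Hypothetical scoped guard built on the new calls; it keeps the compacting
// collector from running while a raw pointer into a movable object is held.
class ScopedDisableGC {
 public:
  explicit ScopedDisableGC(Thread* self) : self_(self) {
    Runtime::Current()->GetHeap()->IncrementDisableGC(self_);
  }
  ~ScopedDisableGC() {
    Runtime::Current()->GetHeap()->DecrementDisableGC(self_);
  }
 private:
  Thread* const self_;
};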
@@ -221,9 +272,9 @@ class Heap {
// from the system. Doesn't allow the space to exceed its growth limit.
void SetIdealFootprint(size_t max_allowed_footprint);
- // Blocks the caller until the garbage collector becomes idle and returns
- // true if we waited for the GC to complete.
- collector::GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
+ // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
+ // waited for.
+ collector::GcType WaitForGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
return continuous_spaces_;
@@ -239,7 +290,10 @@ class Heap {
MemberOffset reference_pendingNext_offset,
MemberOffset finalizer_reference_zombie_offset);
- mirror::Object* GetReferenceReferent(mirror::Object* reference);
+ void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetReferenceReferent(mirror::Object* reference)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ClearReferenceReferent(mirror::Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if the reference object has not yet been enqueued.
@@ -316,7 +370,7 @@ class Heap {
}
// Returns the number of objects currently allocated.
- size_t GetObjectsAllocated() const;
+ size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
// Returns the total number of objects allocated since the heap was created.
size_t GetObjectsAllocatedEver() const;
@@ -361,7 +415,8 @@ class Heap {
void DumpForSigQuit(std::ostream& os);
- size_t Trim();
+ // Trim the managed and native heaps by releasing unused memory back to the OS.
+ void Trim();
accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
return live_bitmap_.get();
@@ -375,7 +430,7 @@ class Heap {
return live_stack_.get();
}
- void PreZygoteFork() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
// Mark and empty stack.
void FlushAllocStack()
@@ -386,6 +441,10 @@ class Heap {
accounting::ObjectStack* stack)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ // Mark the specified allocation stack as live.
+ void MarkAllocStackAsLive(accounting::ObjectStack* stack)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
// Gets called when we get notified by ActivityThread that the process state has changed.
void ListenForProcessStateChange();
@@ -393,8 +452,8 @@ class Heap {
// Assumes there is only one image space.
space::ImageSpace* GetImageSpace() const;
- space::DlMallocSpace* GetAllocSpace() const {
- return alloc_space_;
+ space::DlMallocSpace* GetNonMovingSpace() const {
+ return non_moving_space_;
}
space::LargeObjectSpace* GetLargeObjectsSpace() const {
@@ -417,7 +476,7 @@ class Heap {
return phantom_ref_queue_lock_;
}
- void DumpSpaces();
+ void DumpSpaces(std::ostream& stream = LOG(INFO));
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os);
@@ -442,7 +501,20 @@ class Heap {
accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
+ mirror::Object* AllocMovableObjectInstrumented(Thread* self, mirror::Class* klass,
+ size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* AllocMovableObjectUninstrumented(Thread* self, mirror::Class* klass,
+ size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool IsCompilingBoot() const;
+ bool HasImageSpace() const;
+
private:
+ void Compact(space::ContinuousMemMapAllocSpace* target_space,
+ space::ContinuousMemMapAllocSpace* source_space);
+
bool TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count,
mirror::Object** obj_ptr, size_t* bytes_allocated)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -471,6 +543,11 @@ class Heap {
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Allocate into a specific space.
+ mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
+ size_t bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Try to allocate a number of bytes, this function never does any GCs.
mirror::Object* TryToAllocateInstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
bool grow, size_t* bytes_allocated)
@@ -500,6 +577,17 @@ class Heap {
// Pushes a list of cleared references out to the managed heap.
void EnqueueClearedReferences(mirror::Object** cleared_references);
+ // Print a reference queue.
+ void PrintReferenceQueue(std::ostream& os, mirror::Object** queue);
+
+ // Run the finalizers.
+ void RunFinalization(JNIEnv* env);
+
+ // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
+ // waited for.
+ collector::GcType WaitForGcToCompleteLocked(Thread* self)
+ EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+
void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
bool IsGCRequestPending() const;
@@ -537,9 +625,7 @@ class Heap {
size_t GetPercentFree();
- void AddContinuousSpace(space::ContinuousSpace* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- void AddDiscontinuousSpace(space::DiscontinuousSpace* space)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
   // No thread safety analysis since we call this everywhere and it is impossible to find a proper
// lock ordering for it.
@@ -560,8 +646,12 @@ class Heap {
// All-known discontinuous spaces, where objects may be placed throughout virtual memory.
std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;
- // The allocation space we are currently allocating into.
- space::DlMallocSpace* alloc_space_;
+ // All-known alloc spaces, where objects may be or have been allocated.
+ std::vector<space::AllocSpace*> alloc_spaces_;
+
+  // A space where non-movable objects are allocated; when compaction is enabled it contains
+  // Classes, ArtMethods, ArtFields, and other non-moving objects.
+ space::DlMallocSpace* non_moving_space_;
// The large object space we are currently allocating into.
space::LargeObjectSpace* large_object_space_;
@@ -599,6 +689,11 @@ class Heap {
// If we have a zygote space.
bool have_zygote_space_;
+ // Number of pinned primitive arrays in the movable space.
+ // Block all GC until this hits zero, or we hit the timeout!
+ size_t number_gc_blockers_;
+ static constexpr size_t KGCBlockTimeout = 30000;
+
// Guards access to the state of GC, associated conditional variable is used to signal when a GC
// completes.
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -635,6 +730,9 @@ class Heap {
// The watermark at which a GC is performed inside of registerNativeAllocation.
size_t native_footprint_limit_;
+ // Whether or not we need to run finalizers in the next native allocation.
+ bool native_need_to_run_finalization_;
+
// Activity manager members.
jclass activity_thread_class_;
jclass application_thread_class_;
@@ -714,6 +812,11 @@ class Heap {
// Second allocation stack so that we can process allocation with the heap unlocked.
UniquePtr<accounting::ObjectStack> live_stack_;
+ // Bump pointer spaces.
+ space::BumpPointerSpace* bump_pointer_space_;
+ // Temp space is the space which the semispace collector copies to.
+ space::BumpPointerSpace* temp_space_;
+
// offset of java.lang.ref.Reference.referent
MemberOffset reference_referent_offset_;
@@ -748,11 +851,16 @@ class Heap {
// The current state of heap verification, may be enabled or disabled.
HeapVerificationMode verify_object_mode_;
- std::vector<collector::MarkSweep*> mark_sweep_collectors_;
+ // GC disable count, error on GC if > 0.
+ size_t gc_disable_count_ GUARDED_BY(gc_complete_lock_);
+
+ std::vector<collector::GarbageCollector*> garbage_collectors_;
+ collector::SemiSpace* semi_space_collector_;
const bool running_on_valgrind_;
friend class collector::MarkSweep;
+ friend class collector::SemiSpace;
friend class VerifyReferenceCardVisitor;
friend class VerifyReferenceVisitor;
friend class VerifyObjectVisitor;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 02708e8341..8af2725e1d 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -43,12 +43,14 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
ScopedObjectAccess soa(Thread::Current());
// garbage is created during ClassLinker::Init
- mirror::Class* c = class_linker_->FindSystemClass("[Ljava/lang/Object;");
+ SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass("[Ljava/lang/Object;"));
for (size_t i = 0; i < 1024; ++i) {
SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c, 2048));
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.get(), 2048));
for (size_t j = 0; j < 2048; ++j) {
- array->Set(j, mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
+      // SIRT operator -> dereferences the SIRT before running the method.
+ array->Set(j, string);
}
}
}
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
new file mode 100644
index 0000000000..85ef2f432f
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
+
+#include "bump_pointer_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
+ num_bytes = RoundUp(num_bytes, kAlignment);
+ byte* old_end;
+ byte* new_end;
+ do {
+ old_end = end_;
+ new_end = old_end + num_bytes;
+ // If there is no more room in the region, we are out of memory.
+ if (UNLIKELY(new_end > growth_end_)) {
+ return nullptr;
+ }
+    // TODO: Use a CAS whose operand width always matches the pointer size.
+ } while (android_atomic_cas(reinterpret_cast<int32_t>(old_end),
+ reinterpret_cast<int32_t>(new_end),
+ reinterpret_cast<volatile int32_t*>(&end_)) != 0);
+  // TODO: Fewer statistics?
+ total_bytes_allocated_.fetch_add(num_bytes);
+ num_objects_allocated_.fetch_add(1);
+ total_objects_allocated_.fetch_add(1);
+ return reinterpret_cast<mirror::Object*>(old_end);
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
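A minimal usage sketch for the new space, assuming a running runtime; a nullptr result means the region is exhausted and the caller must collect or fall back to another space.

space::BumpPointerSpace* bump_space =
    space::BumpPointerSpace::Create("example bump space", 16 * MB, nullptr);
size_t bytes_allocated = 0;
mirror::Object* obj = bump_space->Alloc(Thread::Current(), 64, &bytes_allocated);
if (obj == nullptr) {
  // Out of room in the region: trigger a collection or allocate elsewhere.
}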
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
new file mode 100644
index 0000000000..06ba57e03a
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bump_pointer_space.h"
+#include "bump_pointer_space-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/class-inl.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
+ byte* requested_begin) {
+ capacity = RoundUp(capacity, kPageSize);
+ std::string error_msg;
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
+ PROT_READ | PROT_WRITE, &error_msg));
+ if (mem_map.get() == nullptr) {
+ LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
+ << PrettySize(capacity) << " with message " << error_msg;
+ return nullptr;
+ }
+ return new BumpPointerSpace(name, mem_map.release());
+}
+
+BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
+ : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
+ kGcRetentionPolicyAlwaysCollect),
+ num_objects_allocated_(0), total_bytes_allocated_(0), total_objects_allocated_(0),
+ growth_end_(limit) {
+}
+
+BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
+ : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
+ kGcRetentionPolicyAlwaysCollect),
+ num_objects_allocated_(0), total_bytes_allocated_(0), total_objects_allocated_(0),
+ growth_end_(mem_map->End()) {
+}
+
+mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated) {
+ mirror::Object* ret = AllocNonvirtual(num_bytes);
+ if (LIKELY(ret != nullptr)) {
+ *bytes_allocated = num_bytes;
+ }
+ return ret;
+}
+
+size_t BumpPointerSpace::AllocationSize(const mirror::Object* obj) {
+ return AllocationSizeNonvirtual(obj);
+}
+
+void BumpPointerSpace::Clear() {
+ // Release the pages back to the operating system.
+ CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
+  // Reset the end of the space back to the beginning; we move the end forward as we allocate
+ // objects.
+ SetEnd(Begin());
+ growth_end_ = Limit();
+ num_objects_allocated_ = 0;
+}
+
+void BumpPointerSpace::Dump(std::ostream& os) const {
+ os << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
+ << reinterpret_cast<void*>(Limit());
+}
+
+mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
+ const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
+ return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
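Because allocation is contiguous from Begin() to End(), a collector can walk the space with GetNextObject; a sketch under that assumption (the per-object callback is hypothetical).

void WalkBumpPointerSpace(space::BumpPointerSpace* space) {
  byte* pos = space->Begin();
  while (pos < space->End()) {
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
    VisitObject(obj);  // hypothetical per-object callback
    pos = reinterpret_cast<byte*>(space::BumpPointerSpace::GetNextObject(obj));
  }
}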
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
new file mode 100644
index 0000000000..0faac0ce46
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
+
+#include "space.h"
+
+namespace art {
+namespace gc {
+
+namespace collector {
+ class MarkSweep;
+} // namespace collector
+
+namespace space {
+
+// A bump pointer space is a space where objects may be allocated and garbage collected.
+class BumpPointerSpace : public ContinuousMemMapAllocSpace {
+ public:
+ typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+
+ SpaceType GetType() const {
+ return kSpaceTypeBumpPointerSpace;
+ }
+
+ // Create a bump pointer space with the requested sizes. The requested base address is not
+  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
+ // space to confirm the request was granted.
+ static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);
+
+ // Allocate num_bytes, returns nullptr if the space is full.
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
+ mirror::Object* AllocNonvirtual(size_t num_bytes);
+
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // No-ops unless we support free lists.
+ virtual size_t Free(Thread*, mirror::Object*) {
+ return 0;
+ }
+ virtual size_t FreeList(Thread*, size_t, mirror::Object**) {
+ return 0;
+ }
+
+ size_t AllocationSizeNonvirtual(const mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return obj->SizeOf();
+ }
+
+ // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
+ // maximum reserved size of the heap.
+ void ClearGrowthLimit() {
+ growth_end_ = Limit();
+ }
+
+ // Override capacity so that we only return the possibly limited capacity
+ size_t Capacity() const {
+ return growth_end_ - begin_;
+ }
+
+ // The total amount of memory reserved for the space.
+ size_t NonGrowthLimitCapacity() const {
+ return GetMemMap()->Size();
+ }
+
+ accounting::SpaceBitmap* GetLiveBitmap() const {
+ return nullptr;
+ }
+
+ accounting::SpaceBitmap* GetMarkBitmap() const {
+ return nullptr;
+ }
+
+ // Clear the memory and reset the pointer to the start of the space.
+ void Clear();
+
+ void Dump(std::ostream& os) const;
+
+ uint64_t GetBytesAllocated() {
+ return Size();
+ }
+
+ uint64_t GetObjectsAllocated() {
+ return num_objects_allocated_;
+ }
+
+ uint64_t GetTotalBytesAllocated() {
+ return total_bytes_allocated_;
+ }
+
+ uint64_t GetTotalObjectsAllocated() {
+ return total_objects_allocated_;
+ }
+
+ bool Contains(const mirror::Object* obj) const {
+ const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+ return byte_obj >= Begin() && byte_obj < End();
+ }
+
+ // TODO: Change this? Mainly used for compacting to a particular region of memory.
+ BumpPointerSpace(const std::string& name, byte* begin, byte* limit);
+
+ // Return the object which comes after obj, while ensuring alignment.
+ static mirror::Object* GetNextObject(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ protected:
+ BumpPointerSpace(const std::string& name, MemMap* mem_map);
+
+ size_t InternalAllocationSize(const mirror::Object* obj);
+ mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+  // Approximate numbers of objects and bytes which have been allocated into the space.
+ AtomicInteger num_objects_allocated_;
+ AtomicInteger total_bytes_allocated_;
+ AtomicInteger total_objects_allocated_;
+
+ // Alignment.
+ static constexpr size_t kAlignment = 8;
+
+ byte* growth_end_;
+
+ private:
+ friend class collector::MarkSweep;
+ DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 9ebc16a4a3..8a5e33a403 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -102,8 +102,8 @@ class ValgrindDlMallocSpace : public DlMallocSpace {
}
ValgrindDlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
- byte* end, size_t growth_limit, size_t initial_size) :
- DlMallocSpace(name, mem_map, mspace, begin, end, growth_limit) {
+ byte* end, byte* limit, size_t growth_limit, size_t initial_size) :
+ DlMallocSpace(name, mem_map, mspace, begin, end, limit, growth_limit) {
VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
}
@@ -117,15 +117,13 @@ class ValgrindDlMallocSpace : public DlMallocSpace {
size_t DlMallocSpace::bitmap_index_ = 0;
DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
- byte* end, size_t growth_limit)
- : MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
+ byte* end, byte* limit, size_t growth_limit)
+ : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
recent_free_pos_(0), total_bytes_freed_(0), total_objects_freed_(0),
lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
growth_limit_(growth_limit) {
CHECK(mspace != NULL);
-
size_t bitmap_index = bitmap_index_++;
-
static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin())));
CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
@@ -133,12 +131,10 @@ DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* msp
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;
-
mark_bitmap_.reset(accounting::SpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
-
for (auto& freed : recent_freed_objects_) {
freed.first = nullptr;
freed.second = nullptr;
@@ -207,12 +203,14 @@ DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_siz
// Everything is set so record in immutable structure and leave
MemMap* mem_map_ptr = mem_map.release();
DlMallocSpace* space;
+ byte* begin = mem_map_ptr->Begin();
if (RUNNING_ON_VALGRIND > 0) {
- space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
+ space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, begin, end, begin + capacity,
growth_limit, initial_size);
} else {
- space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
+ space = new DlMallocSpace(name, mem_map_ptr, mspace, begin, end, begin + capacity, growth_limit);
}
+ // We start out with only the initial size possibly containing objects.
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
<< " ) " << *space;
@@ -318,7 +316,8 @@ DlMallocSpace* DlMallocSpace::CreateZygoteSpace(const char* alloc_space_name) {
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), alloc_space_name);
}
DlMallocSpace* alloc_space =
- new DlMallocSpace(alloc_space_name, mem_map.release(), mspace, end_, end, growth_limit);
+ new DlMallocSpace(alloc_space_name, mem_map.release(), mspace, end_, end, limit_,
+ growth_limit);
live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
@@ -343,8 +342,7 @@ mirror::Class* DlMallocSpace::FindRecentFreedObject(const mirror::Object* obj) {
}
void DlMallocSpace::RegisterRecentFree(mirror::Object* ptr) {
- recent_freed_objects_[recent_free_pos_].first = ptr;
- recent_freed_objects_[recent_free_pos_].second = ptr->GetClass();
+ recent_freed_objects_[recent_free_pos_] = std::make_pair(ptr, ptr->GetClass());
recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask;
}
@@ -412,8 +410,8 @@ size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p
// Callback from dlmalloc when it needs to increase the footprint
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
Heap* heap = Runtime::Current()->GetHeap();
- DCHECK_EQ(heap->GetAllocSpace()->GetMspace(), mspace);
- return heap->GetAllocSpace()->MoreCore(increment);
+ DCHECK_EQ(heap->GetNonMovingSpace()->GetMspace(), mspace);
+ return heap->GetNonMovingSpace()->MoreCore(increment);
}
void* DlMallocSpace::MoreCore(intptr_t increment) {
@@ -482,6 +480,29 @@ size_t DlMallocSpace::GetFootprintLimit() {
return mspace_footprint_limit(mspace_);
}
+// Returns the old mark bitmap.
+accounting::SpaceBitmap* DlMallocSpace::BindLiveToMarkBitmap() {
+ accounting::SpaceBitmap* live_bitmap = GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = mark_bitmap_.release();
+ temp_bitmap_.reset(mark_bitmap);
+ mark_bitmap_.reset(live_bitmap);
+ return mark_bitmap;
+}
+
+bool DlMallocSpace::HasBoundBitmaps() const {
+ return temp_bitmap_.get() != nullptr;
+}
+
+void DlMallocSpace::UnBindBitmaps() {
+ CHECK(HasBoundBitmaps());
+ // At this point, the temp_bitmap holds our old mark bitmap.
+ accounting::SpaceBitmap* new_bitmap = temp_bitmap_.release();
+ CHECK_EQ(mark_bitmap_.release(), live_bitmap_.get());
+ mark_bitmap_.reset(new_bitmap);
+ DCHECK(temp_bitmap_.get() == NULL);
+}
+
+
void DlMallocSpace::SetFootprintLimit(size_t new_size) {
MutexLock mu(Thread::Current(), lock_);
VLOG(heap) << "DLMallocSpace::SetFootprintLimit " << PrettySize(new_size);
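A sketch of the intended bind/unbind bracket around a collection, assuming the sticky-style usage where everything already live is treated as marked; the marking and sweeping in between are elided.

space::DlMallocSpace* space = Runtime::Current()->GetHeap()->GetNonMovingSpace();
accounting::SpaceBitmap* old_mark = space->BindLiveToMarkBitmap();  // now held in temp_bitmap_
// ... mark and sweep using the bound bitmaps ...
if (space->HasBoundBitmaps()) {
  space->UnBindBitmaps();  // restores the original mark bitmap
}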
@@ -504,17 +525,25 @@ void DlMallocSpace::Dump(std::ostream& os) const {
}
uint64_t DlMallocSpace::GetBytesAllocated() {
- MutexLock mu(Thread::Current(), lock_);
- size_t bytes_allocated = 0;
- mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
- return bytes_allocated;
+ if (mspace_ != nullptr) {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t bytes_allocated = 0;
+ mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+ return bytes_allocated;
+ } else {
+ return Size();
+ }
}
uint64_t DlMallocSpace::GetObjectsAllocated() {
- MutexLock mu(Thread::Current(), lock_);
- size_t objects_allocated = 0;
- mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
- return objects_allocated;
+ if (mspace_ != nullptr) {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t objects_allocated = 0;
+ mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
+ return objects_allocated;
+ } else {
+ return 0;
+ }
}
} // namespace space
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 522535e3c0..59dafe3f2a 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -30,7 +30,7 @@ namespace collector {
namespace space {
// An alloc space is a space where objects may be allocated and garbage collected.
-class DlMallocSpace : public MemMapSpace, public AllocSpace {
+class DlMallocSpace : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
@@ -136,19 +136,30 @@ class DlMallocSpace : public MemMapSpace, public AllocSpace {
return GetObjectsAllocated() + total_objects_freed_;
}
+ // Returns the old mark bitmap.
+ accounting::SpaceBitmap* BindLiveToMarkBitmap();
+ bool HasBoundBitmaps() const;
+ void UnBindBitmaps();
+
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
+ // Used to ensure that failure happens when you free / allocate into an invalidated space. If we
+ // don't do this we may get heap corruption instead of a segfault at null.
+ void InvalidateMSpace() {
+ mspace_ = nullptr;
+ }
+
protected:
DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
- size_t growth_limit);
+ byte* limit, size_t growth_limit);
private:
size_t InternalAllocationSize(const mirror::Object* obj);
mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
- void RegisterRecentFree(mirror::Object* ptr);
+ void RegisterRecentFree(mirror::Object* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
UniquePtr<accounting::SpaceBitmap> live_bitmap_;
@@ -174,7 +185,7 @@ class DlMallocSpace : public MemMapSpace, public AllocSpace {
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Underlying malloc space
- void* const mspace_;
+ void* mspace_;
// The capacity of the alloc space until such time that ClearGrowthLimit is called.
// The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e12ee063c0..c6177bd01d 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -39,8 +39,9 @@ AtomicInteger ImageSpace::bitmap_index_(0);
ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map,
accounting::SpaceBitmap* live_bitmap)
- : MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
- DCHECK(live_bitmap != NULL);
+ : MemMapSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+ kGcRetentionPolicyNeverCollect) {
+ DCHECK(live_bitmap != nullptr);
live_bitmap_.reset(live_bitmap);
}
@@ -332,7 +333,7 @@ OatFile* ImageSpace::ReleaseOatFile() {
void ImageSpace::Dump(std::ostream& os) const {
os << GetType()
- << "begin=" << reinterpret_cast<void*>(Begin())
+ << " begin=" << reinterpret_cast<void*>(Begin())
<< ",end=" << reinterpret_cast<void*>(End())
<< ",size=" << PrettySize(Size())
<< ",name=\"" << GetName() << "\"]";
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index ef889d42c2..07fb288576 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -59,6 +59,14 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+ virtual bool IsAllocSpace() const {
+ return true;
+ }
+
+ virtual AllocSpace* AsAllocSpace() {
+ return this;
+ }
+
protected:
explicit LargeObjectSpace(const std::string& name);
diff --git a/runtime/gc/space/space-inl.h b/runtime/gc/space/space-inl.h
index 2c3b93c60d..f1031ff8d4 100644
--- a/runtime/gc/space/space-inl.h
+++ b/runtime/gc/space/space-inl.h
@@ -27,18 +27,28 @@ namespace gc {
namespace space {
inline ImageSpace* Space::AsImageSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
+ DCHECK(IsImageSpace());
return down_cast<ImageSpace*>(down_cast<MemMapSpace*>(this));
}
inline DlMallocSpace* Space::AsDlMallocSpace() {
- DCHECK(GetType() == kSpaceTypeAllocSpace || GetType() == kSpaceTypeZygoteSpace);
+ DCHECK(IsDlMallocSpace());
return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
}
inline LargeObjectSpace* Space::AsLargeObjectSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
- return reinterpret_cast<LargeObjectSpace*>(this);
+ DCHECK(IsLargeObjectSpace());
+ return down_cast<LargeObjectSpace*>(this);
+}
+
+inline ContinuousSpace* Space::AsContinuousSpace() {
+ DCHECK(IsContinuousSpace());
+ return down_cast<ContinuousSpace*>(this);
+}
+
+inline DiscontinuousSpace* Space::AsDiscontinuousSpace() {
+ DCHECK(IsDiscontinuousSpace());
+ return down_cast<DiscontinuousSpace*>(this);
}
} // namespace space
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index de48b743f5..8eb17e0c1e 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -34,7 +34,6 @@ std::ostream& operator<<(std::ostream& os, const Space& space) {
return os;
}
-
DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
GcRetentionPolicy gc_retention_policy) :
Space(name, gc_retention_policy),
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 6dd795227d..4c05ddef58 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -42,7 +42,10 @@ class Heap;
namespace space {
+class AllocSpace;
+class ContinuousSpace;
class DlMallocSpace;
+class DiscontinuousSpace;
class ImageSpace;
class LargeObjectSpace;
@@ -64,6 +67,7 @@ enum SpaceType {
kSpaceTypeImageSpace,
kSpaceTypeAllocSpace,
kSpaceTypeZygoteSpace,
+ kSpaceTypeBumpPointerSpace,
kSpaceTypeLargeObjectSpace,
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
@@ -113,12 +117,35 @@ class Space {
return GetType() == kSpaceTypeZygoteSpace;
}
+ // Is this space a bump pointer space?
+ bool IsBumpPointerSpace() const {
+ return GetType() == kSpaceTypeBumpPointerSpace;
+ }
+
// Does this space hold large objects and implement the large object space abstraction?
bool IsLargeObjectSpace() const {
return GetType() == kSpaceTypeLargeObjectSpace;
}
LargeObjectSpace* AsLargeObjectSpace();
+ virtual bool IsContinuousSpace() const {
+ return false;
+ }
+ ContinuousSpace* AsContinuousSpace();
+
+ virtual bool IsDiscontinuousSpace() const {
+ return false;
+ }
+ DiscontinuousSpace* AsDiscontinuousSpace();
+
+ virtual bool IsAllocSpace() const {
+ return false;
+ }
+ virtual AllocSpace* AsAllocSpace() {
+ LOG(FATAL) << "Unimplemented";
+ return nullptr;
+ }
+
virtual ~Space() {}
protected:
@@ -131,13 +158,13 @@ class Space {
// Name of the space that may vary due to the Zygote fork.
std::string name_;
- private:
+ protected:
// When should objects within this space be reclaimed? Not constant as we vary it in the case
// of Zygote forking.
GcRetentionPolicy gc_retention_policy_;
+ private:
friend class art::gc::Heap;
-
DISALLOW_COPY_AND_ASSIGN(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
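A sketch of how heap code dispatches on space type with the new Is*/As* pairs instead of RTTI; the dump helper itself is hypothetical, and the DCHECKs in space-inl.h guard the down_casts.

void DumpSpaceSize(space::Space* space, std::ostream& os) {
  if (space->IsDlMallocSpace()) {
    os << "dlmalloc bytes allocated: " << space->AsDlMallocSpace()->GetBytesAllocated() << "\n";
  } else if (space->IsContinuousSpace()) {
    os << "continuous space size: " << space->AsContinuousSpace()->Size() << "\n";
  }
}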
@@ -180,16 +207,31 @@ class AllocSpace {
// continuous spaces can be marked in the card table.
class ContinuousSpace : public Space {
public:
- // Address at which the space begins
+ // Address at which the space begins.
byte* Begin() const {
return begin_;
}
- // Address at which the space ends, which may vary as the space is filled.
+ // Current address at which the space ends, which may vary as the space is filled.
byte* End() const {
return end_;
}
+ // The end of the address range covered by the space.
+ byte* Limit() const {
+ return limit_;
+ }
+
+ // Change the end of the space. Be careful with use since changing the end of a space to an
+ // invalid value may break the GC.
+ void SetEnd(byte* end) {
+ end_ = end;
+ }
+
+ void SetLimit(byte* limit) {
+ limit_ = limit;
+ }
+
// Current size of space
size_t Size() const {
return End() - Begin();
@@ -198,31 +240,42 @@ class ContinuousSpace : public Space {
virtual accounting::SpaceBitmap* GetLiveBitmap() const = 0;
virtual accounting::SpaceBitmap* GetMarkBitmap() const = 0;
+ // Maximum which the mapped space can grow to.
+ virtual size_t Capacity() const {
+ return Limit() - Begin();
+ }
+
// Is object within this space? We check to see if the pointer is beyond the end first as
// continuous spaces are iterated over from low to high.
bool HasAddress(const mirror::Object* obj) const {
const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
- return byte_ptr < End() && byte_ptr >= Begin();
+ return byte_ptr >= Begin() && byte_ptr < Limit();
}
bool Contains(const mirror::Object* obj) const {
return HasAddress(obj);
}
+ virtual bool IsContinuousSpace() const {
+ return true;
+ }
+
virtual ~ContinuousSpace() {}
protected:
ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
- byte* begin, byte* end) :
- Space(name, gc_retention_policy), begin_(begin), end_(end) {
+ byte* begin, byte* end, byte* limit) :
+ Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
}
-
// The beginning of the storage for fast access.
- byte* const begin_;
+ byte* begin_;
// Current end of the space.
- byte* end_;
+ byte* volatile end_;
+
+ // Limit of the space.
+ byte* limit_;
private:
DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
@@ -241,6 +294,10 @@ class DiscontinuousSpace : public Space {
return mark_objects_.get();
}
+ virtual bool IsDiscontinuousSpace() const {
+ return true;
+ }
+
virtual ~DiscontinuousSpace() {}
protected:
@@ -255,25 +312,12 @@ class DiscontinuousSpace : public Space {
class MemMapSpace : public ContinuousSpace {
public:
- // Maximum which the mapped space can grow to.
- virtual size_t Capacity() const {
- return mem_map_->Size();
- }
-
// Size of the space without a limit on its growth. By default this is just the Capacity, but
// for the allocation space we support starting with a small heap and then extending it.
virtual size_t NonGrowthLimitCapacity() const {
return Capacity();
}
- protected:
- MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
- GcRetentionPolicy gc_retention_policy)
- : ContinuousSpace(name, gc_retention_policy,
- mem_map->Begin(), mem_map->Begin() + initial_size),
- mem_map_(mem_map) {
- }
-
MemMap* GetMemMap() {
return mem_map_.get();
}
@@ -282,13 +326,45 @@ class MemMapSpace : public ContinuousSpace {
return mem_map_.get();
}
- private:
+ protected:
+ MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
+ GcRetentionPolicy gc_retention_policy)
+ : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
+ mem_map_(mem_map) {
+ }
+
// Underlying storage of the space
UniquePtr<MemMap> mem_map_;
+ private:
DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
};
+// Used by the heap compaction interface to enable copying from one type of alloc space to another.
+class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
+ public:
+ virtual bool IsAllocSpace() const {
+ return true;
+ }
+
+ virtual AllocSpace* AsAllocSpace() {
+ return this;
+ }
+
+ virtual void Clear() {
+ LOG(FATAL) << "Unimplemented";
+ }
+
+ protected:
+ ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
+ byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
+ : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace);
+};
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 455168c90f..383714bb04 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -33,8 +33,8 @@ class SpaceTest : public CommonTest {
int round, size_t growth_limit);
void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
- void AddContinuousSpace(ContinuousSpace* space) {
- Runtime::Current()->GetHeap()->AddContinuousSpace(space);
+ void AddSpace(ContinuousSpace* space) {
+ Runtime::Current()->GetHeap()->AddSpace(space);
}
};
@@ -91,7 +91,7 @@ TEST_F(SpaceTest, ZygoteSpace) {
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
Thread* self = Thread::Current();
// Succeeds, fits without adjusting the footprint limit.
@@ -136,7 +136,7 @@ TEST_F(SpaceTest, ZygoteSpace) {
space = space->CreateZygoteSpace("alloc space");
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
ptr1 = space->Alloc(self, 1 * MB, &dummy);
@@ -164,7 +164,7 @@ TEST_F(SpaceTest, AllocAndFree) {
Thread* self = Thread::Current();
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
@@ -270,7 +270,7 @@ TEST_F(SpaceTest, AllocAndFreeList) {
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
Thread* self = Thread::Current();
// Succeeds, fits without adjusting the max allowed footprint.
@@ -467,7 +467,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size) {
EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
// In this round we don't allocate with growth and therefore can't grow past the initial size.
// This effectively makes the growth_limit the initial_size, so assert this.
diff --git a/runtime/globals.h b/runtime/globals.h
index 31574ff72d..10426b0fe2 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -73,6 +73,15 @@ const bool kIsTargetBuild = true;
const bool kIsTargetBuild = false;
#endif
+// Garbage collector constants.
+static constexpr bool kMovingCollector = false;
+// True if we allow moving classes.
+static constexpr bool kMovingClasses = false;
+// True if we allow moving fields.
+static constexpr bool kMovingFields = false;
+// True if we allow moving methods.
+static constexpr bool kMovingMethods = false;
+
} // namespace art
#endif // ART_RUNTIME_GLOBALS_H_
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 8f9e072093..a829e97a23 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -48,7 +48,7 @@ void InternTable::VisitRoots(RootVisitor* visitor, void* arg,
MutexLock mu(Thread::Current(), intern_table_lock_);
if (!only_dirty || is_dirty_) {
for (auto& strong_intern : strong_interns_) {
- strong_intern.second = reinterpret_cast<mirror::String*>(visitor(strong_intern.second, arg));
+ strong_intern.second = down_cast<mirror::String*>(visitor(strong_intern.second, arg));
DCHECK(strong_intern.second != nullptr);
}
@@ -59,8 +59,7 @@ void InternTable::VisitRoots(RootVisitor* visitor, void* arg,
// Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
-mirror::String* InternTable::Lookup(Table& table, mirror::String* s,
- uint32_t hash_code) {
+mirror::String* InternTable::Lookup(Table& table, mirror::String* s, uint32_t hash_code) {
intern_table_lock_.AssertHeld(Thread::Current());
for (auto it = table.find(hash_code), end = table.end(); it != end; ++it) {
mirror::String* existing_string = it->second;
@@ -71,8 +70,7 @@ mirror::String* InternTable::Lookup(Table& table, mirror::String* s,
return NULL;
}
-mirror::String* InternTable::Insert(Table& table, mirror::String* s,
- uint32_t hash_code) {
+mirror::String* InternTable::Insert(Table& table, mirror::String* s, uint32_t hash_code) {
intern_table_lock_.AssertHeld(Thread::Current());
table.insert(std::make_pair(hash_code, s));
return s;
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index d7555ddb6a..9938478b2c 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -430,8 +430,8 @@ extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh
if (method->IsStatic()) {
Class* declaringClass = method->GetDeclaringClass();
if (UNLIKELY(!declaringClass->IsInitializing())) {
- if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaringClass,
- true, true))) {
+ if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaringClass, true,
+ true))) {
DCHECK(Thread::Current()->IsExceptionPending());
self->PopShadowFrame();
return;
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 19f55d2f97..08221b723d 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -29,7 +29,7 @@ static inline void AssignRegister(ShadowFrame& new_shadow_frame, const ShadowFra
size_t dest_reg, size_t src_reg) {
// If both register locations contains the same value, the register probably holds a reference.
int32_t src_value = shadow_frame.GetVReg(src_reg);
- mirror::Object* o = shadow_frame.GetVRegReference(src_reg);
+ mirror::Object* o = shadow_frame.GetVRegReference<false>(src_reg);
if (src_value == reinterpret_cast<int32_t>(o)) {
new_shadow_frame.SetVRegReference(dest_reg, o);
} else {
@@ -193,7 +193,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
}
return false;
}
- Object* newArray = Array::Alloc(self, arrayClass, length);
+ Object* newArray = Array::Alloc<kMovingCollector, true>(self, arrayClass, length);
if (UNLIKELY(newArray == NULL)) {
DCHECK(self->IsExceptionPending());
return false;
@@ -233,7 +233,8 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
std::string name(PrettyMethod(shadow_frame->GetMethod()));
if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
- ClassLoader* class_loader = NULL; // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
+
+ SirtRef<ClassLoader> class_loader(self, nullptr); // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
Class* found = Runtime::Current()->GetClassLinker()->FindClass(descriptor.c_str(),
class_loader);
CHECK(found != NULL) << "Class.forName failed in un-started runtime for class: "
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index ec717c1bee..466edebf59 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -22,6 +22,7 @@
#include <utility>
#include <vector>
+#include "atomic_integer.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/stl_util.h"
@@ -292,8 +293,8 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
Class* field_type;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (sig[1] != '\0') {
- ClassLoader* cl = GetClassLoader(soa);
- field_type = class_linker->FindClass(sig, cl);
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), GetClassLoader(soa));
+ field_type = class_linker->FindClass(sig, class_loader);
} else {
field_type = class_linker->FindPrimitiveClass(*sig);
}
@@ -646,8 +647,8 @@ class JNI {
ScopedObjectAccess soa(env);
Class* c = NULL;
if (runtime->IsStarted()) {
- ClassLoader* cl = GetClassLoader(soa);
- c = class_linker->FindClass(descriptor.c_str(), cl);
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), GetClassLoader(soa));
+ c = class_linker->FindClass(descriptor.c_str(), class_loader);
} else {
c = class_linker->FindSystemClass(descriptor.c_str());
}
@@ -2002,14 +2003,22 @@ class JNI {
String* s = soa.Decode<String*>(java_string);
CharArray* chars = s->GetCharArray();
PinPrimitiveArray(soa, chars);
- if (is_copy != NULL) {
- *is_copy = JNI_FALSE;
+ if (is_copy != nullptr) {
+ *is_copy = JNI_TRUE;
+ }
+ int32_t char_count = s->GetLength();
+ int32_t offset = s->GetOffset();
+ jchar* bytes = new jchar[char_count + 1];
+ for (int32_t i = 0; i < char_count; i++) {
+ bytes[i] = chars->Get(i + offset);
}
- return chars->GetData() + s->GetOffset();
+ bytes[char_count] = '\0';
+ return bytes;
}
- static void ReleaseStringChars(JNIEnv* env, jstring java_string, const jchar*) {
+ static void ReleaseStringChars(JNIEnv* env, jstring java_string, const jchar* chars) {
CHECK_NON_NULL_ARGUMENT(GetStringUTFRegion, java_string);
+ delete[] chars;
ScopedObjectAccess soa(env);
UnpinPrimitiveArray(soa, soa.Decode<String*>(java_string)->GetCharArray());
}
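Caller-side sketch (standard JNI usage, not specific to ART): the copy made above is safe only because every GetStringChars is paired with ReleaseStringChars, which now deletes the buffer.

jboolean is_copy;
const jchar* chars = env->GetStringChars(java_string, &is_copy);  // is_copy is now JNI_TRUE
if (chars != nullptr) {
  // ... read the characters ...
  env->ReleaseStringChars(java_string, chars);  // frees the copy made above
}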
@@ -2120,8 +2129,8 @@ class JNI {
// Find the class.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Class* array_class = class_linker->FindClass(descriptor.c_str(),
- element_class->GetClassLoader());
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), element_class->GetClassLoader());
+ Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader);
if (array_class == NULL) {
return NULL;
}
@@ -2146,16 +2155,23 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(GetPrimitiveArrayCritical, java_array);
ScopedObjectAccess soa(env);
Array* array = soa.Decode<Array*>(java_array);
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ if (heap->IsMovableObject(array)) {
+ heap->IncrementDisableGC(soa.Self());
+ // Re-decode in case the object moved since IncrementDisableGC waits for GC to complete.
+ array = soa.Decode<Array*>(java_array);
+ }
PinPrimitiveArray(soa, array);
- if (is_copy != NULL) {
+ if (is_copy != nullptr) {
*is_copy = JNI_FALSE;
}
- return array->GetRawData(array->GetClass()->GetComponentSize());
+    void* address = array->GetRawData(array->GetClass()->GetComponentSize());
+ return address;
}
- static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void*, jint mode) {
+ static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void* elements, jint mode) {
CHECK_NON_NULL_ARGUMENT(ReleasePrimitiveArrayCritical, array);
- ReleasePrimitiveArray(env, array, mode);
+ ReleasePrimitiveArray(env, array, elements, mode);
}
static jboolean* GetBooleanArrayElements(JNIEnv* env, jbooleanArray array, jboolean* is_copy) {
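Caller-side sketch (standard JNI usage): since the critical pair now brackets a GC-disable for movable arrays, the section between Get and Release should stay short and make no JNI calls.

jint* data = static_cast<jint*>(env->GetPrimitiveArrayCritical(int_array, nullptr));
if (data != nullptr) {
  // ... short, non-blocking work on data ...
  env->ReleasePrimitiveArrayCritical(int_array, data, 0);  // re-enables GC for movable arrays
}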
@@ -2206,36 +2222,40 @@ class JNI {
return GetPrimitiveArray<jshortArray, jshort*, ShortArray>(soa, array, is_copy);
}
- static void ReleaseBooleanArrayElements(JNIEnv* env, jbooleanArray array, jboolean*, jint mode) {
- ReleasePrimitiveArray(env, array, mode);
+ static void ReleaseBooleanArrayElements(JNIEnv* env, jbooleanArray array, jboolean* elements,
+ jint mode) {
+ ReleasePrimitiveArray(env, array, elements, mode);
}
- static void ReleaseByteArrayElements(JNIEnv* env, jbyteArray array, jbyte*, jint mode) {
- ReleasePrimitiveArray(env, array, mode);
+ static void ReleaseByteArrayElements(JNIEnv* env, jbyteArray array, jbyte* elements, jint mode) {
+ ReleasePrimitiveArray(env, array, elements, mode);
}
- static void ReleaseCharArrayElements(JNIEnv* env, jcharArray array, jchar*, jint mode) {
- ReleasePrimitiveArray(env, array, mode);
+ static void ReleaseCharArrayElements(JNIEnv* env, jcharArray array, jchar* elements, jint mode) {
+ ReleasePrimitiveArray(env, array, elements, mode);
}
- static void ReleaseDoubleArrayElements(JNIEnv* env, jdoubleArray array, jdouble*, jint mode) {
- ReleasePrimitiveArray(env, array, mode);
+ static void ReleaseDoubleArrayElements(JNIEnv* env, jdoubleArray array, jdouble* elements,
+ jint mode) {
+ ReleasePrimitiveArray(env, array, elements, mode);
}
- static void ReleaseFloatArrayElements(JNIEnv* env, jfloatArray array, jfloat*, jint mode) {
- ReleasePrimitiveArray(env, array, mode);
+ static void ReleaseFloatArrayElements(JNIEnv* env, jfloatArray array, jfloat* elements,
+ jint mode) {
+ ReleasePrimitiveArray(env, array, elements, mode);
}
- static void ReleaseIntArrayElements(JNIEnv* env, jintArray array, jint*, jint mode) {
- ReleasePrimitiveArray(env, array, mode);
+ static void ReleaseIntArrayElements(JNIEnv* env, jintArray array, jint* elements, jint mode) {
+ ReleasePrimitiveArray(env, array, elements, mode);
}
- static void ReleaseLongArrayElements(JNIEnv* env, jlongArray array, jlong*, jint mode) {
- ReleasePrimitiveArray(env, array, mode);
+ static void ReleaseLongArrayElements(JNIEnv* env, jlongArray array, jlong* elements, jint mode) {
+ ReleasePrimitiveArray(env, array, elements, mode);
}
- static void ReleaseShortArrayElements(JNIEnv* env, jshortArray array, jshort*, jint mode) {
- ReleasePrimitiveArray(env, array, mode);
+ static void ReleaseShortArrayElements(JNIEnv* env, jshortArray array, jshort* elements,
+ jint mode) {
+ ReleasePrimitiveArray(env, array, elements, mode);
}
static void GetBooleanArrayRegion(JNIEnv* env, jbooleanArray array, jsize start, jsize length,
@@ -2551,19 +2571,49 @@ class JNI {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array);
PinPrimitiveArray(soa, array);
- if (is_copy != NULL) {
- *is_copy = JNI_FALSE;
+ // Only make a copy if necessary.
+ if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
+ if (is_copy != nullptr) {
+ *is_copy = JNI_TRUE;
+ }
+ static const size_t component_size = array->GetClass()->GetComponentSize();
+ size_t size = array->GetLength() * component_size;
+ void* data = new uint64_t[RoundUp(size, 8) / 8];
+ memcpy(data, array->GetData(), size);
+ return reinterpret_cast<CArrayT>(data);
+ } else {
+ if (is_copy != nullptr) {
+ *is_copy = JNI_FALSE;
+ }
+ return reinterpret_cast<CArrayT>(array->GetData());
}
- return array->GetData();
}
- template <typename ArrayT>
- static void ReleasePrimitiveArray(JNIEnv* env, ArrayT java_array, jint mode) {
+ template <typename ArrayT, typename ElementT>
+ static void ReleasePrimitiveArray(JNIEnv* env, ArrayT java_array, ElementT* elements, jint mode) {
+ ScopedObjectAccess soa(env);
+ Array* array = soa.Decode<Array*>(java_array);
+ size_t component_size = array->GetClass()->GetComponentSize();
+ void* array_data = array->GetRawData(component_size);
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ bool is_copy = array_data != reinterpret_cast<void*>(elements);
+ size_t bytes = array->GetLength() * component_size;
+ VLOG(heap) << "Release primitive array " << env << " array_data " << array_data
+ << " elements " << reinterpret_cast<void*>(elements);
+ if (!is_copy && heap->IsMovableObject(array)) {
+ heap->DecrementDisableGC(soa.Self());
+ }
+ // Don't need to copy if we had a direct pointer.
+ if (mode != JNI_ABORT && is_copy) {
+ memcpy(array_data, elements, bytes);
+ }
if (mode != JNI_COMMIT) {
- ScopedObjectAccess soa(env);
- Array* array = soa.Decode<Array*>(java_array);
- UnpinPrimitiveArray(soa, array);
+ if (is_copy) {
+ delete[] reinterpret_cast<uint64_t*>(elements);
+ }
}
+ // TODO: Do we always need to unpin the primitive array?
+ UnpinPrimitiveArray(soa, array);
}
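For non-critical element access, the movable case above returns an 8-byte-aligned heap copy (hence the uint64_t buffer) and reports *is_copy = JNI_TRUE; the release path then derives is_copy by comparing the pointer against the array's raw data, so mode 0 copies back and frees, JNI_COMMIT copies back and keeps the buffer, and JNI_ABORT frees without copying back. A minimal caller-side sketch, not part of this patch; the helper name is hypothetical:

static void TouchFirstByte(JNIEnv* env, jbyteArray java_bytes) {
  jbyte* bytes = env->GetByteArrayElements(java_bytes, nullptr);
  if (bytes != nullptr) {
    bytes[0] = 1;  // mutate the (possibly copied) elements
    // Mode 0: write changes back into the array if this was a copy, then free the copy.
    env->ReleaseByteArrayElements(java_bytes, bytes, 0);
  }
}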
template <typename JavaArrayT, typename JavaT, typename ArrayT>
@@ -2854,6 +2904,18 @@ JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm)
JNIEnvExt::~JNIEnvExt() {
}
+jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
+}
+
+void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (obj != nullptr) {
+ locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
+ }
+}
void JNIEnvExt::SetCheckJniEnabled(bool enabled) {
check_jni = enabled;
functions = enabled ? GetCheckJniNativeInterface() : &gJniNativeInterface;
@@ -3199,7 +3261,7 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path, ClassLoader* class_lo
// the comments in the JNI FindClass function.)
typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
- ClassLoader* old_class_loader = self->GetClassLoaderOverride();
+ SirtRef<ClassLoader> old_class_loader(self, self->GetClassLoaderOverride());
self->SetClassLoaderOverride(class_loader);
int version = 0;
@@ -3209,7 +3271,7 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path, ClassLoader* class_lo
version = (*jni_on_load)(this, NULL);
}
- self->SetClassLoaderOverride(old_class_loader);
+ self->SetClassLoaderOverride(old_class_loader.get());
if (version == JNI_ERR) {
StringAppendF(detail, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 888d5e5458..96f7ae0975 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -162,6 +162,9 @@ struct JNIEnvExt : public JNIEnv {
return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
}
+ jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
Thread* const self;
JavaVMExt* vm;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index c389580ebf..26b18364cf 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -86,19 +86,19 @@ class JniInternalTest : public CommonTest {
const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
jobject jclass_loader(LoadDex(class_name));
Thread* self = Thread::Current();
+ SirtRef<mirror::ClassLoader> null_class_loader(self, nullptr);
SirtRef<mirror::ClassLoader>
class_loader(self,
ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader));
if (is_static) {
- CompileDirectMethod(class_loader.get(), class_name, method_name, method_signature);
+ CompileDirectMethod(class_loader, class_name, method_name, method_signature);
} else {
- CompileVirtualMethod(NULL, "java.lang.Class", "isFinalizable", "()Z");
- CompileDirectMethod(NULL, "java.lang.Object", "<init>", "()V");
- CompileVirtualMethod(class_loader.get(), class_name, method_name, method_signature);
+ CompileVirtualMethod(null_class_loader, "java.lang.Class", "isFinalizable", "()Z");
+ CompileDirectMethod(null_class_loader, "java.lang.Object", "<init>", "()V");
+ CompileVirtualMethod(class_loader, class_name, method_name, method_signature);
}
- mirror::Class* c = class_linker_->FindClass(DotToDescriptor(class_name).c_str(),
- class_loader.get());
+ mirror::Class* c = class_linker_->FindClass(DotToDescriptor(class_name).c_str(), class_loader);
CHECK(c != NULL);
method = is_static ? c->FindDirectMethod(method_name, method_signature)
@@ -1081,7 +1081,6 @@ TEST_F(JniInternalTest, RegisterNatives) {
EXPECT_EQ(memcmp(&src_buf[0], xs, size * sizeof(scalar_type)), 0) \
<< # get_elements_fn " not equal"; \
env_->release_elements_fn(a, xs, 0); \
- EXPECT_EQ(reinterpret_cast<uintptr_t>(v), reinterpret_cast<uintptr_t>(xs))
TEST_F(JniInternalTest, BooleanArrays) {
EXPECT_PRIMITIVE_ARRAY(NewBooleanArray, GetBooleanArrayRegion, SetBooleanArrayRegion,
@@ -1337,7 +1336,7 @@ TEST_F(JniInternalTest, GetStringChars_ReleaseStringChars) {
jboolean is_copy = JNI_FALSE;
chars = env_->GetStringChars(s, &is_copy);
- EXPECT_EQ(JNI_FALSE, is_copy);
+ EXPECT_EQ(JNI_TRUE, is_copy);
EXPECT_EQ(expected[0], chars[0]);
EXPECT_EQ(expected[1], chars[1]);
EXPECT_EQ(expected[2], chars[2]);
@@ -1361,7 +1360,8 @@ TEST_F(JniInternalTest, GetStringCritical_ReleaseStringCritical) {
jboolean is_copy = JNI_FALSE;
chars = env_->GetStringCritical(s, &is_copy);
- EXPECT_EQ(JNI_FALSE, is_copy);
+ // TODO: Fix GetStringCritical to use the same mechanism as GetPrimitiveArrayElementsCritical.
+ EXPECT_EQ(JNI_TRUE, is_copy);
EXPECT_EQ(expected[0], chars[0]);
EXPECT_EQ(expected[1], chars[1]);
EXPECT_EQ(expected[2], chars[2]);
@@ -1669,9 +1669,9 @@ TEST_F(JniInternalTest, StaticMainMethod) {
jobject jclass_loader = LoadDex("Main");
SirtRef<mirror::ClassLoader>
class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
- CompileDirectMethod(class_loader.get(), "Main", "main", "([Ljava/lang/String;)V");
+ CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
- mirror::Class* klass = class_linker_->FindClass("LMain;", class_loader.get());
+ mirror::Class* klass = class_linker_->FindClass("LMain;", class_loader);
ASSERT_TRUE(klass != NULL);
mirror::ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V");
diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h
index efd3d9d25e..aea10c20f8 100644
--- a/runtime/lock_word-inl.h
+++ b/runtime/lock_word-inl.h
@@ -36,6 +36,11 @@ inline Monitor* LockWord::FatLockMonitor() const {
return reinterpret_cast<Monitor*>(value_ << kStateSize);
}
+inline size_t LockWord::ForwardingAddress() const {
+ DCHECK_EQ(GetState(), kForwardingAddress);
+ return static_cast<size_t>(value_ << kStateSize);
+}
+
inline LockWord::LockWord() : value_(0) {
DCHECK_EQ(GetState(), kUnlocked);
}
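The new lock-word state presumably lets the moving collector record an object's new location in the object header: FromForwardingAddress (added to lock_word.h below) shifts the address right by kStateSize, and ForwardingAddress shifts it back, so the round trip is only lossless for addresses aligned to (1 << kStateSize) bytes. A minimal sketch of that round trip, not part of this patch; the helper name is hypothetical and assumes the usual mirror::Object headers:

static void ForwardObject(mirror::Object* obj, size_t new_address)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // new_address must be aligned to (1 << LockWord::kStateSize) for the shifts to be lossless.
  obj->SetLockWord(LockWord::FromForwardingAddress(new_address));
  LockWord lw = obj->GetLockWord();
  DCHECK_EQ(lw.GetState(), LockWord::kForwardingAddress);
  DCHECK_EQ(lw.ForwardingAddress(), new_address);
}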
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 1882ae6504..d24a3bbecc 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -21,6 +21,7 @@
#include <stdint.h>
#include "base/logging.h"
+#include "utils.h"
namespace art {
namespace mirror {
@@ -73,6 +74,7 @@ class LockWord {
kStateThinOrUnlocked = 0,
kStateFat = 1,
kStateHash = 2,
+ kStateForwardingAddress = 3,
// When the state is kHashCode, the non-state bits hold the hashcode.
kHashShift = 0,
@@ -86,6 +88,11 @@ class LockWord {
(kStateThinOrUnlocked << kStateShift));
}
+ static LockWord FromForwardingAddress(size_t target) {
+ DCHECK(IsAligned<(1 << kStateSize)>(target));
+ return LockWord((target >> kStateSize) | (kStateForwardingAddress << kStateShift));
+ }
+
static LockWord FromHashCode(uint32_t hash_code) {
CHECK_LE(hash_code, static_cast<uint32_t>(kHashMask));
return LockWord((hash_code << kHashShift) | (kStateHash << kStateShift));
@@ -96,19 +103,25 @@ class LockWord {
kThinLocked, // Single uncontended owner.
kFatLocked, // See associated monitor.
kHashCode, // Lock word contains an identity hash.
+ kForwardingAddress, // Lock word contains the forwarding address of an object.
};
LockState GetState() const {
- uint32_t internal_state = (value_ >> kStateShift) & kStateMask;
- if (value_ == 0) {
+ if (UNLIKELY(value_ == 0)) {
return kUnlocked;
- } else if (internal_state == kStateThinOrUnlocked) {
- return kThinLocked;
- } else if (internal_state == kStateHash) {
- return kHashCode;
} else {
- DCHECK_EQ(internal_state, static_cast<uint32_t>(kStateFat));
- return kFatLocked;
+ uint32_t internal_state = (value_ >> kStateShift) & kStateMask;
+ switch (internal_state) {
+ case kStateThinOrUnlocked:
+ return kThinLocked;
+ case kStateHash:
+ return kHashCode;
+ case kStateForwardingAddress:
+ return kForwardingAddress;
+ default:
+ DCHECK_EQ(internal_state, static_cast<uint32_t>(kStateFat));
+ return kFatLocked;
+ }
}
}
@@ -121,6 +134,9 @@ class LockWord {
// Return the Monitor encoded in a fat lock.
Monitor* FatLockMonitor() const;
+ // Return the forwarding address stored in the lock word.
+ size_t ForwardingAddress() const;
+
// Default constructor with no lock ownership.
LockWord();
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index c60e714d44..ef73e4df47 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -66,36 +66,36 @@ static inline Array* SetArrayLength(Array* array, size_t length) {
return array;
}
-inline Array* Array::AllocInstrumented(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size) {
+template <bool kIsMovable, bool kIsInstrumented>
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
+ size_t component_size) {
size_t size = ComputeArraySize(self, array_class, component_count, component_size);
if (UNLIKELY(size == 0)) {
return NULL;
}
gc::Heap* heap = Runtime::Current()->GetHeap();
- Array* array = down_cast<Array*>(heap->AllocObjectInstrumented(self, array_class, size));
- return SetArrayLength(array, component_count);
-}
-
-inline Array* Array::AllocUninstrumented(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size) {
- size_t size = ComputeArraySize(self, array_class, component_count, component_size);
- if (UNLIKELY(size == 0)) {
- return NULL;
+ Array* array = nullptr;
+ if (kIsMovable) {
+ if (kIsInstrumented) {
+ array = down_cast<Array*>(heap->AllocMovableObjectInstrumented(self, array_class, size));
+ } else {
+ array = down_cast<Array*>(heap->AllocMovableObjectUninstrumented(self, array_class, size));
+ }
+ } else {
+ if (kIsInstrumented) {
+ array = down_cast<Array*>(heap->AllocNonMovableObjectInstrumented(self, array_class, size));
+ } else {
+ array = down_cast<Array*>(heap->AllocNonMovableObjectUninstrumented(self, array_class, size));
+ }
}
- gc::Heap* heap = Runtime::Current()->GetHeap();
- Array* array = down_cast<Array*>(heap->AllocObjectUninstrumented(self, array_class, size));
return SetArrayLength(array, component_count);
}
-inline Array* Array::AllocInstrumented(Thread* self, Class* array_class, int32_t component_count) {
- DCHECK(array_class->IsArrayClass());
- return AllocInstrumented(self, array_class, component_count, array_class->GetComponentSize());
-}
-
-inline Array* Array::AllocUninstrumented(Thread* self, Class* array_class, int32_t component_count) {
+template <bool kIsMovable, bool kIsInstrumented>
+inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) {
DCHECK(array_class->IsArrayClass());
- return AllocUninstrumented(self, array_class, component_count, array_class->GetComponentSize());
+ return Alloc<kIsMovable, kIsInstrumented>(self, array_class, component_count,
+ array_class->GetComponentSize());
}
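The old AllocInstrumented/AllocUninstrumented pair is folded into a single template whose booleans select the heap entry point, so callers now spell the choice at the call site. A minimal call-site sketch, not part of this patch; the wrapper name is hypothetical and kMovingCollector is the same constant the other call sites in this change use:

inline Array* AllocMovableArrayExample(Thread* self, Class* array_class, int32_t length)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // <kMovingCollector, true>: movable when the moving collector is enabled, instrumented entry point.
  // A non-movable allocation would instead use Array::Alloc<false, true>(...).
  return Array::Alloc<kMovingCollector, true>(self, array_class, length);
}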
} // namespace mirror
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 020085dbf0..f8a283224c 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -41,15 +41,16 @@ namespace mirror {
// Recursively create an array with multiple dimensions. Elements may be
// Objects or primitive types.
static Array* RecursiveCreateMultiArray(Thread* self, Class* array_class, int current_dimension,
- IntArray* dimensions)
+ SirtRef<mirror::IntArray>& dimensions)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
int32_t array_length = dimensions->Get(current_dimension);
- SirtRef<Array> new_array(self, Array::Alloc(self, array_class, array_length));
+ SirtRef<Array> new_array(self, Array::Alloc<kMovingCollector, true>(self, array_class,
+ array_length));
if (UNLIKELY(new_array.get() == NULL)) {
CHECK(self->IsExceptionPending());
return NULL;
}
- if ((current_dimension + 1) < dimensions->GetLength()) {
+ if (current_dimension + 1 < dimensions->GetLength()) {
// Create a new sub-array in every element of the array.
for (int32_t i = 0; i < array_length; i++) {
Array* sub_array = RecursiveCreateMultiArray(self, array_class->GetComponentType(),
@@ -87,13 +88,15 @@ Array* Array::CreateMultiArray(Thread* self, Class* element_class, IntArray* dim
// Find/generate the array class.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Class* array_class = class_linker->FindClass(descriptor.c_str(), element_class->GetClassLoader());
+ SirtRef<mirror::ClassLoader> class_loader(self, element_class->GetClassLoader());
+ Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader);
if (UNLIKELY(array_class == NULL)) {
CHECK(self->IsExceptionPending());
return NULL;
}
// create the array
- Array* new_array = RecursiveCreateMultiArray(self, array_class, 0, dimensions);
+ SirtRef<mirror::IntArray> sirt_dimensions(self, dimensions);
+ Array* new_array = RecursiveCreateMultiArray(self, array_class, 0, sirt_dimensions);
if (UNLIKELY(new_array == NULL)) {
CHECK(self->IsExceptionPending());
return NULL;
@@ -112,7 +115,7 @@ void Array::ThrowArrayStoreException(Object* object) const {
template<typename T>
PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
DCHECK(array_class_ != NULL);
- Array* raw_array = Array::Alloc(self, array_class_, length, sizeof(T));
+ Array* raw_array = Array::Alloc<kMovingCollector, true>(self, array_class_, length, sizeof(T));
return down_cast<PrimitiveArray<T>*>(raw_array);
}
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 570dcaa292..584a4c095b 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -24,28 +24,15 @@ namespace mirror {
class MANAGED Array : public Object {
public:
- // A convenience for code that doesn't know the component size,
- // and doesn't want to have to work it out itself.
+ // A convenience for code that doesn't know the component size, and doesn't want to have to work
+ // it out itself.
+ template <bool kIsMovable, bool kIsInstrumented>
static Array* Alloc(Thread* self, Class* array_class, int32_t component_count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocInstrumented(self, array_class, component_count);
- }
- static Array* AllocUninstrumented(Thread* self, Class* array_class, int32_t component_count)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Array* AllocInstrumented(Thread* self, Class* array_class, int32_t component_count)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template <bool kIsMovable, bool kIsInstrumented>
static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocInstrumented(self, array_class, component_count, component_size);
- }
- static Array* AllocUninstrumented(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Array* AllocInstrumented(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t component_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Array* CreateMultiArray(Thread* self, Class* element_class, IntArray* dimensions)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 7f3a302768..406ab1bbb3 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -357,14 +357,23 @@ inline void Class::CheckObjectAlloc() {
DCHECK_GE(this->object_size_, sizeof(Object));
}
-inline Object* Class::AllocObjectInstrumented(Thread* self) {
+template <bool kIsMovable, bool kIsInstrumented>
+inline Object* Class::Alloc(Thread* self) {
CheckObjectAlloc();
- return Runtime::Current()->GetHeap()->AllocObjectInstrumented(self, this, this->object_size_);
-}
-
-inline Object* Class::AllocObjectUninstrumented(Thread* self) {
- CheckObjectAlloc();
- return Runtime::Current()->GetHeap()->AllocObjectUninstrumented(self, this, this->object_size_);
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ if (kIsMovable) {
+ if (kIsInstrumented) {
+ return heap->AllocMovableObjectInstrumented(self, this, this->object_size_);
+ } else {
+ return heap->AllocMovableObjectUninstrumented(self, this, this->object_size_);
+ }
+ } else {
+ if (kIsInstrumented) {
+ return heap->AllocNonMovableObjectInstrumented(self, this, this->object_size_);
+ } else {
+ return heap->AllocNonMovableObjectUninstrumented(self, this, this->object_size_);
+ }
+ }
}
} // namespace mirror
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f3cb54aa15..cdc5ab2b39 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -52,7 +52,8 @@ void Class::ResetClass() {
void Class::SetStatus(Status new_status, Thread* self) {
Status old_status = GetStatus();
- bool class_linker_initialized = Runtime::Current()->GetClassLinker() != nullptr;
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ bool class_linker_initialized = class_linker != nullptr && class_linker->IsInitialized();
if (LIKELY(class_linker_initialized)) {
if (UNLIKELY(new_status <= old_status && new_status != kStatusError)) {
LOG(FATAL) << "Unexpected change back of class status for " << PrettyClass(this) << " "
@@ -588,7 +589,6 @@ ArtField* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex
ArtField* Class::FindStaticField(const StringPiece& name, const StringPiece& type) {
// Is the field in this class (or its interfaces), or any of its
// superclasses (or their interfaces)?
- ClassHelper kh;
for (Class* k = this; k != NULL; k = k->GetSuperClass()) {
// Is the field in this class?
ArtField* f = k->FindDeclaredStaticField(name, type);
@@ -596,7 +596,7 @@ ArtField* Class::FindStaticField(const StringPiece& name, const StringPiece& typ
return f;
}
// Is this field in any of this class' interfaces?
- kh.ChangeClass(k);
+ ClassHelper kh(k);
for (uint32_t i = 0; i < kh.NumDirectInterfaces(); ++i) {
Class* interface = kh.GetDirectInterface(i);
f = interface->FindStaticField(name, type);
@@ -609,7 +609,6 @@ ArtField* Class::FindStaticField(const StringPiece& name, const StringPiece& typ
}
ArtField* Class::FindStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) {
- ClassHelper kh;
for (Class* k = this; k != NULL; k = k->GetSuperClass()) {
// Is the field in this class?
ArtField* f = k->FindDeclaredStaticField(dex_cache, dex_field_idx);
@@ -617,7 +616,7 @@ ArtField* Class::FindStaticField(const DexCache* dex_cache, uint32_t dex_field_i
return f;
}
// Is this field in any of this class' interfaces?
- kh.ChangeClass(k);
+ ClassHelper kh(k);
for (uint32_t i = 0; i < kh.NumDirectInterfaces(); ++i) {
Class* interface = kh.GetDirectInterface(i);
f = interface->FindStaticField(dex_cache, dex_field_idx);
@@ -631,7 +630,6 @@ ArtField* Class::FindStaticField(const DexCache* dex_cache, uint32_t dex_field_i
ArtField* Class::FindField(const StringPiece& name, const StringPiece& type) {
// Find a field using the JLS field resolution order
- ClassHelper kh;
for (Class* k = this; k != NULL; k = k->GetSuperClass()) {
// Is the field in this class?
ArtField* f = k->FindDeclaredInstanceField(name, type);
@@ -643,7 +641,7 @@ ArtField* Class::FindField(const StringPiece& name, const StringPiece& type) {
return f;
}
// Is this field in any of this class' interfaces?
- kh.ChangeClass(k);
+ ClassHelper kh(k);
for (uint32_t i = 0; i < kh.NumDirectInterfaces(); ++i) {
Class* interface = kh.GetDirectInterface(i);
f = interface->FindStaticField(name, type);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index ed1aad39d2..82077dc52a 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -378,11 +378,12 @@ class MANAGED Class : public StaticStorageBase {
// Creates a raw object instance but does not invoke the default constructor.
Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocObjectInstrumented(self);
+ return Alloc<kMovingCollector, true>(self);
}
- Object* AllocObjectUninstrumented(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Object* AllocObjectInstrumented(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Creates a raw object instance but does not invoke the default constructor.
+ template <bool kIsMovable, bool kIsInstrumented>
+ Object* Alloc(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsVariableSize() const {
// Classes and arrays vary in size, and so the object_size_ field cannot
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 87d02c9469..385ef5ff89 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -39,38 +39,48 @@
namespace art {
namespace mirror {
-Object* Object::Clone(Thread* self) {
- mirror::Class* c = GetClass();
- DCHECK(!c->IsClassClass());
- // Object::SizeOf gets the right size even if we're an array.
- // Using c->AllocObject() here would be wrong.
- size_t num_bytes = SizeOf();
- gc::Heap* heap = Runtime::Current()->GetHeap();
- SirtRef<mirror::Object> sirt_this(self, this);
- Object* copy = heap->AllocObject(self, c, num_bytes);
- if (UNLIKELY(copy == nullptr)) {
- return nullptr;
- }
+static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src, size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Copy instance data. We assume memcpy copies by words.
// TODO: expose and use move32.
- byte* src_bytes = reinterpret_cast<byte*>(sirt_this.get());
- byte* dst_bytes = reinterpret_cast<byte*>(copy);
+ byte* src_bytes = reinterpret_cast<byte*>(src);
+ byte* dst_bytes = reinterpret_cast<byte*>(dest);
size_t offset = sizeof(Object);
memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset);
+ gc::Heap* heap = Runtime::Current()->GetHeap();
// Perform write barriers on copied object references.
- c = copy->GetClass(); // Re-read Class in case it moved.
+ Class* c = src->GetClass();
if (c->IsArrayClass()) {
if (!c->GetComponentType()->IsPrimitive()) {
- const ObjectArray<Object>* array = copy->AsObjectArray<Object>();
- heap->WriteBarrierArray(copy, 0, array->GetLength());
+ const ObjectArray<Object>* array = dest->AsObjectArray<Object>();
+ heap->WriteBarrierArray(dest, 0, array->GetLength());
}
} else {
- heap->WriteBarrierEveryFieldOf(copy);
+ heap->WriteBarrierEveryFieldOf(dest);
}
if (c->IsFinalizable()) {
- SirtRef<mirror::Object> sirt_copy(self, copy);
- heap->AddFinalizerReference(self, copy);
- return sirt_copy.get();
+ SirtRef<Object> sirt_dest(self, dest);
+ heap->AddFinalizerReference(self, dest);
+ return sirt_dest.get();
+ }
+ return dest;
+}
+
+Object* Object::Clone(Thread* self) {
+ CHECK(!IsClass()) << "Can't clone classes.";
+ // Object::SizeOf gets the right size even if we're an array. Using GetClass()->AllocObject() here
+ // would be wrong.
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ size_t num_bytes = SizeOf();
+ SirtRef<Object> this_object(self, this);
+ Object* copy;
+ if (heap->IsMovableObject(this)) {
+ copy = heap->AllocObject(self, GetClass(), num_bytes);
+ } else {
+ copy = heap->AllocNonMovableObject(self, GetClass(), num_bytes);
+ }
+ if (LIKELY(copy != nullptr)) {
+ return CopyObject(self, copy, this_object.get(), num_bytes);
}
return copy;
}
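Clone above illustrates the pattern this change applies throughout: any allocation can trigger a moving collection, so raw Object* values that must survive the call are wrapped in a SirtRef and re-read afterwards. A minimal sketch of that pattern, not part of this patch; the helper name is hypothetical:

static Object* AllocPeerOf(Thread* self, Object* obj)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  SirtRef<Object> sirt_obj(self, obj);                // keeps obj visible to the GC as a root
  Object* peer = obj->GetClass()->AllocObject(self);  // may suspend and move obj
  if (peer != nullptr && sirt_obj.get() != obj) {
    VLOG(heap) << "source object moved during allocation";  // obj is stale; use sirt_obj.get()
  }
  return peer;
}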
@@ -87,8 +97,9 @@ int32_t Object::GenerateIdentityHashCode() {
}
int32_t Object::IdentityHashCode() const {
+ mirror::Object* current_this = const_cast<mirror::Object*>(this);
while (true) {
- LockWord lw = GetLockWord();
+ LockWord lw = current_this->GetLockWord();
switch (lw.GetState()) {
case LockWord::kUnlocked: {
// Try to compare and swap in a new hash, if we succeed we will return the hash on the next
@@ -103,7 +114,10 @@ int32_t Object::IdentityHashCode() const {
case LockWord::kThinLocked: {
// Inflate the thin lock to a monitor and stick the hash code inside of the monitor.
Thread* self = Thread::Current();
- Monitor::InflateThinLocked(self, const_cast<Object*>(this), lw, GenerateIdentityHashCode());
+ SirtRef<mirror::Object> sirt_this(self, current_this);
+ Monitor::InflateThinLocked(self, sirt_this, lw, GenerateIdentityHashCode());
+ // A GC may have occurred when we switched to kBlocked.
+ current_this = sirt_this.get();
break;
}
case LockWord::kFatLocked: {
@@ -115,6 +129,10 @@ int32_t Object::IdentityHashCode() const {
case LockWord::kHashCode: {
return lw.GetHashCode();
}
+ default: {
+ LOG(FATAL) << "Invalid state during hashcode " << lw.GetState();
+ break;
+ }
}
}
LOG(FATAL) << "Unreachable";
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index e8ea3f2375..0fb203917a 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -30,6 +30,7 @@ class LockWord;
class Monitor;
struct ObjectOffsets;
class Thread;
+template <typename T> class SirtRef;
namespace mirror {
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index abc88a3bf8..478f4ec210 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -23,6 +23,7 @@
#include "mirror/art_field.h"
#include "mirror/class.h"
#include "runtime.h"
+#include "sirt_ref.h"
#include "thread.h"
namespace art {
@@ -30,7 +31,7 @@ namespace mirror {
template<class T>
inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class, int32_t length) {
- Array* array = Array::Alloc(self, object_array_class, length, sizeof(Object*));
+ Array* array = Array::Alloc<kMovingCollector, true>(self, object_array_class, length, sizeof(Object*));
if (UNLIKELY(array == NULL)) {
return NULL;
} else {
@@ -134,9 +135,11 @@ inline void ObjectArray<T>::Copy(const ObjectArray<T>* src, int src_pos,
template<class T>
inline ObjectArray<T>* ObjectArray<T>::CopyOf(Thread* self, int32_t new_length) {
+ // We may get copied by a compacting GC.
+ SirtRef<ObjectArray<T> > sirt_this(self, this);
ObjectArray<T>* new_array = Alloc(self, GetClass(), new_length);
- if (LIKELY(new_array != NULL)) {
- Copy(this, 0, new_array, 0, std::min(GetLength(), new_length));
+ if (LIKELY(new_array != nullptr)) {
+ Copy(sirt_this.get(), 0, new_array, 0, std::min(sirt_this->GetLength(), new_length));
}
return new_array;
}
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index d0d1ee4a46..853031701a 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -144,15 +144,15 @@ TEST_F(ObjectTest, AllocObjectArray) {
TEST_F(ObjectTest, AllocArray) {
ScopedObjectAccess soa(Thread::Current());
Class* c = class_linker_->FindSystemClass("[I");
- SirtRef<Array> a(soa.Self(), Array::Alloc(soa.Self(), c, 1));
+ SirtRef<Array> a(soa.Self(), Array::Alloc<kMovingCollector, true>(soa.Self(), c, 1));
ASSERT_TRUE(c == a->GetClass());
c = class_linker_->FindSystemClass("[Ljava/lang/Object;");
- a.reset(Array::Alloc(soa.Self(), c, 1));
+ a.reset(Array::Alloc<kMovingCollector, true>(soa.Self(), c, 1));
ASSERT_TRUE(c == a->GetClass());
c = class_linker_->FindSystemClass("[[Ljava/lang/Object;");
- a.reset(Array::Alloc(soa.Self(), c, 1));
+ a.reset(Array::Alloc<kMovingCollector, true>(soa.Self(), c, 1));
ASSERT_TRUE(c == a->GetClass());
}
@@ -269,8 +269,9 @@ TEST_F(ObjectTest, StaticFieldFromCode) {
const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader)[0];
CHECK(dex_file != NULL);
+ SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<ClassLoader*>(class_loader));
Class* klass =
- class_linker_->FindClass("LStaticsFromCode;", soa.Decode<ClassLoader*>(class_loader));
+ class_linker_->FindClass("LStaticsFromCode;", loader);
ArtMethod* clinit = klass->FindClassInitializer();
const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
ASSERT_TRUE(klass_string_id != NULL);
@@ -392,6 +393,7 @@ TEST_F(ObjectTest, StringLength) {
}
TEST_F(ObjectTest, DescriptorCompare) {
+ // Two class loaders conflict in compile_time_class_paths_.
ScopedObjectAccess soa(Thread::Current());
ClassLinker* linker = class_linker_;
@@ -400,9 +402,9 @@ TEST_F(ObjectTest, DescriptorCompare) {
SirtRef<ClassLoader> class_loader_1(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader_1));
SirtRef<ClassLoader> class_loader_2(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader_2));
- Class* klass1 = linker->FindClass("LProtoCompare;", class_loader_1.get());
+ Class* klass1 = linker->FindClass("LProtoCompare;", class_loader_1);
ASSERT_TRUE(klass1 != NULL);
- Class* klass2 = linker->FindClass("LProtoCompare2;", class_loader_2.get());
+ Class* klass2 = linker->FindClass("LProtoCompare2;", class_loader_2);
ASSERT_TRUE(klass2 != NULL);
ArtMethod* m1_1 = klass1->GetVirtualMethod(0);
@@ -468,8 +470,8 @@ TEST_F(ObjectTest, InstanceOf) {
jobject jclass_loader = LoadDex("XandY");
SirtRef<ClassLoader> class_loader(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader));
- Class* X = class_linker_->FindClass("LX;", class_loader.get());
- Class* Y = class_linker_->FindClass("LY;", class_loader.get());
+ Class* X = class_linker_->FindClass("LX;", class_loader);
+ Class* Y = class_linker_->FindClass("LY;", class_loader);
ASSERT_TRUE(X != NULL);
ASSERT_TRUE(Y != NULL);
@@ -501,8 +503,8 @@ TEST_F(ObjectTest, IsAssignableFrom) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("XandY");
SirtRef<ClassLoader> class_loader(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader));
- Class* X = class_linker_->FindClass("LX;", class_loader.get());
- Class* Y = class_linker_->FindClass("LY;", class_loader.get());
+ Class* X = class_linker_->FindClass("LX;", class_loader);
+ Class* Y = class_linker_->FindClass("LY;", class_loader);
EXPECT_TRUE(X->IsAssignableFrom(X));
EXPECT_TRUE(X->IsAssignableFrom(Y));
@@ -538,17 +540,17 @@ TEST_F(ObjectTest, IsAssignableFromArray) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("XandY");
SirtRef<ClassLoader> class_loader(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader));
- Class* X = class_linker_->FindClass("LX;", class_loader.get());
- Class* Y = class_linker_->FindClass("LY;", class_loader.get());
+ Class* X = class_linker_->FindClass("LX;", class_loader);
+ Class* Y = class_linker_->FindClass("LY;", class_loader);
ASSERT_TRUE(X != NULL);
ASSERT_TRUE(Y != NULL);
- Class* YA = class_linker_->FindClass("[LY;", class_loader.get());
- Class* YAA = class_linker_->FindClass("[[LY;", class_loader.get());
+ Class* YA = class_linker_->FindClass("[LY;", class_loader);
+ Class* YAA = class_linker_->FindClass("[[LY;", class_loader);
ASSERT_TRUE(YA != NULL);
ASSERT_TRUE(YAA != NULL);
- Class* XAA = class_linker_->FindClass("[[LX;", class_loader.get());
+ Class* XAA = class_linker_->FindClass("[[LX;", class_loader);
ASSERT_TRUE(XAA != NULL);
Class* O = class_linker_->FindSystemClass("Ljava/lang/Object;");
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index 9d76c6bc11..32a50fe470 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -39,19 +39,19 @@ void StackTraceElement::ResetClass() {
}
StackTraceElement* StackTraceElement::Alloc(Thread* self,
- String* declaring_class,
- String* method_name,
- String* file_name,
+ SirtRef<String>& declaring_class,
+ SirtRef<String>& method_name,
+ SirtRef<String>& file_name,
int32_t line_number) {
StackTraceElement* trace =
down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
if (LIKELY(trace != NULL)) {
trace->SetFieldObject(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
- const_cast<String*>(declaring_class), false);
+ declaring_class.get(), false);
trace->SetFieldObject(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
- const_cast<String*>(method_name), false);
+ method_name.get(), false);
trace->SetFieldObject(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_),
- const_cast<String*>(file_name), false);
+ file_name.get(), false);
trace->SetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_),
line_number, false);
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index a9751f9988..2af512823e 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_STACK_TRACE_ELEMENT_H_
#include "object.h"
+#include "sirt_ref.h"
namespace art {
@@ -49,9 +50,9 @@ class MANAGED StackTraceElement : public Object {
}
static StackTraceElement* Alloc(Thread* self,
- String* declaring_class,
- String* method_name,
- String* file_name,
+ SirtRef<String>& declaring_class,
+ SirtRef<String>& method_name,
+ SirtRef<String>& file_name,
int32_t line_number)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 9c93f17f8e..b372fe7f34 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -123,8 +123,8 @@ String* String::AllocFromUtf16(Thread* self,
int32_t hash_code) {
CHECK(utf16_data_in != NULL || utf16_length == 0);
String* string = Alloc(self, GetJavaLangString(), utf16_length);
- if (string == NULL) {
- return NULL;
+ if (UNLIKELY(string == nullptr)) {
+ return nullptr;
}
// TODO: use 16-bit wide memset variant
CharArray* array = const_cast<CharArray*>(string->GetCharArray());
@@ -143,8 +143,8 @@ String* String::AllocFromUtf16(Thread* self,
}
String* String::AllocFromModifiedUtf8(Thread* self, const char* utf) {
- if (utf == NULL) {
- return NULL;
+ if (UNLIKELY(utf == nullptr)) {
+ return nullptr;
}
size_t char_count = CountModifiedUtf8Chars(utf);
return AllocFromModifiedUtf8(self, char_count, utf);
@@ -153,8 +153,8 @@ String* String::AllocFromModifiedUtf8(Thread* self, const char* utf) {
String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
const char* utf8_data_in) {
String* string = Alloc(self, GetJavaLangString(), utf16_length);
- if (string == NULL) {
- return NULL;
+ if (UNLIKELY(string == nullptr)) {
+ return nullptr;
}
uint16_t* utf16_data_out =
const_cast<uint16_t*>(string->GetCharArray()->GetData());
@@ -164,22 +164,21 @@ String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
}
String* String::Alloc(Thread* self, Class* java_lang_String, int32_t utf16_length) {
- SirtRef<CharArray> array(self, CharArray::Alloc(self, utf16_length));
- if (array.get() == NULL) {
- return NULL;
+ CharArray* array = CharArray::Alloc(self, utf16_length);
+ if (UNLIKELY(array == nullptr)) {
+ return nullptr;
}
- return Alloc(self, java_lang_String, array.get());
+ return Alloc(self, java_lang_String, array);
}
String* String::Alloc(Thread* self, Class* java_lang_String, CharArray* array) {
// Hold reference in case AllocObject causes GC.
SirtRef<CharArray> array_ref(self, array);
String* string = down_cast<String*>(java_lang_String->AllocObject(self));
- if (string == NULL) {
- return NULL;
+ if (LIKELY(string != nullptr)) {
+ string->SetArray(array_ref.get());
+ string->SetCount(array_ref->GetLength());
}
- string->SetArray(array);
- string->SetCount(array->GetLength());
return string;
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 01d8f318ff..7520c4d33c 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -117,10 +117,8 @@ class MANAGED String : public Object {
private:
void SetHashCode(int32_t new_hash_code) {
- DCHECK_EQ(0u,
- GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false));
- SetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_),
- new_hash_code, false);
+ DCHECK_EQ(0u, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false));
+ SetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), new_hash_code, false);
}
void SetCount(int32_t new_count) {
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 2abfd3df41..7fada9ef81 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -128,6 +128,10 @@ bool Monitor::Install(Thread* self) {
LOG(FATAL) << "Inflating unlocked lock word";
break;
}
+ default: {
+ LOG(FATAL) << "Invalid monitor state " << lw.GetState();
+ return false;
+ }
}
LockWord fat(this);
// Publish the updated lock word, which may race with other threads.
@@ -140,8 +144,7 @@ bool Monitor::Install(Thread* self) {
}
Monitor::~Monitor() {
- CHECK(obj_ != NULL);
- CHECK_EQ(obj_->GetLockWord().GetState(), LockWord::kFatLocked);
+ // Deflated monitors have a null object.
}
/*
@@ -559,6 +562,43 @@ void Monitor::NotifyAll(Thread* self) {
}
}
+bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
+ DCHECK(obj != nullptr);
+ LockWord lw(obj->GetLockWord());
+ // If the lock isn't an inflated monitor, then we don't need to deflate anything.
+ if (lw.GetState() == LockWord::kFatLocked) {
+ Monitor* monitor = lw.FatLockMonitor();
+ CHECK(monitor != nullptr);
+ MutexLock mu(self, monitor->monitor_lock_);
+ Thread* owner = monitor->owner_;
+ if (owner != nullptr) {
+ // Can't deflate if we are locked and have a hash code.
+ if (monitor->HasHashCode()) {
+ return false;
+ }
+ // Can't deflate if our lock count is too high.
+ if (monitor->lock_count_ > LockWord::kThinLockMaxCount) {
+ return false;
+ }
+ // Can't deflate if we have anybody waiting on the condition variable.
+ if (monitor->monitor_contenders_.GetNumWaiters() > 0) {
+ return false;
+ }
+ // Deflate to a thin lock.
+ obj->SetLockWord(LockWord::FromThinLockId(owner->GetTid(), monitor->lock_count_));
+ } else if (monitor->HasHashCode()) {
+ obj->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()));
+ } else {
+ // No lock and no hash, just put an empty lock word inside the object.
+ obj->SetLockWord(LockWord());
+ }
+ // The monitor is deflated; clear its object so that we know to delete this monitor during the
+ // next GC.
+ monitor->obj_ = nullptr;
+ }
+ return true;
+}
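Deflate above is the inverse of Inflate: from the cases in the function, an owned monitor with no hash code, a small enough lock count and no waiters collapses back to a thin lock; an unowned monitor collapses to a hash-only or empty lock word; anything else stays inflated. A minimal caller sketch, not part of this patch; the helper and call site are hypothetical:

static void TryDeflate(Thread* self, mirror::Object* obj)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (!Monitor::Deflate(self, obj)) {
    // Left inflated: owned with a hash code, lock count too deep, or threads are waiting.
    VLOG(monitor) << "monitor for " << obj << " is still in use; left inflated";
  }
}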
+
/*
* Changes the shape of a monitor from thin to fat, preserving the internal lock state. The calling
* thread must own the lock or the owner must be suspended. There's a race with other threads
@@ -577,13 +617,13 @@ void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t
}
}
-void Monitor::InflateThinLocked(Thread* self, mirror::Object* obj, LockWord lock_word,
+void Monitor::InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, LockWord lock_word,
uint32_t hash_code) {
DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
uint32_t owner_thread_id = lock_word.ThinLockOwner();
if (owner_thread_id == self->GetThreadId()) {
// We own the monitor, we can easily inflate it.
- Inflate(self, self, obj, hash_code);
+ Inflate(self, self, obj.get(), hash_code);
} else {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
// Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
@@ -598,7 +638,7 @@ void Monitor::InflateThinLocked(Thread* self, mirror::Object* obj, LockWord lock
if (lock_word.GetState() == LockWord::kThinLocked &&
lock_word.ThinLockOwner() == owner_thread_id) {
// Go ahead and inflate the lock.
- Inflate(self, owner, obj, hash_code);
+ Inflate(self, owner, obj.get(), hash_code);
}
thread_list->Resume(owner, false);
}
@@ -611,12 +651,13 @@ void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
DCHECK(obj != NULL);
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
+ SirtRef<mirror::Object> sirt_obj(self, obj);
while (true) {
- LockWord lock_word = obj->GetLockWord();
+ LockWord lock_word = sirt_obj->GetLockWord();
switch (lock_word.GetState()) {
case LockWord::kUnlocked: {
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0));
- if (obj->CasLockWord(lock_word, thin_locked)) {
+ if (sirt_obj->CasLockWord(lock_word, thin_locked)) {
return; // Success!
}
continue; // Go again.
@@ -628,11 +669,11 @@ void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
uint32_t new_count = lock_word.ThinLockCount() + 1;
if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
- obj->SetLockWord(thin_locked);
+ sirt_obj->SetLockWord(thin_locked);
return; // Success!
} else {
// We'd overflow the recursion count, so inflate the monitor.
- InflateThinLocked(self, obj, lock_word, 0);
+ InflateThinLocked(self, sirt_obj, lock_word, 0);
}
} else {
// Contention.
@@ -642,7 +683,7 @@ void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
NanoSleep(1000); // Sleep for 1us and re-attempt.
} else {
contention_count = 0;
- InflateThinLocked(self, obj, lock_word, 0);
+ InflateThinLocked(self, sirt_obj, lock_word, 0);
}
}
continue; // Start from the beginning.
@@ -654,9 +695,13 @@ void Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
}
case LockWord::kHashCode: {
// Inflate with the existing hashcode.
- Inflate(self, nullptr, obj, lock_word.GetHashCode());
+ Inflate(self, nullptr, sirt_obj.get(), lock_word.GetHashCode());
break;
}
+ default: {
+ LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
+ return;
+ }
}
}
}
@@ -666,11 +711,12 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
DCHECK(obj != NULL);
LockWord lock_word = obj->GetLockWord();
+ SirtRef<mirror::Object> sirt_obj(self, obj);
switch (lock_word.GetState()) {
case LockWord::kHashCode:
// Fall-through.
case LockWord::kUnlocked:
- FailedUnlock(obj, self, NULL, NULL);
+ FailedUnlock(sirt_obj.get(), self, NULL, NULL);
return false; // Failure.
case LockWord::kThinLocked: {
uint32_t thread_id = self->GetThreadId();
@@ -679,16 +725,16 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
// TODO: there's a race here with the owner dying while we unlock.
Thread* owner =
Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
- FailedUnlock(obj, self, owner, NULL);
+ FailedUnlock(sirt_obj.get(), self, owner, NULL);
return false; // Failure.
} else {
// We own the lock, decrease the recursion count.
if (lock_word.ThinLockCount() != 0) {
uint32_t new_count = lock_word.ThinLockCount() - 1;
LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
- obj->SetLockWord(thin_locked);
+ sirt_obj->SetLockWord(thin_locked);
} else {
- obj->SetLockWord(LockWord());
+ sirt_obj->SetLockWord(LockWord());
}
return true; // Success!
}
@@ -697,9 +743,10 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
Monitor* mon = lock_word.FatLockMonitor();
return mon->Unlock(self);
}
- default:
- LOG(FATAL) << "Unreachable";
+ default: {
+ LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
return false;
+ }
}
}
@@ -733,6 +780,10 @@ void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
}
case LockWord::kFatLocked:
break; // Already set for a wait.
+ default: {
+ LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
+ return;
+ }
}
Monitor* mon = lock_word.FatLockMonitor();
mon->Wait(self, ms, ns, interruptShouldThrow, why);
@@ -769,6 +820,10 @@ void Monitor::DoNotify(Thread* self, mirror::Object* obj, bool notify_all) {
}
return; // Success.
}
+ default: {
+ LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
+ return;
+ }
}
}
@@ -787,9 +842,10 @@ uint32_t Monitor::GetLockOwnerThreadId(mirror::Object* obj) {
Monitor* mon = lock_word.FatLockMonitor();
return mon->GetOwnerThreadId();
}
- default:
+ default: {
LOG(FATAL) << "Unreachable";
return ThreadList::kInvalidThreadId;
+ }
}
}
@@ -1011,7 +1067,8 @@ void MonitorList::SweepMonitorList(RootVisitor visitor, void* arg) {
for (auto it = list_.begin(); it != list_.end(); ) {
Monitor* m = *it;
mirror::Object* obj = m->GetObject();
- mirror::Object* new_obj = visitor(obj, arg);
+ // The object of a monitor can be null if we have deflated it.
+ mirror::Object* new_obj = obj != nullptr ? visitor(obj, arg) : nullptr;
if (new_obj == nullptr) {
VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
<< m->GetObject();
@@ -1031,6 +1088,8 @@ MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
switch (lock_word.GetState()) {
case LockWord::kUnlocked:
// Fall-through.
+ case LockWord::kForwardingAddress:
+ // Fall-through.
case LockWord::kHashCode:
break;
case LockWord::kThinLocked:
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 09cfafa042..d7de8a50cd 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -27,6 +27,7 @@
#include "atomic_integer.h"
#include "base/mutex.h"
#include "root_visitor.h"
+#include "sirt_ref.h"
#include "thread_state.h"
namespace art {
@@ -107,9 +108,12 @@ class Monitor {
return hash_code_.load() != 0;
}
- static void InflateThinLocked(Thread* self, mirror::Object* obj, LockWord lock_word,
+ static void InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, LockWord lock_word,
uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
+ static bool Deflate(Thread* self, mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
explicit Monitor(Thread* owner, mirror::Object* obj, int32_t hash_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index ab5eab3955..c9e0e83b27 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -161,7 +161,7 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j
ScopedObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
class_linker->RegisterDexFile(*dex_file);
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(javaLoader);
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(javaLoader));
mirror::Class* result = class_linker->DefineClass(descriptor.c_str(), class_loader, *dex_file,
*dex_class_def);
VLOG(class_linker) << "DexFile_defineClassNative returning " << result;
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index aef000cf10..f0efdc2074 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -53,18 +53,9 @@ static void VMRuntime_startJitCompilation(JNIEnv*, jobject) {
static void VMRuntime_disableJitCompilation(JNIEnv*, jobject) {
}
-static jobject VMRuntime_newNonMovableArray(JNIEnv* env,
- jobject,
- jclass javaElementClass,
+static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaElementClass,
jint length) {
ScopedFastNativeObjectAccess soa(env);
-#ifdef MOVING_GARBAGE_COLLECTOR
- // TODO: right now, we don't have a copying collector, so there's no need
- // to do anything special here, but we ought to pass the non-movability
- // through to the allocator.
- UNIMPLEMENTED(FATAL);
-#endif
-
mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
if (element_class == NULL) {
ThrowNullPointerException(NULL, "element class == null");
@@ -74,13 +65,13 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env,
ThrowNegativeArraySizeException(length);
return NULL;
}
-
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
std::string descriptor;
descriptor += "[";
descriptor += ClassHelper(element_class).GetDescriptor();
- mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), NULL);
- mirror::Array* result = mirror::Array::Alloc(soa.Self(), array_class, length);
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader);
+ mirror::Array* result = mirror::Array::Alloc<false, true>(soa.Self(), array_class, length);
return soa.AddLocalReference<jobject>(result);
}
@@ -94,7 +85,10 @@ static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
ThrowIllegalArgumentException(NULL, "not an array");
return 0;
}
- // TODO: we should also check that this is a non-movable array.
+ if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
+ ThrowRuntimeException("Trying to get address of movable array object");
+ return 0;
+ }
return reinterpret_cast<uintptr_t>(array->GetRawData(array->GetClass()->GetComponentSize()));
}
@@ -172,28 +166,7 @@ static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
}
static void VMRuntime_trimHeap(JNIEnv*, jobject) {
- uint64_t start_ns = NanoTime();
-
- // Trim the managed heap.
- gc::Heap* heap = Runtime::Current()->GetHeap();
- float managed_utilization = (static_cast<float>(heap->GetBytesAllocated()) /
- heap->GetTotalMemory());
- size_t managed_reclaimed = heap->Trim();
-
- uint64_t gc_heap_end_ns = NanoTime();
-
- // Trim the native heap.
- dlmalloc_trim(0);
- size_t native_reclaimed = 0;
- dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
-
- uint64_t end_ns = NanoTime();
-
- LOG(INFO) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
- << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
- << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
- << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
- << "%.";
+ Runtime::Current()->GetHeap()->Trim();
}
static void VMRuntime_concurrentGC(JNIEnv* env, jobject) {
@@ -212,7 +185,7 @@ static mirror::Object* PreloadDexCachesStringsVisitor(mirror::Object* root, void
}
// Based on ClassLinker::ResolveString.
-static void PreloadDexCachesResolveString(mirror::DexCache* dex_cache,
+static void PreloadDexCachesResolveString(SirtRef<mirror::DexCache>& dex_cache,
uint32_t string_idx,
StringTable& strings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -260,7 +233,7 @@ static void PreloadDexCachesResolveType(mirror::DexCache* dex_cache, uint32_t ty
}
// Based on ClassLinker::ResolveField.
-static void PreloadDexCachesResolveField(mirror::DexCache* dex_cache,
+static void PreloadDexCachesResolveField(SirtRef<mirror::DexCache>& dex_cache,
uint32_t field_idx,
bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -275,9 +248,9 @@ static void PreloadDexCachesResolveField(mirror::DexCache* dex_cache,
return;
}
if (is_static) {
- field = klass->FindStaticField(dex_cache, field_idx);
+ field = klass->FindStaticField(dex_cache.get(), field_idx);
} else {
- field = klass->FindInstanceField(dex_cache, field_idx);
+ field = klass->FindInstanceField(dex_cache.get(), field_idx);
}
if (field == NULL) {
return;
@@ -287,7 +260,7 @@ static void PreloadDexCachesResolveField(mirror::DexCache* dex_cache,
}
// Based on ClassLinker::ResolveMethod.
-static void PreloadDexCachesResolveMethod(mirror::DexCache* dex_cache,
+static void PreloadDexCachesResolveMethod(SirtRef<mirror::DexCache>& dex_cache,
uint32_t method_idx,
InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -304,14 +277,14 @@ static void PreloadDexCachesResolveMethod(mirror::DexCache* dex_cache,
switch (invoke_type) {
case kDirect:
case kStatic:
- method = klass->FindDirectMethod(dex_cache, method_idx);
+ method = klass->FindDirectMethod(dex_cache.get(), method_idx);
break;
case kInterface:
- method = klass->FindInterfaceMethod(dex_cache, method_idx);
+ method = klass->FindInterfaceMethod(dex_cache.get(), method_idx);
break;
case kSuper:
case kVirtual:
- method = klass->FindVirtualMethod(dex_cache, method_idx);
+ method = klass->FindVirtualMethod(dex_cache.get(), method_idx);
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
@@ -430,6 +403,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
Runtime* runtime = Runtime::Current();
ClassLinker* linker = runtime->GetClassLinker();
+ Thread* self = ThreadForEnv(env);
// We use a std::map to avoid heap allocating StringObjects to lookup in gDvm.literalStrings
StringTable strings;
@@ -441,7 +415,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
for (size_t i = 0; i< boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != NULL);
- mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
+ SirtRef<mirror::DexCache> dex_cache(self, linker->FindDexCache(*dex_file));
if (kPreloadDexCachesStrings) {
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
@@ -451,7 +425,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
if (kPreloadDexCachesTypes) {
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- PreloadDexCachesResolveType(dex_cache, i);
+ PreloadDexCachesResolveType(dex_cache.get(), i);
}
}
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 3591611185..33891078f8 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -61,7 +61,8 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean
}
std::string descriptor(DotToDescriptor(name.c_str()));
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(javaLoader);
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(javaLoader));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* c = class_linker->FindClass(descriptor.c_str(), class_loader);
if (c == NULL) {
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index a2d6b18026..808c9170d9 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -52,13 +52,15 @@ static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementCl
descriptor += ClassHelper(element_class).GetDescriptor();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), element_class->GetClassLoader());
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), element_class->GetClassLoader());
+ mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader);
if (UNLIKELY(array_class == NULL)) {
CHECK(soa.Self()->IsExceptionPending());
return NULL;
}
DCHECK(array_class->IsArrayClass());
- mirror::Array* new_array = mirror::Array::Alloc(soa.Self(), array_class, length);
+ mirror::Array* new_array = mirror::Array::Alloc<kMovingCollector, true>(
+ soa.Self(), array_class, length);
return soa.AddLocalReference<jobject>(new_array);
}
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index a92823a85d..809369ae81 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -23,20 +23,12 @@
namespace art {
-static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring javaName,
- jobjectArray javaInterfaces, jobject javaLoader,
- jobjectArray javaMethods, jobjectArray javaThrows) {
+static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring name, jobjectArray interfaces,
+ jobject loader, jobjectArray methods, jobjectArray throws) {
ScopedObjectAccess soa(env);
- mirror::String* name = soa.Decode<mirror::String*>(javaName);
- mirror::ObjectArray<mirror::Class>* interfaces =
- soa.Decode<mirror::ObjectArray<mirror::Class>*>(javaInterfaces);
- mirror::ClassLoader* loader = soa.Decode<mirror::ClassLoader*>(javaLoader);
- mirror::ObjectArray<mirror::ArtMethod>* methods =
- soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(javaMethods);
- mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >* throws =
- soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(javaThrows);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::Class* result = class_linker->CreateProxyClass(name, interfaces, loader, methods, throws);
+ mirror::Class* result = class_linker->CreateProxyClass(soa, name, interfaces, loader, methods,
+ throws);
return soa.AddLocalReference<jclass>(result);
}
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index d941ec31f0..1658d96de5 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -63,10 +63,6 @@ class ScopedFastNativeObjectAccess {
Locks::mutator_lock_->AssertSharedHeld(Self());
// Don't work with raw objects in non-runnable states.
DCHECK_EQ(Self()->GetState(), kRunnable);
-#ifdef MOVING_GARBAGE_COLLECTOR
- // TODO: we should make these unique weak globals if Field instances can ever move.
- UNIMPLEMENTED(WARNING);
-#endif
return reinterpret_cast<mirror::ArtField*>(fid);
}
@@ -83,6 +79,10 @@ class ScopedFastNativeObjectAccess {
return NULL;
}
+ if (kIsDebugBuild) {
+ Runtime::Current()->GetHeap()->VerifyObject(obj);
+ }
+
DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
IndirectReferenceTable& locals = Env()->locals;
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index f724776f2e..e37510c228 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -67,12 +67,9 @@ class ObjectLock {
class ClassHelper {
public:
- ClassHelper(const mirror::Class* c = NULL, ClassLinker* l = NULL)
+ explicit ClassHelper(const mirror::Class* c )
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : class_linker_(l),
- dex_cache_(NULL),
- dex_file_(NULL),
- interface_type_list_(NULL),
+ : interface_type_list_(NULL),
klass_(NULL) {
if (c != NULL) {
ChangeClass(c);
@@ -82,13 +79,9 @@ class ClassHelper {
void ChangeClass(const mirror::Class* new_c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(new_c != NULL) << "klass_=" << klass_; // Log what we were changing from if any
- CHECK(new_c->IsClass()) << "new_c=" << new_c;
- if (dex_cache_ != NULL) {
- mirror::DexCache* new_c_dex_cache = new_c->GetDexCache();
- if (new_c_dex_cache != dex_cache_) {
- dex_cache_ = new_c_dex_cache;
- dex_file_ = NULL;
- }
+ if (!new_c->IsClass()) {
+ LOG(FATAL) << "new_c=" << new_c << " cc " << new_c->GetClass() << " ccc "
+ << ((new_c->GetClass() != nullptr) ? new_c->GetClass()->GetClass() : NULL);
}
klass_ = new_c;
interface_type_list_ = NULL;
@@ -201,20 +194,11 @@ class ClassHelper {
}
const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (dex_file_ == NULL) {
- dex_file_ = GetDexCache()->GetDexFile();
- }
- return *dex_file_;
+ return *GetDexCache()->GetDexFile();
}
mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::DexCache* result = dex_cache_;
- if (result == NULL) {
- DCHECK(klass_ != NULL);
- result = klass_->GetDexCache();
- dex_cache_ = result;
- }
- return result;
+ return klass_->GetDexCache();
}
private:
@@ -231,18 +215,10 @@ class ClassHelper {
return result;
}
- ClassLinker* GetClassLinker() {
- ClassLinker* result = class_linker_;
- if (result == NULL) {
- result = Runtime::Current()->GetClassLinker();
- class_linker_ = result;
- }
- return result;
+ ClassLinker* GetClassLinker() ALWAYS_INLINE {
+ return Runtime::Current()->GetClassLinker();
}
- ClassLinker* class_linker_;
- mirror::DexCache* dex_cache_;
- const DexFile* dex_file_;
const DexFile::TypeList* interface_type_list_;
const mirror::Class* klass_;
std::string descriptor_;
@@ -252,20 +228,11 @@ class ClassHelper {
class FieldHelper {
public:
- FieldHelper() : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), field_(NULL) {}
- explicit FieldHelper(const mirror::ArtField* f) : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), field_(f) {}
- FieldHelper(const mirror::ArtField* f, ClassLinker* l)
- : class_linker_(l), dex_cache_(NULL), dex_file_(NULL), field_(f) {}
+ FieldHelper() : field_(NULL) {}
+ explicit FieldHelper(const mirror::ArtField* f) : field_(f) {}
void ChangeField(const mirror::ArtField* new_f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(new_f != NULL);
- if (dex_cache_ != NULL) {
- mirror::DexCache* new_f_dex_cache = new_f->GetDeclaringClass()->GetDexCache();
- if (new_f_dex_cache != dex_cache_) {
- dex_cache_ = new_f_dex_cache;
- dex_file_ = NULL;
- }
- }
field_ = new_f;
}
@@ -343,31 +310,14 @@ class FieldHelper {
private:
mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::DexCache* result = dex_cache_;
- if (result == NULL) {
- result = field_->GetDeclaringClass()->GetDexCache();
- dex_cache_ = result;
- }
- return result;
+ return field_->GetDeclaringClass()->GetDexCache();
}
- ClassLinker* GetClassLinker() {
- ClassLinker* result = class_linker_;
- if (result == NULL) {
- result = Runtime::Current()->GetClassLinker();
- class_linker_ = result;
- }
- return result;
+ ClassLinker* GetClassLinker() ALWAYS_INLINE {
+ return Runtime::Current()->GetClassLinker();
}
const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (dex_file_ == NULL) {
- dex_file_ = GetDexCache()->GetDexFile();
- }
- return *dex_file_;
+ return *GetDexCache()->GetDexFile();
}
-
- ClassLinker* class_linker_;
- mirror::DexCache* dex_cache_;
- const DexFile* dex_file_;
const mirror::ArtField* field_;
std::string declaring_class_descriptor_;
@@ -377,38 +327,17 @@ class FieldHelper {
class MethodHelper {
public:
MethodHelper()
- : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL),
+ : method_(NULL), shorty_(NULL),
shorty_len_(0) {}
explicit MethodHelper(const mirror::ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : class_linker_(NULL), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL),
- shorty_len_(0) {
- SetMethod(m);
- }
-
- MethodHelper(const mirror::ArtMethod* m, ClassLinker* l)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : class_linker_(l), dex_cache_(NULL), dex_file_(NULL), method_(NULL), shorty_(NULL),
- shorty_len_(0) {
+ : method_(NULL), shorty_(NULL), shorty_len_(0) {
SetMethod(m);
}
void ChangeMethod(mirror::ArtMethod* new_m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(new_m != NULL);
- if (dex_cache_ != NULL) {
- mirror::Class* klass = new_m->GetDeclaringClass();
- if (klass->IsProxyClass()) {
- dex_cache_ = NULL;
- dex_file_ = NULL;
- } else {
- mirror::DexCache* new_m_dex_cache = klass->GetDexCache();
- if (new_m_dex_cache != dex_cache_) {
- dex_cache_ = new_m_dex_cache;
- dex_file_ = NULL;
- }
- }
- }
SetMethod(new_m);
shorty_ = NULL;
}
@@ -444,7 +373,8 @@ class MethodHelper {
const DexFile& dex_file = GetDexFile();
uint32_t dex_method_idx = method_->GetDexMethodIndex();
const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
- return GetClassLinker()->ResolveString(dex_file, method_id.name_idx_, GetDexCache());
+ SirtRef<mirror::DexCache> dex_cache(Thread::Current(), GetDexCache());
+ return GetClassLinker()->ResolveString(dex_file, method_id.name_idx_, dex_cache);
}
const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -622,28 +552,18 @@ class MethodHelper {
}
const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const DexFile* result = dex_file_;
- if (result == NULL) {
- const mirror::DexCache* dex_cache = GetDexCache();
- result = dex_file_ = dex_cache->GetDexFile();
- }
- return *result;
+ return *GetDexCache()->GetDexFile();
}
mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::DexCache* result = dex_cache_;
- if (result == NULL) {
- mirror::Class* klass = method_->GetDeclaringClass();
- result = klass->GetDexCache();
- dex_cache_ = result;
- }
- return result;
+ return method_->GetDeclaringClass()->GetDexCache();
}
mirror::String* ResolveString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* s = method_->GetDexCacheStrings()->Get(string_idx);
if (UNLIKELY(s == NULL)) {
- s = GetClassLinker()->ResolveString(GetDexFile(), string_idx, GetDexCache());
+ SirtRef<mirror::DexCache> dex_cache(Thread::Current(), GetDexCache());
+ s = GetClassLinker()->ResolveString(GetDexFile(), string_idx, dex_cache);
}
return s;
}
@@ -705,18 +625,10 @@ class MethodHelper {
method_ = method;
}
- ClassLinker* GetClassLinker() {
- ClassLinker* result = class_linker_;
- if (result == NULL) {
- result = Runtime::Current()->GetClassLinker();
- class_linker_ = result;
- }
- return result;
+ ClassLinker* GetClassLinker() ALWAYS_INLINE {
+ return Runtime::Current()->GetClassLinker();
}
- ClassLinker* class_linker_;
- mirror::DexCache* dex_cache_;
- const DexFile* dex_file_;
const mirror::ArtMethod* method_;
const char* shorty_;
uint32_t shorty_len_;
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index e95fdb9226..6f65bff899 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -233,7 +233,7 @@ void ReferenceTable::Dump(std::ostream& os, const Table& entries) {
void ReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
for (auto& ref : entries_) {
- ref = visitor(const_cast<mirror::Object*>(ref), arg);
+ ref = visitor(ref, arg);
}
}
diff --git a/runtime/root_visitor.h b/runtime/root_visitor.h
index a2d898b43c..d52f351151 100644
--- a/runtime/root_visitor.h
+++ b/runtime/root_visitor.h
@@ -23,11 +23,13 @@ class Object;
} // namespace mirror
class StackVisitor;
+// Returns the new address of the object, returns root if it has not moved.
typedef mirror::Object* (RootVisitor)(mirror::Object* root, void* arg)
__attribute__((warn_unused_result));
typedef void (VerifyRootVisitor)(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor* visitor);
typedef bool (IsMarkedTester)(const mirror::Object* object, void* arg);
+typedef void (ObjectVisitorCallback)(mirror::Object* obj, void* arg);
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 53c9b2efbb..8e39023cf5 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -121,7 +121,7 @@ Runtime::~Runtime() {
Trace::Shutdown();
// Make sure to let the GC complete if it is running.
- heap_->WaitForConcurrentGcToComplete(self);
+ heap_->WaitForGcToComplete(self);
heap_->DeleteThreadPool();
// Make sure our internal threads are dead before we start tearing down things they're using.
@@ -821,6 +821,11 @@ void Runtime::StartSignalCatcher() {
}
}
+bool Runtime::IsShuttingDown(Thread* self) {
+ MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+ return IsShuttingDownLocked();
+}
+
void Runtime::StartDaemonThreads() {
VLOG(startup) << "Runtime::StartDaemonThreads entering";
@@ -861,7 +866,6 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) {
is_compiler_ = options->is_compiler_;
is_zygote_ = options->is_zygote_;
- is_concurrent_gc_enabled_ = options->is_concurrent_gc_enabled_;
is_explicit_gc_disabled_ = options->is_explicit_gc_disabled_;
compiler_filter_ = options->compiler_filter_;
@@ -926,12 +930,13 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) {
GetHeap()->EnableObjectValidation();
CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
- if (GetHeap()->GetContinuousSpaces()[0]->IsImageSpace()) {
- class_linker_ = ClassLinker::CreateFromImage(intern_table_);
+ class_linker_ = new ClassLinker(intern_table_);
+ if (GetHeap()->HasImageSpace()) {
+ class_linker_->InitFromImage();
} else {
CHECK(options->boot_class_path_ != NULL);
CHECK_NE(options->boot_class_path_->size(), 0U);
- class_linker_ = ClassLinker::CreateFromCompiler(*options->boot_class_path_, intern_table_);
+ class_linker_->InitFromCompiler(*options->boot_class_path_);
}
CHECK(class_linker_ != NULL);
verifier::MethodVerifier::Init();
@@ -1174,16 +1179,20 @@ void Runtime::VisitNonThreadRoots(RootVisitor* visitor, void* arg) {
visitor(pre_allocated_OutOfMemoryError_, arg));
DCHECK(pre_allocated_OutOfMemoryError_ != nullptr);
}
- resolution_method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(resolution_method_, arg));
+ resolution_method_ = down_cast<mirror::ArtMethod*>(visitor(resolution_method_, arg));
DCHECK(resolution_method_ != nullptr);
- imt_conflict_method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(imt_conflict_method_, arg));
- DCHECK(imt_conflict_method_ != nullptr);
- default_imt_ = reinterpret_cast<mirror::ObjectArray<mirror::ArtMethod>*>(visitor(default_imt_, arg));
- DCHECK(default_imt_ != nullptr);
+ if (HasImtConflictMethod()) {
+ imt_conflict_method_ = down_cast<mirror::ArtMethod*>(visitor(imt_conflict_method_, arg));
+ }
+ if (HasDefaultImt()) {
+ default_imt_ = down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(visitor(default_imt_, arg));
+ }
+
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- callee_save_methods_[i] = reinterpret_cast<mirror::ArtMethod*>(
- visitor(callee_save_methods_[i], arg));
- DCHECK(callee_save_methods_[i] != nullptr);
+ if (callee_save_methods_[i] != nullptr) {
+ callee_save_methods_[i] = down_cast<mirror::ArtMethod*>(
+ visitor(callee_save_methods_[i], arg));
+ }
}
}
@@ -1201,49 +1210,45 @@ mirror::ObjectArray<mirror::ArtMethod>* Runtime::CreateDefaultImt(ClassLinker* c
Thread* self = Thread::Current();
SirtRef<mirror::ObjectArray<mirror::ArtMethod> > imtable(self, cl->AllocArtMethodArray(self, 64));
mirror::ArtMethod* imt_conflict_method = Runtime::Current()->GetImtConflictMethod();
- for (size_t i = 0; i < 64; i++) {
+ for (size_t i = 0; i < static_cast<size_t>(imtable->GetLength()); i++) {
imtable->Set(i, imt_conflict_method);
}
return imtable.get();
}
mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
- mirror::Class* method_class = mirror::ArtMethod::GetJavaLangReflectArtMethod();
Thread* self = Thread::Current();
- SirtRef<mirror::ArtMethod>
- method(self, down_cast<mirror::ArtMethod*>(method_class->AllocObject(self)));
- method->SetDeclaringClass(method_class);
+ Runtime* r = Runtime::Current();
+ ClassLinker* cl = r->GetClassLinker();
+ SirtRef<mirror::ArtMethod> method(self, cl->AllocArtMethod(self));
+ method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for imt conflict method saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
- Runtime* r = Runtime::Current();
- ClassLinker* cl = r->GetClassLinker();
method->SetEntryPointFromCompiledCode(r->IsCompiler() ? NULL : GetImtConflictTrampoline(cl));
return method.get();
}
mirror::ArtMethod* Runtime::CreateResolutionMethod() {
- mirror::Class* method_class = mirror::ArtMethod::GetJavaLangReflectArtMethod();
Thread* self = Thread::Current();
- SirtRef<mirror::ArtMethod>
- method(self, down_cast<mirror::ArtMethod*>(method_class->AllocObject(self)));
- method->SetDeclaringClass(method_class);
+ Runtime* r = Runtime::Current();
+ ClassLinker* cl = r->GetClassLinker();
+ SirtRef<mirror::ArtMethod> method(self, cl->AllocArtMethod(self));
+ method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for resolution method saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
- Runtime* r = Runtime::Current();
- ClassLinker* cl = r->GetClassLinker();
method->SetEntryPointFromCompiledCode(r->IsCompiler() ? NULL : GetResolutionTrampoline(cl));
return method.get();
}
mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_set,
- CalleeSaveType type) {
- mirror::Class* method_class = mirror::ArtMethod::GetJavaLangReflectArtMethod();
+ CalleeSaveType type) {
Thread* self = Thread::Current();
- SirtRef<mirror::ArtMethod>
- method(self, down_cast<mirror::ArtMethod*>(method_class->AllocObject(self)));
- method->SetDeclaringClass(method_class);
+ Runtime* r = Runtime::Current();
+ ClassLinker* cl = r->GetClassLinker();
+ SirtRef<mirror::ArtMethod> method(self, cl->AllocArtMethod(self));
+ method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for callee saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
method->SetEntryPointFromCompiledCode(NULL);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 24b4c87666..d025d47b75 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -149,10 +149,6 @@ class Runtime {
return is_zygote_;
}
- bool IsConcurrentGcEnabled() const {
- return is_concurrent_gc_enabled_;
- }
-
bool IsExplicitGcDisabled() const {
return is_explicit_gc_disabled_;
}
@@ -203,7 +199,8 @@ class Runtime {
// Starts a runtime, which may cause threads to be started and code to run.
bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
- bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
+ bool IsShuttingDown(Thread* self);
+ bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
return shutting_down_;
}
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index c39cdb2679..1ca6c4e4fa 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -34,9 +34,8 @@ class ScopedThreadStateChange {
if (UNLIKELY(self_ == NULL)) {
// Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
old_thread_state_ = kTerminated;
- MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown());
+ CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
} else {
bool runnable_transition;
DCHECK_EQ(self, Thread::Current());
@@ -63,9 +62,8 @@ class ScopedThreadStateChange {
~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
if (UNLIKELY(self_ == NULL)) {
if (!expected_has_no_thread_) {
- MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
+ bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown(nullptr);
CHECK(shutting_down);
}
} else {
@@ -167,6 +165,10 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
return NULL;
}
+ if (kIsDebugBuild) {
+ Runtime::Current()->GetHeap()->VerifyObject(obj);
+ }
+
DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
IndirectReferenceTable& locals = Env()->locals;
@@ -185,7 +187,6 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
}
}
#endif
-
if (Vm()->work_around_app_jni_bugs) {
// Hand out direct pointers to support broken old apps.
return reinterpret_cast<T>(obj);
@@ -206,10 +207,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states.
-#ifdef MOVING_GARBAGE_COLLECTOR
- // TODO: we should make these unique weak globals if Field instances can ever move.
- UNIMPLEMENTED(WARNING);
-#endif
+ CHECK(!kMovingFields);
return reinterpret_cast<mirror::ArtField*>(fid);
}
@@ -217,9 +215,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states.
-#ifdef MOVING_GARBAGE_COLLECTOR
- UNIMPLEMENTED(WARNING);
-#endif
+ CHECK(!kMovingFields);
return reinterpret_cast<jfieldID>(field);
}
@@ -227,10 +223,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states.
-#ifdef MOVING_GARBAGE_COLLECTOR
- // TODO: we should make these unique weak globals if Method instances can ever move.
- UNIMPLEMENTED(WARNING);
-#endif
+ CHECK(!kMovingMethods);
return reinterpret_cast<mirror::ArtMethod*>(mid);
}
@@ -238,9 +231,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states.
-#ifdef MOVING_GARBAGE_COLLECTOR
- UNIMPLEMENTED(WARNING);
-#endif
+ CHECK(!kMovingMethods);
return reinterpret_cast<jmethodID>(method);
}
diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h
index a1f8a6693f..56d81ecacc 100644
--- a/runtime/sirt_ref.h
+++ b/runtime/sirt_ref.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_SIRT_REF_H_
#define ART_RUNTIME_SIRT_REF_H_
+#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
#include "thread.h"
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 5d3a9a5234..a50538399c 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -22,12 +22,17 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
+#include "runtime.h"
#include "thread_list.h"
#include "throw_location.h"
#include "vmap_table.h"
namespace art {
+bool ShadowFrame::VerifyReference(const mirror::Object* val) const {
+ return !Runtime::Current()->GetHeap()->IsInTempSpace(val);
+}
+
mirror::Object* ShadowFrame::GetThisObject() const {
mirror::ArtMethod* m = GetMethod();
if (m->IsStatic()) {
diff --git a/runtime/stack.h b/runtime/stack.h
index a4b93bc407..3d6b06a32d 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -150,10 +150,15 @@ class ShadowFrame {
return *reinterpret_cast<unaligned_double*>(vreg);
}
+ template <bool kChecked = false>
mirror::Object* GetVRegReference(size_t i) const {
DCHECK_LT(i, NumberOfVRegs());
if (HasReferenceArray()) {
mirror::Object* ref = References()[i];
+ if (kChecked) {
+ CHECK(VerifyReference(ref)) << "VReg " << i << "(" << ref
+ << ") is in protected space, reference array " << true;
+ }
// If the vreg reference is not equal to the vreg then the vreg reference is stale.
if (reinterpret_cast<uint32_t>(ref) != vregs_[i]) {
return nullptr;
@@ -161,7 +166,12 @@ class ShadowFrame {
return ref;
} else {
const uint32_t* vreg = &vregs_[i];
- return *reinterpret_cast<mirror::Object* const*>(vreg);
+ mirror::Object* ref = *reinterpret_cast<mirror::Object* const*>(vreg);
+ if (kChecked) {
+ CHECK(VerifyReference(ref)) << "VReg " << i
+ << "(" << ref << ") is in protected space, reference array " << false;
+ }
+ return ref;
}
}
@@ -174,12 +184,22 @@ class ShadowFrame {
DCHECK_LT(i, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
*reinterpret_cast<int32_t*>(vreg) = val;
+ // This is needed for moving collectors since these can update the vreg references if they
+ // happen to agree with references in the reference array.
+ if (kMovingCollector && HasReferenceArray()) {
+ References()[i] = nullptr;
+ }
}
void SetVRegFloat(size_t i, float val) {
DCHECK_LT(i, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
*reinterpret_cast<float*>(vreg) = val;
+ // This is needed for moving collectors since these can update the vreg references if they
+ // happen to agree with references in the reference array.
+ if (kMovingCollector && HasReferenceArray()) {
+ References()[i] = nullptr;
+ }
}
void SetVRegLong(size_t i, int64_t val) {
@@ -188,6 +208,12 @@ class ShadowFrame {
// Alignment attribute required for GCC 4.8
typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
*reinterpret_cast<unaligned_int64*>(vreg) = val;
+ // This is needed for moving collectors since these can update the vreg references if they
+ // happen to agree with references in the reference array.
+ if (kMovingCollector && HasReferenceArray()) {
+ References()[i] = nullptr;
+ References()[i + 1] = nullptr;
+ }
}
void SetVRegDouble(size_t i, double val) {
@@ -196,10 +222,18 @@ class ShadowFrame {
// Alignment attribute required for GCC 4.8
typedef double unaligned_double __attribute__ ((aligned (4)));
*reinterpret_cast<unaligned_double*>(vreg) = val;
+ // This is needed for moving collectors since these can update the vreg references if they
+ // happen to agree with references in the reference array.
+ if (kMovingCollector && HasReferenceArray()) {
+ References()[i] = nullptr;
+ References()[i + 1] = nullptr;
+ }
}
void SetVRegReference(size_t i, mirror::Object* val) {
DCHECK_LT(i, NumberOfVRegs());
+ DCHECK(!kMovingCollector || VerifyReference(val))
+ << "VReg " << i << "(" << val << ") is in protected space";
uint32_t* vreg = &vregs_[i];
*reinterpret_cast<mirror::Object**>(vreg) = val;
if (HasReferenceArray()) {
@@ -280,6 +314,8 @@ class ShadowFrame {
return reinterpret_cast<mirror::Object* const*>(vreg_end);
}
+ bool VerifyReference(const mirror::Object* val) const;
+
mirror::Object** References() {
return const_cast<mirror::Object**>(const_cast<const ShadowFrame*>(this)->References());
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9751076235..1f6dd69110 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -152,7 +152,7 @@ void* Thread::CreateCallback(void* arg) {
MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
// Check that if we got here we cannot be shutting down (as shutdown should never have started
// while threads are being born).
- CHECK(!runtime->IsShuttingDown());
+ CHECK(!runtime->IsShuttingDownLocked());
self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
Runtime::Current()->EndThreadBirth();
}
@@ -241,7 +241,7 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
bool thread_start_during_shutdown = false;
{
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
- if (runtime->IsShuttingDown()) {
+ if (runtime->IsShuttingDownLocked()) {
thread_start_during_shutdown = true;
} else {
runtime->StartThreadBirth();
@@ -328,7 +328,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g
}
{
MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
- if (runtime->IsShuttingDown()) {
+ if (runtime->IsShuttingDownLocked()) {
LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
return NULL;
} else {
@@ -1352,13 +1352,12 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
*stack_depth = depth;
}
- MethodHelper mh;
for (int32_t i = 0; i < depth; ++i) {
mirror::ObjectArray<mirror::Object>* method_trace =
soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
// Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
- mh.ChangeMethod(method);
+ MethodHelper mh(method);
mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
uint32_t dex_pc = pc_trace->Get(i);
int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
@@ -1385,11 +1384,8 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
SirtRef<mirror::String> source_name_object(soa.Self(),
mirror::String::AllocFromModifiedUtf8(soa.Self(),
source_file));
- mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(),
- class_name_object.get(),
- method_name_object.get(),
- source_name_object.get(),
- line_number);
+ mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
+ soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
if (obj == NULL) {
return NULL;
}
@@ -1437,8 +1433,10 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
if (throw_location.GetMethod() != NULL) {
cl = throw_location.GetMethod()->GetDeclaringClass()->GetClassLoader();
}
+ SirtRef<mirror::ClassLoader> class_loader(this, cl);
SirtRef<mirror::Class>
- exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor, cl));
+ exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor,
+ class_loader));
if (UNLIKELY(exception_class.get() == NULL)) {
CHECK(IsExceptionPending());
LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
@@ -1453,6 +1451,12 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
SirtRef<mirror::Throwable> exception(this,
down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));
+ // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
+ if (exception.get() == nullptr) {
+ SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
+ return;
+ }
+
// Choose an appropriate constructor and set up the arguments.
const char* signature;
SirtRef<mirror::String> msg_string(this, NULL);
@@ -1741,18 +1745,21 @@ class CatchBlockStackVisitor : public StackVisitor {
return true; // Continue stack walk.
}
- bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool HandleDeoptimization(mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
MethodHelper mh(m);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
CHECK(code_item != NULL);
- uint16_t num_regs = code_item->registers_size_;
+ uint16_t num_regs = code_item->registers_size_;
uint32_t dex_pc = GetDexPc();
const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc);
- verifier::MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
- &mh.GetClassDef(), code_item,
- m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
+ SirtRef<mirror::DexCache> dex_cache(self_, mh.GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self_, mh.GetClassLoader());
+ verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
+ &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
+ m->GetAccessFlags(), false, true);
verifier.Verify();
std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
for (uint16_t reg = 0; reg < num_regs; reg++) {
@@ -2088,6 +2095,13 @@ class VerifyCallbackVisitor {
void* const arg_;
};
+void Thread::SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
+ if (kIsDebugBuild) {
+ Runtime::Current()->GetHeap()->VerifyObject(class_loader_override);
+ }
+ class_loader_override_ = class_loader_override;
+}
+
void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
if (opeer_ != nullptr) {
opeer_ = visitor(opeer_, arg);
@@ -2115,10 +2129,9 @@ void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
if (frame.this_object_ != nullptr) {
frame.this_object_ = visitor(frame.this_object_, arg);
- DCHECK(frame.this_object_ != nullptr);
}
- frame.method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(frame.method_, arg));
DCHECK(frame.method_ != nullptr);
+ frame.method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(frame.method_, arg));
}
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 3aa137375e..6bd3607922 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -177,34 +177,27 @@ class PACKED(4) Thread {
ALWAYS_INLINE;
// Once called thread suspension will cause an assertion failure.
-#ifndef NDEBUG
const char* StartAssertNoThreadSuspension(const char* cause) {
- CHECK(cause != NULL);
- const char* previous_cause = last_no_thread_suspension_cause_;
- no_thread_suspension_++;
- last_no_thread_suspension_cause_ = cause;
- return previous_cause;
- }
-#else
- const char* StartAssertNoThreadSuspension(const char* cause) {
- CHECK(cause != NULL);
- return NULL;
+ if (kIsDebugBuild) {
+ CHECK(cause != NULL);
+ const char* previous_cause = last_no_thread_suspension_cause_;
+ no_thread_suspension_++;
+ last_no_thread_suspension_cause_ = cause;
+ return previous_cause;
+ } else {
+ return nullptr;
+ }
}
-#endif
// End region where no thread suspension is expected.
-#ifndef NDEBUG
void EndAssertNoThreadSuspension(const char* old_cause) {
- CHECK(old_cause != NULL || no_thread_suspension_ == 1);
- CHECK_GT(no_thread_suspension_, 0U);
- no_thread_suspension_--;
- last_no_thread_suspension_cause_ = old_cause;
- }
-#else
- void EndAssertNoThreadSuspension(const char*) {
+ if (kIsDebugBuild) {
+ CHECK(old_cause != NULL || no_thread_suspension_ == 1);
+ CHECK_GT(no_thread_suspension_, 0U);
+ no_thread_suspension_--;
+ last_no_thread_suspension_cause_ = old_cause;
+ }
}
-#endif
-
void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
@@ -370,9 +363,7 @@ class PACKED(4) Thread {
return class_loader_override_;
}
- void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
- class_loader_override_ = class_loader_override;
- }
+ void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override);
// Create the internal representation of a stack trace, that is more time
// and space efficient to compute than the StackTraceElement[]
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ff1ed2a4d2..dd3f11cb9e 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -74,6 +74,15 @@ pid_t ThreadList::GetLockOwner() {
return Locks::thread_list_lock_->GetExclusiveOwnerTid();
}
+void ThreadList::DumpNativeStacks(std::ostream& os) {
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ for (const auto& thread : list_) {
+ os << "DUMPING THREAD " << thread->tid_ << "\n";
+ DumpNativeStack(os, thread->tid_, "\t", true);
+ os << "\n";
+ }
+}
+
void ThreadList::DumpForSigQuit(std::ostream& os) {
{
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
@@ -413,7 +422,7 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
return thread;
}
if (total_delay_us >= kTimeoutUs) {
- ThreadSuspendByPeerWarning(self, ERROR, "Thread suspension timed out", peer);
+ ThreadSuspendByPeerWarning(self, FATAL, "Thread suspension timed out", peer);
if (did_suspend_request) {
thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
}
@@ -477,7 +486,7 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe
return thread;
}
if (total_delay_us >= kTimeoutUs) {
- ThreadSuspendByThreadIdWarning(ERROR, "Thread suspension timed out", thread_id);
+ ThreadSuspendByThreadIdWarning(WARNING, "Thread suspension timed out", thread_id);
if (did_suspend_request) {
thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
}
@@ -626,7 +635,7 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
{
// No more threads can be born after we start to shutdown.
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
- CHECK(Runtime::Current()->IsShuttingDown());
+ CHECK(Runtime::Current()->IsShuttingDownLocked());
CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
}
all_threads_are_daemons = true;
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index b1b3e88860..45994ae9b7 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -124,6 +124,9 @@ class ThreadList {
return list_;
}
+ void DumpNativeStacks(std::ostream& os)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_);
+
private:
uint32_t AllocThreadId(Thread* self);
void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(allocated_ids_lock_);
diff --git a/runtime/utils.h b/runtime/utils.h
index 6850e8b025..4b39acd5cf 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -122,7 +122,7 @@ struct TypeStaticIf<false, A, B> {
// For rounding integers.
template<typename T>
static inline T RoundDown(T x, int n) {
- CHECK(IsPowerOfTwo(n));
+ DCHECK(IsPowerOfTwo(n));
return (x & -n);
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 9f980610a4..9cd8f738d7 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -39,6 +39,7 @@
#include "object_utils.h"
#include "register_line-inl.h"
#include "runtime.h"
+#include "scoped_thread_state_change.h"
#include "verifier/dex_gc_map.h"
namespace art {
@@ -113,17 +114,15 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const mirror::Class* kla
*error += dex_file.GetLocation();
return kHardFailure;
}
- return VerifyClass(&dex_file,
- kh.GetDexCache(),
- klass->GetClassLoader(),
- class_def,
- allow_soft_failures,
- error);
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, kh.GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, klass->GetClassLoader());
+ return VerifyClass(&dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error);
}
MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
bool allow_soft_failures,
std::string* error) {
@@ -233,8 +232,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
const DexFile* dex_file,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method,
@@ -243,8 +242,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
MethodVerifier::FailureKind result = kNoFailure;
uint64_t start_ns = NanoTime();
- MethodVerifier verifier_(dex_file, dex_cache, class_loader, class_def, code_item, method_idx,
- method, method_access_flags, true, allow_soft_failures);
+ MethodVerifier verifier_(dex_file, &dex_cache, &class_loader, class_def, code_item,
+ method_idx, method, method_access_flags, true, allow_soft_failures);
if (verifier_.Verify()) {
// Verification completed, however failures may be pending that didn't cause the verification
// to hard fail.
@@ -277,13 +276,14 @@ MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
}
void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_idx,
- const DexFile* dex_file, mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ const DexFile* dex_file,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method,
uint32_t method_access_flags) {
- MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def, code_item,
+ MethodVerifier verifier(dex_file, &dex_cache, &class_loader, class_def, code_item,
dex_method_idx, method, method_access_flags, true, true);
verifier.Verify();
verifier.DumpFailures(os);
@@ -291,13 +291,12 @@ void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_i
verifier.Dump(os);
}
-MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+MethodVerifier::MethodVerifier(const DexFile* dex_file, SirtRef<mirror::DexCache>* dex_cache,
+ SirtRef<mirror::ClassLoader>* class_loader,
const DexFile::ClassDef* class_def,
- const DexFile::CodeItem* code_item,
- uint32_t dex_method_idx, mirror::ArtMethod* method,
- uint32_t method_access_flags, bool can_load_classes,
- bool allow_soft_failures)
+ const DexFile::CodeItem* code_item, uint32_t dex_method_idx,
+ mirror::ArtMethod* method, uint32_t method_access_flags,
+ bool can_load_classes, bool allow_soft_failures)
: reg_types_(can_load_classes),
work_insn_idx_(-1),
dex_method_idx_(dex_method_idx),
@@ -323,12 +322,19 @@ MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_ca
DCHECK(class_def != nullptr);
}
+MethodVerifier::~MethodVerifier() {
+ STLDeleteElements(&failure_messages_);
+}
+
void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
std::vector<uint32_t>& monitor_enter_dex_pcs) {
MethodHelper mh(m);
- MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
- &mh.GetClassDef(), mh.GetCodeItem(), m->GetDexMethodIndex(),
- m, m->GetAccessFlags(), false, true);
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
+ mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), false,
+ true);
verifier.interesting_dex_pc_ = dex_pc;
verifier.monitor_enter_dex_pcs_ = &monitor_enter_dex_pcs;
verifier.FindLocksAtDexPc();
@@ -348,9 +354,12 @@ void MethodVerifier::FindLocksAtDexPc() {
mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
uint32_t dex_pc) {
MethodHelper mh(m);
- MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
- &mh.GetClassDef(), mh.GetCodeItem(), m->GetDexMethodIndex(),
- m, m->GetAccessFlags(), false, true);
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
+ mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), false,
+ true);
return verifier.FindAccessedFieldAtDexPc(dex_pc);
}
@@ -374,11 +383,14 @@ mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
}
mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(mirror::ArtMethod* m,
- uint32_t dex_pc) {
+ uint32_t dex_pc) {
MethodHelper mh(m);
- MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
- &mh.GetClassDef(), mh.GetCodeItem(), m->GetDexMethodIndex(),
- m, m->GetAccessFlags(), false, true);
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
+ mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), false,
+ true);
return verifier.FindInvokedMethodAtDexPc(dex_pc);
}
@@ -589,7 +601,7 @@ bool MethodVerifier::ScanTryCatchBlocks() {
if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
mirror::Class* exception_type = linker->ResolveType(*dex_file_,
iterator.GetHandlerTypeIndex(),
- dex_cache_, class_loader_);
+ *dex_cache_, *class_loader_);
if (exception_type == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
Thread::Current()->ClearException();
@@ -1211,7 +1223,8 @@ bool MethodVerifier::SetTypesFromSignature() {
// it's effectively considered initialized the instant we reach here (in the sense that we
// can return without doing anything or call virtual methods).
{
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ false);
reg_line->SetRegisterType(arg_start + cur_arg, reg_type);
}
break;
@@ -1853,7 +1866,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
<< array_type;
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_);
+ const RegType& component_type = reg_types_.GetComponentType(array_type,
+ class_loader_->get());
DCHECK(!component_type.IsConflict());
if (component_type.IsNonZeroReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with component type "
@@ -2168,7 +2182,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
- return_type = &reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ return_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
}
if (!return_type->IsLowHalf()) {
work_line_->SetResultRegisterType(*return_type);
@@ -2235,8 +2249,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
*/
work_line_->MarkRefsAsInitialized(this_type);
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_, return_type_descriptor,
- false);
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(),
+ return_type_descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
} else {
@@ -2257,11 +2271,12 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
- descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ descriptor = dex_file_->StringByTypeIdx(return_type_idx);
} else {
descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
} else {
@@ -2318,7 +2333,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
} else {
descriptor = MethodHelper(abs_method).GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
} else {
@@ -2584,7 +2600,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
if (called_method != NULL) {
const char* descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
} else {
@@ -2850,18 +2867,18 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
const RegType& referrer = GetDeclaringClass();
- mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
+ mirror::Class* klass = (*dex_cache_)->GetResolvedType(class_idx);
const RegType& result =
klass != NULL ? reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes())
- : reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ : reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
if (result.IsConflict()) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
<< "' in " << referrer;
return result;
}
if (klass == NULL && !result.IsUnresolvedTypes()) {
- dex_cache_->SetResolvedType(class_idx, result.GetClass());
+ (*dex_cache_)->SetResolvedType(class_idx, result.GetClass());
}
// Check if access is allowed. Unresolved types use xxxWithAccessCheck to
// check at runtime if access is allowed and so pass here. If result is
@@ -2935,7 +2952,7 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth
}
mirror::Class* klass = klass_type.GetClass();
const RegType& referrer = GetDeclaringClass();
- mirror::ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx);
+ mirror::ArtMethod* res_method = (*dex_cache_)->GetResolvedMethod(dex_method_idx);
if (res_method == NULL) {
const char* name = dex_file_->GetMethodName(method_id);
const Signature signature = dex_file_->GetMethodSignature(method_id);
@@ -2948,7 +2965,7 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth
res_method = klass->FindVirtualMethod(name, signature);
}
if (res_method != NULL) {
- dex_cache_->SetResolvedMethod(dex_method_idx, res_method);
+ (*dex_cache_)->SetResolvedMethod(dex_method_idx, res_method);
} else {
// If a virtual or interface method wasn't found with the expected type, look in
// the direct methods. This can happen when the wrong invoke type is used or when
@@ -3112,7 +3129,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
<< " missing signature component";
return NULL;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (reg_type.IsIntegralTypes()) {
const RegType& src_type = work_line_->GetRegisterType(get_reg);
@@ -3136,8 +3153,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
}
mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst,
- RegisterLine* reg_line,
- bool is_range) {
+ RegisterLine* reg_line, bool is_range) {
DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
@@ -3152,11 +3168,13 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst
} else {
const std::string& descriptor(actual_arg_type.GetDescriptor());
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- this_class = class_linker->FindClass(descriptor.c_str(), class_loader_);
+ this_class = class_linker->FindClass(descriptor.c_str(), *class_loader_);
if (this_class == NULL) {
- Thread::Current()->ClearException();
+ Thread* self = Thread::Current();
+ self->ClearException();
// Look for a system class
- this_class = class_linker->FindClass(descriptor.c_str(), NULL);
+ SirtRef<mirror::ClassLoader> null_class_loader(self, nullptr);
+ this_class = class_linker->FindClass(descriptor.c_str(), null_class_loader);
}
}
if (this_class == NULL) {
@@ -3246,7 +3264,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instructio
<< " missing signature component";
return NULL;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
return res_method;
@@ -3290,7 +3308,7 @@ void MethodVerifier::VerifyNewArray(const Instruction* inst, bool is_filled, boo
} else {
// Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
// the list and fail. It's legal, if silly, for arg_count to be zero.
- const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_);
+ const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->get());
uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
uint32_t arg[5];
if (!is_range) {
@@ -3332,7 +3350,7 @@ void MethodVerifier::VerifyAGet(const Instruction* inst,
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
} else {
/* verify the class */
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_);
+ const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->get());
if (!component_type.IsReferenceTypes() && !is_primitive) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
<< " source for aget-object";
@@ -3409,7 +3427,7 @@ void MethodVerifier::VerifyAPut(const Instruction* inst,
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_);
+ const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->get());
const uint32_t vregA = inst->VRegA_23x();
if (is_primitive) {
VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3441,10 +3459,9 @@ mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
if (klass_type.IsUnresolvedTypes()) {
return NULL; // Can't resolve Class so no more to do here, will do checking at runtime.
}
- mirror::ArtField* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_,
- field_idx,
- dex_cache_,
- class_loader_);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, *dex_cache_,
+ *class_loader_);
if (field == NULL) {
VLOG(verifier) << "Unable to resolve static field " << field_idx << " ("
<< dex_file_->GetFieldName(field_id) << ") in "
@@ -3460,9 +3477,8 @@ mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
} else if (!field->IsStatic()) {
Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected field " << PrettyField(field) << " to be static";
return NULL;
- } else {
- return field;
}
+ return field;
}
mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
@@ -3478,10 +3494,9 @@ mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int
if (klass_type.IsUnresolvedTypes()) {
return NULL; // Can't resolve Class so no more to do here
}
- mirror::ArtField* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_,
- field_idx,
- dex_cache_,
- class_loader_);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, *dex_cache_,
+ *class_loader_);
if (field == NULL) {
VLOG(verifier) << "Unable to resolve instance field " << field_idx << " ("
<< dex_file_->GetFieldName(field_id) << ") in "
@@ -3550,8 +3565,7 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_ty
if (field_type == nullptr) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
- mirror::ClassLoader* loader = class_loader_;
- field_type = &reg_types_.FromDescriptor(loader, descriptor, false);
+ field_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
}
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
if (is_primitive) {
@@ -3613,8 +3627,7 @@ void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_ty
if (field_type == nullptr) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
- mirror::ClassLoader* loader = class_loader_;
- field_type = &reg_types_.FromDescriptor(loader, descriptor, false);
+ field_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
}
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
if (is_primitive) {
@@ -3671,11 +3684,13 @@ mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
// We need to resolve the class from its descriptor.
const std::string& descriptor(object_type.GetDescriptor());
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- object_class = class_linker->FindClass(descriptor.c_str(), class_loader_);
+ Thread* self = Thread::Current();
+ object_class = class_linker->FindClass(descriptor.c_str(), *class_loader_);
if (object_class == NULL) {
- Thread::Current()->ClearException();
+ self->ClearException();
// Look for a system class
- object_class = class_linker->FindClass(descriptor.c_str(), NULL);
+ SirtRef<mirror::ClassLoader> null_class_loader(self, nullptr);
+ object_class = class_linker->FindClass(descriptor.c_str(), null_class_loader);
}
}
if (object_class == NULL) {
@@ -3881,8 +3896,8 @@ const RegType& MethodVerifier::GetMethodReturnType() {
MethodHelper mh(mirror_method_);
mirror::Class* return_type_class = mh.GetReturnType();
if (return_type_class != nullptr) {
- return_type_ =&reg_types_.FromClass(mh.GetReturnTypeDescriptor(), return_type_class,
- return_type_class->CannotBeAssignedFromOtherTypes());
+ return_type_ = &reg_types_.FromClass(mh.GetReturnTypeDescriptor(), return_type_class,
+ return_type_class->CannotBeAssignedFromOtherTypes());
} else {
Thread* self = Thread::Current();
DCHECK(self->IsExceptionPending());
@@ -3894,7 +3909,7 @@ const RegType& MethodVerifier::GetMethodReturnType() {
const DexFile::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
uint16_t return_type_idx = proto_id.return_type_idx_;
const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(return_type_idx));
- return_type_ = &reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ return_type_ = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
}
}
return *return_type_;
@@ -3910,7 +3925,7 @@ const RegType& MethodVerifier::GetDeclaringClass() {
declaring_class_ = &reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes());
} else {
- declaring_class_ = &reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ declaring_class_ = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
}
}
return *declaring_class_;
@@ -3969,7 +3984,8 @@ MethodVerifier::MethodSafeCastSet* MethodVerifier::GenerateSafeCastSet() {
// String[] in which case the stores need to be of Strings.
if (array_type.IsPreciseReference()) {
const RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
- const RegType& component_type(reg_types_.GetComponentType(array_type, class_loader_));
+ const RegType& component_type(reg_types_.GetComponentType(array_type,
+ class_loader_->get()));
is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
}
}
@@ -4026,8 +4042,8 @@ MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() {
// We can't devirtualize abstract classes except on arrays of abstract classes.
continue;
}
- mirror::ArtMethod* abstract_method =
- dex_cache_->GetResolvedMethod(is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
+ mirror::ArtMethod* abstract_method = (*dex_cache_)->GetResolvedMethod(
+ is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
if (abstract_method == NULL) {
// If the method is not found in the cache this means that it was never found
// by ResolveMethodAndCheckAccess() called when verifying invoke_*.
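[Editorial note, not part of the patch] The method_verifier.cc hunks above all apply one idiom: the verifier's dex_cache_ and class_loader_ members become pointers to SirtRef handles, so call sites pass *class_loader_ where the new signatures take a SirtRef<mirror::ClassLoader>&, and call class_loader_->get() where an API still wants the raw mirror pointer. A minimal sketch of that idiom, assuming a hypothetical raw_loader pointer and RegTypeCache named reg_types:

    // Sketch only: a SirtRef roots the object in the thread's Stack Indirect
    // Reference Table so a moving collector can relocate it safely.
    Thread* self = Thread::Current();
    SirtRef<mirror::ClassLoader> class_loader(self, raw_loader);   // raw_loader is hypothetical
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    // Pass the handle where the patched signatures take SirtRef<mirror::ClassLoader>&:
    mirror::Class* klass = class_linker->FindClass(descriptor, class_loader);
    // Use get() where an interface still takes a raw mirror::ClassLoader*:
    const RegType& type = reg_types.FromDescriptor(class_loader.get(), descriptor, false);
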
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 57fde1d5d5..8a663f9f0e 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -33,6 +33,7 @@
#include "reg_type_cache-inl.h"
#include "register_line.h"
#include "safe_map.h"
+#include "sirt_ref.h"
#include "UniquePtr.h"
namespace art {
@@ -142,14 +143,15 @@ class MethodVerifier {
static FailureKind VerifyClass(const mirror::Class* klass, bool allow_soft_failures,
std::string* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static FailureKind VerifyClass(const DexFile* dex_file, mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ static FailureKind VerifyClass(const DexFile* dex_file, SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
bool allow_soft_failures, std::string* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void VerifyMethodAndDump(std::ostream& os, uint32_t method_idx, const DexFile* dex_file,
- mirror::DexCache* dex_cache, mirror::ClassLoader* class_loader,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method, uint32_t method_access_flags)
@@ -217,16 +219,13 @@ class MethodVerifier {
return can_load_classes_;
}
- MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader, const DexFile::ClassDef* class_def,
- const DexFile::CodeItem* code_item,
- uint32_t method_idx, mirror::ArtMethod* method,
+ MethodVerifier(const DexFile* dex_file, SirtRef<mirror::DexCache>* dex_cache,
+ SirtRef<mirror::ClassLoader>* class_loader, const DexFile::ClassDef* class_def,
+ const DexFile::CodeItem* code_item, uint32_t method_idx, mirror::ArtMethod* method,
uint32_t access_flags, bool can_load_classes, bool allow_soft_failures)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ~MethodVerifier() {
- STLDeleteElements(&failure_messages_);
- }
+ ~MethodVerifier();
// Run verification on the method. Returns true if verification completes and false if the input
// has an irrecoverable corruption.
@@ -257,8 +256,8 @@ class MethodVerifier {
* for code flow problems.
*/
static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
+ SirtRef<mirror::DexCache>& dex_cache,
+ SirtRef<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def_idx,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method, uint32_t method_access_flags,
@@ -685,9 +684,9 @@ class MethodVerifier {
const RegType* return_type_; // Lazily computed return type of the method.
const DexFile* const dex_file_; // The dex file containing the method.
// The dex_cache for the declaring class of the method.
- mirror::DexCache* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
+ SirtRef<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
// The class loader for the declaring class of the method.
- mirror::ClassLoader* class_loader_ GUARDED_BY(Locks::mutator_lock_);
+ SirtRef<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
const DexFile::ClassDef* const class_def_; // The class def of the declaring class of the method.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
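[Editorial note, not part of the patch] With the header changes above, VerifyClass and VerifyMethodAndDump take SirtRef<mirror::DexCache>& and SirtRef<mirror::ClassLoader>& instead of raw mirror pointers, so callers are expected to root both objects before invoking verification. A hedged caller-side sketch; klass, dex_file, class_def and allow_soft_failures are hypothetical locals, not taken from this patch:

    // Sketch only: rooting the dex cache and class loader before calling the
    // new SirtRef-based VerifyClass overload.
    Thread* self = Thread::Current();
    SirtRef<mirror::DexCache> dex_cache(self, klass->GetDexCache());
    SirtRef<mirror::ClassLoader> class_loader(self, klass->GetClassLoader());
    std::string error;
    verifier::MethodVerifier::FailureKind result =
        verifier::MethodVerifier::VerifyClass(dex_file,            // const DexFile*
                                              dex_cache, class_loader,
                                              class_def,           // const DexFile::ClassDef*
                                              allow_soft_failures, &error);
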
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 50d1583bbb..d82e75de07 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -928,7 +928,8 @@ mirror::Class* RegType::ClassJoin(mirror::Class* s, mirror::Class* t) {
}
mirror::Class* common_elem = ClassJoin(s_ct, t_ct);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::ClassLoader* class_loader = s->GetClassLoader();
+ Thread* self = Thread::Current();
+ SirtRef<mirror::ClassLoader> class_loader(self, s->GetClassLoader());
std::string descriptor("[");
descriptor += ClassHelper(common_elem).GetDescriptor();
mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader);
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 446dd0080c..a62e835b1c 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -140,9 +140,10 @@ mirror::Class* RegTypeCache::ResolveClass(const char* descriptor, mirror::ClassL
// Class was not found, must create new type.
// Try resolving class
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ SirtRef<mirror::ClassLoader> class_loader(Thread::Current(), loader);
mirror::Class* klass = NULL;
if (can_load_classes_) {
- klass = class_linker->FindClass(descriptor, loader);
+ klass = class_linker->FindClass(descriptor, class_loader);
} else {
klass = class_linker->LookupClass(descriptor, loader);
if (klass != NULL && !klass->IsLoaded()) {