diff options
298 files changed, 5573 insertions, 4854 deletions
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc index 5a9e04f5dd..0a1e2e35a6 100644 --- a/compiler/common_compiler_test.cc +++ b/compiler/common_compiler_test.cc @@ -17,6 +17,8 @@ #include "common_compiler_test.h" #include "arch/instruction_set_features.h" +#include "art_field-inl.h" +#include "art_method.h" #include "class_linker.h" #include "compiled_method.h" #include "dex/pass_manager.h" @@ -26,7 +28,8 @@ #include "driver/compiler_driver.h" #include "driver/compiler_options.h" #include "interpreter/interpreter.h" -#include "mirror/art_method.h" +#include "mirror/class_loader.h" +#include "mirror/class-inl.h" #include "mirror/dex_cache.h" #include "mirror/object-inl.h" #include "scoped_thread_state_change.h" @@ -38,7 +41,7 @@ namespace art { CommonCompilerTest::CommonCompilerTest() {} CommonCompilerTest::~CommonCompilerTest() {} -void CommonCompilerTest::MakeExecutable(mirror::ArtMethod* method) { +void CommonCompilerTest::MakeExecutable(ArtMethod* method) { CHECK(method != nullptr); const CompiledMethod* compiled_method = nullptr; @@ -132,11 +135,12 @@ void CommonCompilerTest::MakeExecutable(mirror::ClassLoader* class_loader, const Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader)); mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader); CHECK(klass != nullptr) << "Class not found " << class_name; - for (size_t i = 0; i < klass->NumDirectMethods(); i++) { - MakeExecutable(klass->GetDirectMethod(i)); + size_t pointer_size = class_linker_->GetImagePointerSize(); + for (auto& m : klass->GetDirectMethods(pointer_size)) { + MakeExecutable(&m); } - for (size_t i = 0; i < klass->NumVirtualMethods(); i++) { - MakeExecutable(klass->GetVirtualMethod(i)); + for (auto& m : klass->GetVirtualMethods(pointer_size)) { + MakeExecutable(&m); } } @@ -225,15 +229,16 @@ void CommonCompilerTest::CompileClass(mirror::ClassLoader* class_loader, const c Handle<mirror::ClassLoader> 
loader(hs.NewHandle(class_loader)); mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader); CHECK(klass != nullptr) << "Class not found " << class_name; - for (size_t i = 0; i < klass->NumDirectMethods(); i++) { - CompileMethod(klass->GetDirectMethod(i)); + auto pointer_size = class_linker_->GetImagePointerSize(); + for (auto& m : klass->GetDirectMethods(pointer_size)) { + CompileMethod(&m); } - for (size_t i = 0; i < klass->NumVirtualMethods(); i++) { - CompileMethod(klass->GetVirtualMethod(i)); + for (auto& m : klass->GetVirtualMethods(pointer_size)) { + CompileMethod(&m); } } -void CommonCompilerTest::CompileMethod(mirror::ArtMethod* method) { +void CommonCompilerTest::CompileMethod(ArtMethod* method) { CHECK(method != nullptr); TimingLogger timings("CommonTest::CompileMethod", false, false); TimingLogger::ScopedTiming t(__FUNCTION__, &timings); @@ -249,7 +254,8 @@ void CommonCompilerTest::CompileDirectMethod(Handle<mirror::ClassLoader> class_l Thread* self = Thread::Current(); mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader); CHECK(klass != nullptr) << "Class not found " << class_name; - mirror::ArtMethod* method = klass->FindDirectMethod(method_name, signature); + auto pointer_size = class_linker_->GetImagePointerSize(); + ArtMethod* method = klass->FindDirectMethod(method_name, signature, pointer_size); CHECK(method != nullptr) << "Direct method not found: " << class_name << "." 
<< method_name << signature; CompileMethod(method); @@ -262,7 +268,8 @@ void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_ Thread* self = Thread::Current(); mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader); CHECK(klass != nullptr) << "Class not found " << class_name; - mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature); + auto pointer_size = class_linker_->GetImagePointerSize(); + ArtMethod* method = klass->FindVirtualMethod(method_name, signature, pointer_size); CHECK(method != nullptr) << "Virtual method not found: " << class_name << "." << method_name << signature; CompileMethod(method); diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h index 8d80a2da5c..769319be40 100644 --- a/compiler/common_compiler_test.h +++ b/compiler/common_compiler_test.h @@ -45,7 +45,7 @@ class CommonCompilerTest : public CommonRuntimeTest { // Create an OatMethod based on pointers (for unit tests). 
OatFile::OatMethod CreateOatMethod(const void* code); - void MakeExecutable(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void MakeExecutable(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void MakeExecutable(const void* code_start, size_t code_length); @@ -74,7 +74,7 @@ class CommonCompilerTest : public CommonRuntimeTest { void CompileClass(mirror::ClassLoader* class_loader, const char* class_name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CompileMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name, const char* method_name, const char* signature) diff --git a/compiler/compiler.h b/compiler/compiler.h index 94b0fe33db..e5d1aff08c 100644 --- a/compiler/compiler.h +++ b/compiler/compiler.h @@ -22,16 +22,13 @@ namespace art { +class ArtMethod; class Backend; struct CompilationUnit; class CompilerDriver; class CompiledMethod; class OatWriter; -namespace mirror { - class ArtMethod; -} - class Compiler { public: enum Kind { @@ -60,7 +57,7 @@ class Compiler { uint32_t method_idx, const DexFile& dex_file) const = 0; - virtual uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const + virtual uintptr_t GetEntryPointOf(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; uint64_t GetMaximumCompilationTimeBeforeWarning() const { diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index d1ddfda545..bd590467e3 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -15,13 +15,13 @@ */ #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/logging.h" #include "base/mutex.h" #include "dex_file-inl.h" #include "dex_instruction-inl.h" #include "driver/compiler_driver.h" #include "driver/dex_compilation_unit.h" 
-#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" #include "thread-inl.h" diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc index 94be1fd4a5..be913fe634 100644 --- a/compiler/dex/mir_method_info.cc +++ b/compiler/dex/mir_method_info.cc @@ -83,7 +83,7 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver, MethodReference devirt_ref(it->target_dex_file_, it->target_method_idx_); MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr; InvokeType invoke_type = it->GetInvokeType(); - mirror::ArtMethod* resolved_method = nullptr; + ArtMethod* resolved_method = nullptr; bool string_init = false; if (default_inliner->IsStringInitMethodIndex(it->MethodIndex())) { diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 7679db8bac..7b1ec398d0 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -249,7 +249,7 @@ int MIRGraph::GetSSAUseCount(int s_reg) { size_t MIRGraph::GetNumBytesForSpecialTemps() const { // This logic is written with assumption that Method* is only special temp. DCHECK_EQ(max_available_special_compiler_temps_, 1u); - return sizeof(StackReference<mirror::ArtMethod>); + return InstructionSetPointerSize(cu_->instruction_set); } size_t MIRGraph::GetNumAvailableVRTemps() { @@ -316,6 +316,7 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) // The vreg is always the first special temp for method ptr. 
compiler_temp->v_reg = GetFirstSpecialTempVR(); + CHECK(reg_location_ == nullptr); } else if (ct_type == kCompilerTempBackend) { requested_backend_temp_ = true; diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index 822ea2106f..981ab2c1ee 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -19,6 +19,7 @@ #include "codegen_arm.h" #include "arm_lir.h" +#include "art_method.h" #include "base/bit_utils.h" #include "base/logging.h" #include "dex/mir_graph.h" @@ -27,7 +28,6 @@ #include "driver/compiler_driver.h" #include "driver/compiler_options.h" #include "gc/accounting/card_table.h" -#include "mirror/art_method.h" #include "mirror/object_array-inl.h" #include "entrypoints/quick/quick_entrypoints.h" #include "utils/dex_cache_arrays_layout-inl.h" @@ -637,7 +637,7 @@ int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info, if (direct_code == 0) { // kInvokeTgt := arg0_ref->entrypoint cg->LoadWordDisp(arg0_ref, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt)); } break; @@ -678,7 +678,7 @@ int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info, case 1: // Get method->dex_cache_resolved_methods_ if (!use_pc_rel) { cg->LoadRefDisp(arg0_ref, - mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), + ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), arg0_ref, kNotVolatile); } @@ -708,14 +708,14 @@ int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info, kNotVolatile); } else { size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index); - cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref); + cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref, false); } break; case 3: // Grab the code from the method* if (direct_code == 0) { // kInvokeTgt := arg0_ref->entrypoint 
cg->LoadWordDisp(arg0_ref, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt)); } break; diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h index 83b27df939..b94e707354 100644 --- a/compiler/dex/quick/arm/codegen_arm.h +++ b/compiler/dex/quick/arm/codegen_arm.h @@ -83,7 +83,8 @@ class ArmMir2Lir FINAL : public Mir2Lir { void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE; bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE; - void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest) OVERRIDE; + void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, + bool wide) OVERRIDE; // Required for target - register utilities. RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE; diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index 7de8e55e56..6d30e72f86 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -1107,7 +1107,9 @@ void ArmMir2Lir::OpPcRelDexCacheArrayAddr(const DexFile* dex_file, int offset, R dex_cache_access_insns_.push_back(movt); } -void ArmMir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest) { +void ArmMir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, + bool wide) { + DCHECK(!wide) << "Unsupported"; if (dex_cache_arrays_base_reg_.Valid()) { LoadRefDisp(dex_cache_arrays_base_reg_, offset - dex_cache_arrays_min_offset_, r_dest, kNotVolatile); diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h index 5bf77aae55..c530a8b23a 100644 --- a/compiler/dex/quick/arm64/arm64_lir.h +++ b/compiler/dex/quick/arm64/arm64_lir.h @@ -71,7 +71,7 @@ namespace art { * | IN[ins-1] | {Note: resides in caller's frame} * | . 
| * | IN[0] | - * | caller's method (StackReference<ArtMethod>)| {This is a compressed (4-bytes) reference} + * | caller's method ArtMethod* | {Pointer sized reference} * +============================================+ {Note: start of callee's frame} * | spill region | {variable sized - will include lr if non-leaf} * +--------------------------------------------+ @@ -90,7 +90,7 @@ namespace art { * | OUT[outs-2] | * | . | * | OUT[0] | - * | current method (StackReference<ArtMethod>) | <<== sp w/ 16-byte alignment + * | current method ArtMethod* | <<== sp w/ 16-byte alignment * +============================================+ */ diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc index e49e40d868..83a6affe81 100644 --- a/compiler/dex/quick/arm64/call_arm64.cc +++ b/compiler/dex/quick/arm64/call_arm64.cc @@ -19,6 +19,7 @@ #include "codegen_arm64.h" #include "arm64_lir.h" +#include "art_method.h" #include "base/logging.h" #include "dex/mir_graph.h" #include "dex/quick/dex_file_to_method_inliner_map.h" @@ -27,7 +28,6 @@ #include "driver/compiler_options.h" #include "gc/accounting/card_table.h" #include "entrypoints/quick/quick_entrypoints.h" -#include "mirror/art_method.h" #include "mirror/object_array-inl.h" #include "utils/dex_cache_arrays_layout-inl.h" @@ -456,23 +456,22 @@ static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& tar */ int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, - uint32_t unused_idx, + uint32_t unused_idx ATTRIBUTE_UNUSED, uintptr_t direct_code, uintptr_t direct_method, InvokeType type) { - UNUSED(info, unused_idx); Arm64Mir2Lir* cg = static_cast<Arm64Mir2Lir*>(cu->cg.get()); if (info->string_init_offset != 0) { RegStorage arg0_ref = cg->TargetReg(kArg0, kRef); switch (state) { case 0: { // Grab target method* from thread pointer - cg->LoadRefDisp(rs_xSELF, info->string_init_offset, arg0_ref, kNotVolatile); + 
cg->LoadWordDisp(rs_xSELF, info->string_init_offset, arg0_ref); break; } case 1: // Grab the code from the method* if (direct_code == 0) { // kInvokeTgt := arg0_ref->entrypoint cg->LoadWordDisp(arg0_ref, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt)); } break; @@ -500,7 +499,7 @@ int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info, } } else { bool use_pc_rel = cg->CanUseOpPcRelDexCacheArrayLoad(); - RegStorage arg0_ref = cg->TargetReg(kArg0, kRef); + RegStorage arg0_ref = cg->TargetPtrReg(kArg0); switch (state) { case 0: // Get the current Method* [sets kArg0] // TUNING: we can save a reg copy if Method* has been promoted. @@ -513,7 +512,7 @@ int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info, case 1: // Get method->dex_cache_resolved_methods_ if (!use_pc_rel) { cg->LoadRefDisp(arg0_ref, - mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), + ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), arg0_ref, kNotVolatile); } @@ -536,21 +535,19 @@ int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info, case 2: // Grab target method* CHECK_EQ(cu->dex_file, target_method.dex_file); if (!use_pc_rel) { - cg->LoadRefDisp(arg0_ref, - mirror::ObjectArray<mirror::Object>::OffsetOfElement( - target_method.dex_method_index).Int32Value(), - arg0_ref, - kNotVolatile); + cg->LoadWordDisp(arg0_ref, + mirror::Array::DataOffset(kArm64PointerSize).Uint32Value() + + target_method.dex_method_index * kArm64PointerSize, arg0_ref); } else { size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index); - cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref); + cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref, true); } break; case 3: // Grab the code from the method* if (direct_code == 0) { // kInvokeTgt := arg0_ref->entrypoint 
cg->LoadWordDisp(arg0_ref, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt)); } break; diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h index 8184f02287..ca2e012950 100644 --- a/compiler/dex/quick/arm64/codegen_arm64.h +++ b/compiler/dex/quick/arm64/codegen_arm64.h @@ -79,7 +79,8 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE; bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE; - void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest) OVERRIDE; + void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, bool wide) + OVERRIDE; LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, int offset, int check_value, LIR* target, LIR** compare) OVERRIDE; diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc index 08aa5d20d0..31cf6675af 100644 --- a/compiler/dex/quick/arm64/int_arm64.cc +++ b/compiler/dex/quick/arm64/int_arm64.cc @@ -947,14 +947,17 @@ bool Arm64Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const { return dex_cache_arrays_layout_.Valid(); } -void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, - RegStorage r_dest) { +void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, + bool wide) { LIR* adrp = NewLIR2(kA64Adrp2xd, r_dest.GetReg(), 0); adrp->operands[2] = WrapPointer(dex_file); adrp->operands[3] = offset; adrp->operands[4] = WrapPointer(adrp); dex_cache_access_insns_.push_back(adrp); - LIR* ldr = LoadBaseDisp(r_dest, 0, r_dest, kReference, kNotVolatile); + if (wide) { + DCHECK(r_dest.Is64Bit()); + } + LIR* ldr = LoadBaseDisp(r_dest, 0, r_dest, wide ? 
k64 : kReference, kNotVolatile); ldr->operands[4] = adrp->operands[4]; ldr->flags.fixup = kFixupLabel; dex_cache_access_insns_.push_back(ldr); diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc index fc32ecd955..d5de18d865 100644 --- a/compiler/dex/quick/arm64/target_arm64.cc +++ b/compiler/dex/quick/arm64/target_arm64.cc @@ -859,7 +859,8 @@ void Arm64Mir2Lir::InstallLiteralPools() { // PC-relative references to dex cache arrays. for (LIR* p : dex_cache_access_insns_) { - DCHECK(p->opcode == kA64Adrp2xd || p->opcode == kA64Ldr3rXD); + auto non_wide = UNWIDE(p->opcode); // May be a wide load for ArtMethod*. + DCHECK(non_wide == kA64Adrp2xd || non_wide == kA64Ldr3rXD) << p->opcode << " " << non_wide; const LIR* adrp = UnwrapPointer<LIR>(p->operands[4]); DCHECK_EQ(adrp->opcode, kA64Adrp2xd); const DexFile* dex_file = UnwrapPointer<DexFile>(adrp->operands[2]); @@ -895,8 +896,7 @@ void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) rl_src[0] = mir_graph_->GetSrc(mir, 0); rl_src[1] = mir_graph_->GetSrc(mir, 1); rl_src[2]= mir_graph_->GetSrc(mir, 2); - GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2], - (opcode == kMirOpMsubInt) ? true : false); + GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2], opcode == kMirOpMsubInt); break; case kMirOpMaddLong: case kMirOpMsubLong: @@ -904,8 +904,7 @@ void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) rl_src[0] = mir_graph_->GetSrcWide(mir, 0); rl_src[1] = mir_graph_->GetSrcWide(mir, 2); rl_src[2] = mir_graph_->GetSrcWide(mir, 4); - GenMaddMsubLong(rl_dest, rl_src[0], rl_src[1], rl_src[2], - (opcode == kMirOpMsubLong) ? 
true : false); + GenMaddMsubLong(rl_dest, rl_src[0], rl_src[1], rl_src[2], opcode == kMirOpMsubLong); break; default: LOG(FATAL) << "Unexpected opcode: " << static_cast<int>(opcode); diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 86bb69d01e..f4bf31fb8a 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -1298,8 +1298,8 @@ void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType // resolve these invokes to the same method, so we don't care which one we record here. data_target->operands[2] = type; } - // Loads an ArtMethod pointer, which is a reference as it lives in the heap. - OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target); + // Loads an ArtMethod pointer, which is not a reference. + OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target); DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target); DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target); } @@ -1322,7 +1322,8 @@ bool Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const { void Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file ATTRIBUTE_UNUSED, int offset ATTRIBUTE_UNUSED, - RegStorage r_dest ATTRIBUTE_UNUSED) { + RegStorage r_dest ATTRIBUTE_UNUSED, + bool wide ATTRIBUTE_UNUSED) { LOG(FATAL) << "No generic implementation."; UNREACHABLE(); } diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 63f83f94cf..af108170e6 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -97,11 +97,11 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel LockTemp(r_base); if (CanUseOpPcRelDexCacheArrayLoad()) { uint32_t offset = dex_cache_arrays_layout_.TypeOffset(field_info.StorageIndex()); - OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, r_base); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, r_base, false); } else { // Using fixed register to sync with 
possible call to runtime support. RegStorage r_method = LoadCurrMethodWithHint(r_base); - LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base, + LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base, kNotVolatile); int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value(); LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile); @@ -693,7 +693,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, OpSize size) { // Fast path, static storage base is this method's class r_base = AllocTempRef(); RegStorage r_method = LoadCurrMethodWithHint(r_base); - LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base, + LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), r_base, kNotVolatile); } else { // Medium path, static storage base in a different class which requires checks that the other @@ -771,7 +771,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Typ // Fast path, static storage base is this method's class r_base = AllocTempRef(); RegStorage r_method = LoadCurrMethodWithHint(r_base); - LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base, + LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), r_base, kNotVolatile); } else { // Medium path, static storage base in a different class which requires checks that the other @@ -1031,10 +1031,10 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { // We don't need access checks, load type from dex cache if (CanUseOpPcRelDexCacheArrayLoad()) { size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx); - OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg, false); } else { int32_t dex_cache_offset = - mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); + 
ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); RegStorage res_reg = AllocTempRef(); RegStorage r_method = LoadCurrMethodWithHint(res_reg); LoadRefDisp(r_method, dex_cache_offset, res_reg, kNotVolatile); @@ -1066,13 +1066,12 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { RegStorage ret0 = TargetReg(kRet0, kRef); if (CanUseOpPcRelDexCacheArrayLoad()) { size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx); - OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, ret0); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, ret0, false); } else { // Method to declaring class. RegStorage arg0 = TargetReg(kArg0, kRef); RegStorage r_method = LoadCurrMethodWithHint(arg0); - LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), - arg0, kNotVolatile); + LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), arg0, kNotVolatile); // Declaring class to dex cache strings. LoadRefDisp(arg0, mirror::Class::DexCacheStringsOffset().Int32Value(), arg0, kNotVolatile); @@ -1086,11 +1085,11 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); if (CanUseOpPcRelDexCacheArrayLoad()) { size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx); - OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg, false); } else { RegLocation rl_method = LoadCurrMethod(); RegStorage res_reg = AllocTempRef(); - LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), res_reg, + LoadRefDisp(rl_method.reg, ArtMethod::DeclaringClassOffset().Int32Value(), res_reg, kNotVolatile); LoadRefDisp(res_reg, mirror::Class::DexCacheStringsOffset().Int32Value(), res_reg, kNotVolatile); @@ -1173,18 +1172,18 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re if (use_declaring_class) { RegStorage r_method = 
LoadCurrMethodWithHint(check_class); - LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class, + LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), check_class, kNotVolatile); LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, kNotVolatile); } else if (CanUseOpPcRelDexCacheArrayLoad()) { size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx); - OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, check_class); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, check_class, false); LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, kNotVolatile); } else { RegStorage r_method = LoadCurrMethodWithHint(check_class); - LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), check_class, kNotVolatile); LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, kNotVolatile); @@ -1232,7 +1231,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know } else if (use_declaring_class) { RegStorage r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef)); LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref - LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), class_reg, kNotVolatile); } else { if (can_assume_type_is_in_dex_cache) { @@ -1242,11 +1241,11 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know if (CanUseOpPcRelDexCacheArrayLoad()) { size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx); - OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false); } else { RegStorage r_method = LoadCurrMethodWithHint(class_reg); // Load dex cache entry into class_reg (kArg2) - LoadRefDisp(r_method, 
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg, kNotVolatile); int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); @@ -1367,17 +1366,17 @@ void Mir2Lir::GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx, OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path } else if (use_declaring_class) { RegStorage method_reg = LoadCurrMethodWithHint(TargetReg(kArg1, kRef)); - LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + LoadRefDisp(method_reg, ArtMethod::DeclaringClassOffset().Int32Value(), class_reg, kNotVolatile); } else { // Load dex cache entry into class_reg (kArg2) if (CanUseOpPcRelDexCacheArrayLoad()) { size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx); - OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg); + OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false); } else { RegStorage r_method = LoadCurrMethodWithHint(class_reg); - LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + LoadRefDisp(r_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg, kNotVolatile); int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index ab011fc0b2..1f114cf336 100755 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -398,7 +398,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation( // TODO: Support 64-bit argument registers. 
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { /* - * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod> + * Dummy up a RegLocation for the incoming ArtMethod* * It will attempt to keep kArg0 live (or copy it to home location * if promoted). */ @@ -407,10 +407,15 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { rl_src.reg = TargetReg(kArg0, kRef); rl_src.home = false; MarkLive(rl_src); - StoreValue(rl_method, rl_src); + if (cu_->target64) { + DCHECK(rl_method.wide); + StoreValueWide(rl_method, rl_src); + } else { + StoreValue(rl_method, rl_src); + } // If Method* has been promoted, explicitly flush if (rl_method.location == kLocPhysReg) { - StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile); + StoreBaseDisp(TargetPtrReg(kSp), 0, rl_src.reg, kWord, kNotVolatile); } if (mir_graph_->GetNumOfInVRs() == 0) { @@ -498,7 +503,7 @@ static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) { static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from, const CompilationUnit* cu, Mir2Lir* cg) { if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { - int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset( InstructionSetPointerSize(cu->instruction_set)).Int32Value(); // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt] cg->LoadWordDisp(alt_from == nullptr ? 
cg->TargetReg(kArg0, kRef) : *alt_from, offset, @@ -535,10 +540,12 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, break; case 2: { // Get this->klass_.embedded_vtable[method_idx] [usr kArg0, set kArg0] - int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() + - method_idx * sizeof(mirror::Class::VTableEntry); + const size_t pointer_size = InstructionSetPointerSize( + cu->compiler_driver->GetInstructionSet()); + int32_t offset = mirror::Class::EmbeddedVTableEntryOffset( + method_idx, pointer_size).Uint32Value(); // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0] - cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile); + cg->LoadWordDisp(cg->TargetPtrReg(kArg0), offset, cg->TargetPtrReg(kArg0)); break; } case 3: @@ -580,10 +587,12 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, // Includes a null-check. break; case 3: { // Get target method [use kInvokeTgt, set kArg0] - int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() + - (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry); + const size_t pointer_size = InstructionSetPointerSize( + cu->compiler_driver->GetInstructionSet()); + int32_t offset = mirror::Class::EmbeddedImTableEntryOffset( + method_idx % mirror::Class::kImtSize, pointer_size).Uint32Value(); // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0] - cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile); + cg->LoadWordDisp(cg->TargetPtrReg(kArg0), offset, cg->TargetPtrReg(kArg0)); break; } case 4: @@ -967,7 +976,7 @@ bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) { RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); GenNullCheck(rl_obj.reg, info->opt_flags); LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg, - kNotVolatile); + kNotVolatile); 
MarkPossibleNullPointerException(info->opt_flags); StoreValue(rl_dest, rl_result); @@ -1418,7 +1427,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) { RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); - if (Is64BitInstructionSet(cu_->instruction_set)) { + if (cu_->target64) { LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg, kNotVolatile); } else { diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc index 4215e8bc50..aa95e77f6d 100644 --- a/compiler/dex/quick/gen_loadstore.cc +++ b/compiler/dex/quick/gen_loadstore.cc @@ -42,7 +42,7 @@ LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) { * register liveness. That is the responsibility of the caller. */ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) { - rl_src = UpdateLoc(rl_src); + rl_src = rl_src.wide ? UpdateLocWide(rl_src) : UpdateLoc(rl_src); if (rl_src.location == kLocPhysReg) { OpRegCopy(r_dest, rl_src.reg); } else if (IsInexpensiveConstant(rl_src)) { @@ -53,11 +53,15 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) { DCHECK((rl_src.location == kLocDalvikFrame) || (rl_src.location == kLocCompilerTemp)); ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); + OpSize op_size; if (rl_src.ref) { - LoadRefDisp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, kNotVolatile); + op_size = kReference; + } else if (rl_src.wide) { + op_size = k64; } else { - Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest); + op_size = k32; } + LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, op_size, kNotVolatile); } } @@ -337,7 +341,11 @@ void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) { /* Utilities to load the current Method* */ void Mir2Lir::LoadCurrMethodDirect(RegStorage r_tgt) { - LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt); + if (GetCompilationUnit()->target64) { + 
LoadValueDirectWideFixed(mir_graph_->GetMethodLoc(), r_tgt); + } else { + LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt); + } } RegStorage Mir2Lir::LoadCurrMethodWithHint(RegStorage r_hint) { @@ -355,7 +363,9 @@ RegStorage Mir2Lir::LoadCurrMethodWithHint(RegStorage r_hint) { } RegLocation Mir2Lir::LoadCurrMethod() { - return LoadValue(mir_graph_->GetMethodLoc(), kRefReg); + return GetCompilationUnit()->target64 ? + LoadValueWide(mir_graph_->GetMethodLoc(), kCoreReg) : + LoadValue(mir_graph_->GetMethodLoc(), kRefReg); } RegLocation Mir2Lir::ForceTemp(RegLocation loc) { diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index 3d253842c9..da12d8e3bf 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -18,6 +18,7 @@ #include "codegen_mips.h" +#include "art_method.h" #include "base/logging.h" #include "dex/mir_graph.h" #include "dex/quick/dex_file_to_method_inliner_map.h" @@ -26,7 +27,6 @@ #include "entrypoints/quick/quick_entrypoints.h" #include "gc/accounting/card_table.h" #include "mips_lir.h" -#include "mirror/art_method.h" #include "mirror/object_array-inl.h" namespace art { @@ -407,12 +407,12 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state, RegStorage arg0_ref = cg->TargetReg(kArg0, kRef); switch (state) { case 0: { // Grab target method* from thread pointer - cg->LoadRefDisp(cg->TargetPtrReg(kSelf), info->string_init_offset, arg0_ref, kNotVolatile); + cg->LoadWordDisp(cg->TargetPtrReg(kSelf), info->string_init_offset, arg0_ref); break; } case 1: // Grab the code from the method* if (direct_code == 0) { - int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset( InstructionSetPointerSize(cu->instruction_set)).Int32Value(); cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt)); } @@ -454,7 +454,7 @@ static int NextSDCallInsn(CompilationUnit* cu, 
CallInfo* info, int state, break; case 1: // Get method->dex_cache_resolved_methods_ cg->LoadRefDisp(arg0_ref, - mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), + ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), arg0_ref, kNotVolatile); // Set up direct code if known. @@ -471,17 +471,18 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state, } } break; - case 2: // Grab target method* + case 2: { + // Grab target method* CHECK_EQ(cu->dex_file, target_method.dex_file); - cg->LoadRefDisp(arg0_ref, - mirror::ObjectArray<mirror::Object>:: - OffsetOfElement(target_method.dex_method_index).Int32Value(), - arg0_ref, - kNotVolatile); + const size_t pointer_size = GetInstructionSetPointerSize(cu->instruction_set); + cg->LoadWordDisp(arg0_ref, + mirror::Array::DataOffset(pointer_size).Uint32Value() + + target_method.dex_method_index * pointer_size, arg0_ref); break; + } case 3: // Grab the code from the method* if (direct_code == 0) { - int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset( InstructionSetPointerSize(cu->instruction_set)).Int32Value(); // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt] cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt)); diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index e3e87ecb13..7ca03cf0ee 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -1232,6 +1232,10 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) { ResetRegPool(); int start_vreg = mir_graph_->GetFirstInVR(); AppendLIR(NewLIR0(kPseudoPrologueBegin)); + DCHECK_EQ(cu_->target64, Is64BitInstructionSet(cu_->instruction_set)); + if (cu_->target64) { + DCHECK(mir_graph_->GetMethodLoc().wide); + } GenEntrySequence(&mir_graph_->reg_location_[start_vreg], mir_graph_->GetMethodLoc()); AppendLIR(NewLIR0(kPseudoPrologueEnd)); 
DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_); diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index d54616f47c..73787e958e 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -982,12 +982,11 @@ class Mir2Lir { } // Load a reference at base + displacement and decompress into register. LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest, - VolatileKind is_volatile) { + VolatileKind is_volatile) { return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile); } // Load a reference at base + index and decompress into register. - LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, - int scale) { + LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale) { return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference); } // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress. @@ -1008,12 +1007,11 @@ class Mir2Lir { } // Store an uncompressed reference into a compressed 32-bit container. LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src, - VolatileKind is_volatile) { + VolatileKind is_volatile) { return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile); } // Store an uncompressed reference into a compressed 32-bit container by index. - LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, - int scale) { + LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) { return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference); } // Store 32 bits, regardless of target. @@ -1117,8 +1115,10 @@ class Mir2Lir { * @param dex_file the dex file associated with the target dex cache. * @param offset the offset of the element in the fixed dex cache arrays' layout. * @param r_dest the register where to load the element. + * @param wide, load 64 bits if true, otherwise 32 bits. 
*/ - virtual void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest); + virtual void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, + bool wide); // Routines that work for the generic case, but may be overriden by target. /* diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc index b3c73557a7..8694ebc738 100644 --- a/compiler/dex/quick/quick_cfi_test.cc +++ b/compiler/dex/quick/quick_cfi_test.cc @@ -100,7 +100,8 @@ class QuickCFITest : public CFITest { } } m2l->AdjustSpillMask(); - m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef()); + m2l->GenEntrySequence(nullptr, m2l->GetCompilationUnit()->target64 ? + m2l->LocCReturnWide() : m2l->LocCReturnRef()); m2l->GenExitSequence(); m2l->HandleSlowPaths(); m2l->AssembleLIR(); diff --git a/compiler/dex/quick/quick_cfi_test_expected.inc b/compiler/dex/quick/quick_cfi_test_expected.inc index 48109d2f44..52d66a40a8 100644 --- a/compiler/dex/quick/quick_cfi_test_expected.inc +++ b/compiler/dex/quick/quick_cfi_test_expected.inc @@ -34,7 +34,7 @@ static constexpr uint8_t expected_cfi_kThumb2[] = { static constexpr uint8_t expected_asm_kArm64[] = { 0xFF, 0x03, 0x01, 0xD1, 0xE8, 0xA7, 0x01, 0x6D, 0xF3, 0xD3, 0x02, 0xA9, - 0xFE, 0x1F, 0x00, 0xF9, 0xE0, 0x03, 0x00, 0xB9, 0xE8, 0xA7, 0x41, 0x6D, + 0xFE, 0x1F, 0x00, 0xF9, 0xE0, 0x03, 0x00, 0xF9, 0xE8, 0xA7, 0x41, 0x6D, 0xF3, 0xD3, 0x42, 0xA9, 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6, }; @@ -54,7 +54,7 @@ static constexpr uint8_t expected_cfi_kArm64[] = { // 0x0000000c: .cfi_offset: r20 at cfa-16 // 0x0000000c: str lr, [sp, #56] // 0x00000010: .cfi_offset: r30 at cfa-8 -// 0x00000010: str w0, [sp] +// 0x00000010: str x0, [sp] // 0x00000014: .cfi_remember_state // 0x00000014: ldp d8, d9, [sp, #24] // 0x00000018: .cfi_restore_extended: r72 @@ -101,15 +101,15 @@ static constexpr uint8_t expected_cfi_kX86[] = { static constexpr uint8_t expected_asm_kX86_64[] = 
{ 0x48, 0x83, 0xEC, 0x38, 0x48, 0x89, 0x5C, 0x24, 0x28, 0x48, 0x89, 0x6C, 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, - 0x11, 0x6C, 0x24, 0x20, 0x48, 0x8B, 0xC7, 0x89, 0x3C, 0x24, 0x48, 0x8B, - 0x5C, 0x24, 0x28, 0x48, 0x8B, 0x6C, 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x10, - 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, 0x20, 0x48, 0x83, - 0xC4, 0x38, 0xC3, 0x00, + 0x11, 0x6C, 0x24, 0x20, 0x48, 0x8B, 0xC7, 0x48, 0x89, 0x3C, 0x24, 0x48, + 0x8B, 0x5C, 0x24, 0x28, 0x48, 0x8B, 0x6C, 0x24, 0x30, 0xF2, 0x44, 0x0F, + 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, 0x20, 0x48, + 0x83, 0xC4, 0x38, 0xC3, }; static constexpr uint8_t expected_cfi_kX86_64[] = { 0x44, 0x0E, 0x40, 0x45, 0x83, 0x06, 0x45, 0x86, 0x04, 0x47, 0x9D, 0x0A, - 0x47, 0x9E, 0x08, 0x46, 0x0A, 0x45, 0xC3, 0x45, 0xC6, 0x47, 0xDD, 0x47, - 0xDE, 0x44, 0x0E, 0x08, 0x42, 0x0B, 0x0E, 0x40, + 0x47, 0x9E, 0x08, 0x47, 0x0A, 0x45, 0xC3, 0x45, 0xC6, 0x47, 0xDD, 0x47, + 0xDE, 0x44, 0x0E, 0x08, 0x41, 0x0B, 0x0E, 0x40, }; // 0x00000000: subq rsp, 56 // 0x00000004: .cfi_def_cfa_offset: 64 @@ -122,20 +122,19 @@ static constexpr uint8_t expected_cfi_kX86_64[] = { // 0x00000015: movsd [rsp + 32], xmm13 // 0x0000001c: .cfi_offset: r30 at cfa-32 // 0x0000001c: movq rax, rdi -// 0x0000001f: mov [rsp], edi -// 0x00000022: .cfi_remember_state -// 0x00000022: movq rbx, [rsp + 40] -// 0x00000027: .cfi_restore: r3 -// 0x00000027: movq rbp, [rsp + 48] -// 0x0000002c: .cfi_restore: r6 -// 0x0000002c: movsd xmm12, [rsp + 24] -// 0x00000033: .cfi_restore: r29 -// 0x00000033: movsd xmm13, [rsp + 32] -// 0x0000003a: .cfi_restore: r30 -// 0x0000003a: addq rsp, 56 -// 0x0000003e: .cfi_def_cfa_offset: 8 -// 0x0000003e: ret -// 0x0000003f: addb al, al +// 0x0000001f: movq [rsp], rdi +// 0x00000023: .cfi_remember_state +// 0x00000023: movq rbx, [rsp + 40] +// 0x00000028: .cfi_restore: r3 +// 0x00000028: movq rbp, [rsp + 48] +// 0x0000002d: .cfi_restore: r6 +// 0x0000002d: movsd xmm12, [rsp + 24] +// 
0x00000034: .cfi_restore: r29 +// 0x00000034: movsd xmm13, [rsp + 32] +// 0x0000003b: .cfi_restore: r30 +// 0x0000003b: addq rsp, 56 +// 0x0000003f: .cfi_def_cfa_offset: 8 +// 0x0000003f: ret // 0x00000040: .cfi_restore_state // 0x00000040: .cfi_def_cfa_offset: 64 @@ -172,7 +171,7 @@ static constexpr uint8_t expected_cfi_kMips[] = { // 0x00000028: .cfi_restore: r31 // 0x00000028: addiu r29, r29, 64 // 0x0000002c: .cfi_def_cfa_offset: 0 -// 0x0000002c: jalr r0, r31 +// 0x0000002c: jr r31 // 0x00000030: nop // 0x00000034: .cfi_restore_state // 0x00000034: .cfi_def_cfa_offset: 64 @@ -180,7 +179,7 @@ static constexpr uint8_t expected_cfi_kMips[] = { static constexpr uint8_t expected_asm_kMips64[] = { 0xE8, 0xFF, 0xBD, 0x67, 0x10, 0x00, 0xB2, 0xFF, 0x08, 0x00, 0xB3, 0xFF, 0x00, 0x00, 0xBF, 0xFF, 0xD8, 0xFF, 0xBD, 0x67, 0x25, 0x10, 0x80, 0x00, - 0x00, 0x00, 0xA4, 0xAF, 0x38, 0x00, 0xB2, 0xDF, 0x30, 0x00, 0xB3, 0xDF, + 0x00, 0x00, 0xA4, 0xFF, 0x38, 0x00, 0xB2, 0xDF, 0x30, 0x00, 0xB3, 0xDF, 0x28, 0x00, 0xBF, 0xDF, 0x40, 0x00, 0xBD, 0x67, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00, }; @@ -200,7 +199,7 @@ static constexpr uint8_t expected_cfi_kMips64[] = { // 0x00000010: daddiu r29, r29, -40 // 0x00000014: .cfi_def_cfa_offset: 64 // 0x00000014: or r2, r4, r0 -// 0x00000018: sw r4, +0(r29) +// 0x00000018: sd r4, +0(r29) // 0x0000001c: .cfi_remember_state // 0x0000001c: ld r18, +56(r29) // 0x00000020: .cfi_restore: r18 @@ -214,4 +213,3 @@ static constexpr uint8_t expected_cfi_kMips64[] = { // 0x00000030: nop // 0x00000034: .cfi_restore_state // 0x00000034: .cfi_def_cfa_offset: 64 - diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc index 7ca438225f..58236e2216 100644 --- a/compiler/dex/quick/quick_compiler.cc +++ b/compiler/dex/quick/quick_compiler.cc @@ -18,6 +18,7 @@ #include <cstdint> +#include "art_method-inl.h" #include "base/dumpable.h" #include "base/logging.h" #include "base/macros.h" @@ -37,7 +38,6 @@ #include 
"elf_writer_quick.h" #include "jni/quick/jni_compiler.h" #include "mir_to_lir.h" -#include "mirror/art_method-inl.h" #include "mirror/object.h" #include "runtime.h" @@ -787,7 +787,7 @@ CompiledMethod* QuickCompiler::JniCompile(uint32_t access_flags, return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file); } -uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const { +uintptr_t QuickCompiler::GetEntryPointOf(ArtMethod* method) const { return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize( InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet()))); } diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h index 8d2c324a70..43dd5786af 100644 --- a/compiler/dex/quick/quick_compiler.h +++ b/compiler/dex/quick/quick_compiler.h @@ -49,7 +49,7 @@ class QuickCompiler : public Compiler { uint32_t method_idx, const DexFile& dex_file) const OVERRIDE; - uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE + uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit); diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index 249575761e..43167a187f 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -18,13 +18,13 @@ #include "codegen_x86.h" +#include "art_method.h" #include "base/logging.h" #include "dex/quick/dex_file_to_method_inliner_map.h" #include "dex/quick/mir_to_lir-inl.h" #include "driver/compiler_driver.h" #include "driver/compiler_options.h" #include "gc/accounting/card_table.h" -#include "mirror/art_method.h" #include "mirror/object_array-inl.h" #include "utils/dex_cache_arrays_layout-inl.h" #include "x86_lir.h" @@ -379,7 +379,8 @@ int X86Mir2Lir::X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info, case 0: { CHECK_EQ(cu->dex_file, 
target_method.dex_file); size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index); - cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, cg->TargetReg(kArg0, kRef)); + cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, cg->TargetReg(kArg0, kRef), + cu->target64); break; } default: @@ -394,18 +395,20 @@ int X86Mir2Lir::X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info, break; case 1: // Get method->dex_cache_resolved_methods_ cg->LoadRefDisp(arg0_ref, - mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), + ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), arg0_ref, kNotVolatile); break; - case 2: // Grab target method* + case 2: { + // Grab target method* CHECK_EQ(cu->dex_file, target_method.dex_file); - cg->LoadRefDisp(arg0_ref, - mirror::ObjectArray<mirror::Object>::OffsetOfElement( - target_method.dex_method_index).Int32Value(), - arg0_ref, - kNotVolatile); + const size_t pointer_size = GetInstructionSetPointerSize(cu->instruction_set); + cg->LoadWordDisp(arg0_ref, + mirror::Array::DataOffset(pointer_size).Uint32Value() + + target_method.dex_method_index * pointer_size, + arg0_ref); break; + } default: return -1; } diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h index 5a465203bc..11d9d4ac34 100644 --- a/compiler/dex/quick/x86/codegen_x86.h +++ b/compiler/dex/quick/x86/codegen_x86.h @@ -105,7 +105,8 @@ class X86Mir2Lir FINAL : public Mir2Lir { void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE; bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE; - void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest) OVERRIDE; + void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, bool wide) + OVERRIDE; void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE; diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 9bbb5f84be..d993d934a5 100755 --- 
a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -18,11 +18,11 @@ #include "codegen_x86.h" +#include "art_method.h" #include "base/bit_utils.h" #include "base/logging.h" #include "dex/quick/mir_to_lir-inl.h" #include "dex/reg_storage_eq.h" -#include "mirror/art_method.h" #include "mirror/array-inl.h" #include "x86_lir.h" @@ -1410,16 +1410,18 @@ RegStorage X86Mir2Lir::GetPcAndAnchor(LIR** anchor, RegStorage r_tmp) { } } -void X86Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, - RegStorage r_dest) { +void X86Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, + bool wide) { if (cu_->target64) { - LIR* mov = NewLIR3(kX86Mov32RM, r_dest.GetReg(), kRIPReg, kDummy32BitOffset); + LIR* mov = NewLIR3(wide ? kX86Mov64RM : kX86Mov32RM, r_dest.GetReg(), kRIPReg, + kDummy32BitOffset); mov->flags.fixup = kFixupLabel; mov->operands[3] = WrapPointer(dex_file); mov->operands[4] = offset; mov->target = mov; // Used for pc_insn_offset (not used by x86-64 relative patcher). dex_cache_access_insns_.push_back(mov); } else { + CHECK(!wide) << "Unsupported"; // Get the PC to a register and get the anchor. Use r_dest for the temp if needed. 
LIR* anchor; RegStorage r_pc = GetPcAndAnchor(&anchor, r_dest); @@ -3022,20 +3024,20 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, if (rl_method.location == kLocPhysReg) { if (use_declaring_class) { - LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + LoadRefDisp(rl_method.reg, ArtMethod::DeclaringClassOffset().Int32Value(), check_class, kNotVolatile); } else { - LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + LoadRefDisp(rl_method.reg, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), check_class, kNotVolatile); LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile); } } else { LoadCurrMethodDirect(check_class); if (use_declaring_class) { - LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + LoadRefDisp(check_class, ArtMethod::DeclaringClassOffset().Int32Value(), check_class, kNotVolatile); } else { - LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + LoadRefDisp(check_class, ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), check_class, kNotVolatile); LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile); } @@ -3059,7 +3061,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, } void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, - RegLocation rl_lhs, RegLocation rl_rhs, int flags) { + RegLocation rl_lhs, RegLocation rl_rhs, int flags) { OpKind op = kOpBkpt; bool is_div_rem = false; bool unary = false; diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index 2f211da264..c62cd47315 100755 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -21,6 +21,7 @@ #include <string> #include "arch/instruction_set_features.h" +#include "art_method.h" #include "backend_x86.h" #include "base/logging.h" #include 
"dex/compiler_ir.h" @@ -28,7 +29,6 @@ #include "dex/reg_storage_eq.h" #include "driver/compiler_driver.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" #include "mirror/string.h" #include "oat.h" #include "x86_lir.h" @@ -744,6 +744,7 @@ void X86Mir2Lir::SpillCoreRegs() { const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32; for (int reg = 0; mask != 0u; mask >>= 1, reg++) { if ((mask & 0x1) != 0u) { + DCHECK_NE(offset, 0) << "offset 0 should be for method"; RegStorage r_src = cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg); StoreBaseDisp(rs_rSP, offset, r_src, size, kNotVolatile); cfi_.RelOffset(DwarfCoreReg(cu_->target64, reg), offset); @@ -1026,7 +1027,7 @@ LIR* X86Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) { call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType()); } else { call_insn = OpMem(kOpBlx, TargetReg(kArg0, kRef), - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + ArtMethod::EntryPointFromQuickCompiledCodeOffset( cu_->target64 ? 8 : 4).Int32Value()); } } else { @@ -1103,7 +1104,7 @@ void X86Mir2Lir::InstallLiteralPools() { // PC-relative references to dex cache arrays. for (LIR* p : dex_cache_access_insns_) { - DCHECK(p->opcode == kX86Mov32RM); + DCHECK(p->opcode == kX86Mov32RM || p->opcode == kX86Mov64RM); const DexFile* dex_file = UnwrapPointer<DexFile>(p->operands[3]); uint32_t offset = p->operands[4]; // The offset to patch is the last 4 bytes of the instruction. diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h index 57db0158e4..d6a6a60d3d 100644 --- a/compiler/dex/quick/x86/x86_lir.h +++ b/compiler/dex/quick/x86/x86_lir.h @@ -82,7 +82,7 @@ namespace art { * | IN[ins-1] | {Note: resides in caller's frame} * | . 
| * | IN[0] | - * | caller's Method* | + * | caller's ArtMethod* | * +===========================+ {Note: start of callee's frame} * | return address | {pushed by call} * | spill region | {variable sized} @@ -104,7 +104,7 @@ namespace art { * | OUT[outs-2] | * | . | * | OUT[0] | - * | StackReference<ArtMethod> | <<== sp w/ 16-byte alignment + * | ArtMethod* | <<== sp w/ 16-byte alignment * +===========================+ */ diff --git a/compiler/dex/type_inference.cc b/compiler/dex/type_inference.cc index cd6467f4e5..a0dfcbee5b 100644 --- a/compiler/dex/type_inference.cc +++ b/compiler/dex/type_inference.cc @@ -686,8 +686,8 @@ TypeInference::CheckCastData* TypeInference::InitializeCheckCastData(MIRGraph* m void TypeInference::InitializeSRegs() { std::fill_n(sregs_, num_sregs_, Type::Unknown()); - /* Treat ArtMethod* as a normal reference */ - sregs_[mir_graph_->GetMethodSReg()] = Type::NonArrayRefType(); + /* Treat ArtMethod* specially since they are pointer sized */ + sregs_[mir_graph_->GetMethodSReg()] = Type::ArtMethodType(cu_->target64); // Initialize parameter SSA regs at method entry. int32_t entry_param_s_reg = mir_graph_->GetFirstInVR(); diff --git a/compiler/dex/type_inference.h b/compiler/dex/type_inference.h index 85f79af36e..adc3b54bc9 100644 --- a/compiler/dex/type_inference.h +++ b/compiler/dex/type_inference.h @@ -81,6 +81,10 @@ class TypeInference : public DeletableArenaObject<kArenaAllocMisc> { return Type(kFlagLowWord | kFlagNarrow | kFlagRef); } + static Type ArtMethodType(bool wide) { + return Type(kFlagLowWord | kFlagRef | (wide ? 
kFlagWide : kFlagNarrow)); + } + static Type ObjectArrayType() { return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayRef); diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc index e788261ad0..ac7a4a7758 100644 --- a/compiler/dex/verified_method.cc +++ b/compiler/dex/verified_method.cc @@ -20,12 +20,12 @@ #include <memory> #include <vector> +#include "art_method-inl.h" #include "base/logging.h" #include "base/stl_util.h" #include "dex_file.h" #include "dex_instruction-inl.h" #include "dex_instruction_utils.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" #include "mirror/object-inl.h" @@ -212,7 +212,7 @@ bool VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verif if (is_virtual_quick || is_range_quick) { uint32_t dex_pc = inst->GetDexPc(insns); verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc); - mirror::ArtMethod* method = + ArtMethod* method = method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick, true); if (method == nullptr) { // It can be null if the line wasn't verified since it was unreachable. @@ -284,20 +284,24 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier // We can't devirtualize abstract classes except on arrays of abstract classes. continue; } - mirror::ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod( - is_range ? inst->VRegB_3rc() : inst->VRegB_35c()); + auto* cl = Runtime::Current()->GetClassLinker(); + size_t pointer_size = cl->GetImagePointerSize(); + ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod( + is_range ? inst->VRegB_3rc() : inst->VRegB_35c(), pointer_size); if (abstract_method == nullptr) { // If the method is not found in the cache this means that it was never found // by ResolveMethodAndCheckAccess() called when verifying invoke_*. 
continue; } // Find the concrete method. - mirror::ArtMethod* concrete_method = nullptr; + ArtMethod* concrete_method = nullptr; if (is_interface) { - concrete_method = reg_type.GetClass()->FindVirtualMethodForInterface(abstract_method); + concrete_method = reg_type.GetClass()->FindVirtualMethodForInterface( + abstract_method, pointer_size); } if (is_virtual) { - concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(abstract_method); + concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual( + abstract_method, pointer_size); } if (concrete_method == nullptr || concrete_method->IsAbstract()) { // In cases where concrete_method is not found, or is abstract, continue to the next invoke. diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h index e54cbf6fb6..b25e967609 100644 --- a/compiler/driver/compiler_driver-inl.h +++ b/compiler/driver/compiler_driver-inl.h @@ -20,8 +20,9 @@ #include "compiler_driver.h" #include "art_field-inl.h" +#include "art_method-inl.h" +#include "class_linker-inl.h" #include "dex_compilation_unit.h" -#include "mirror/art_method-inl.h" #include "mirror/class_loader.h" #include "mirror/dex_cache-inl.h" #include "scoped_thread_state_change.h" @@ -133,7 +134,7 @@ inline bool CompilerDriver::CanAccessResolvedMember(mirror::Class* referrer_clas ArtMember* member ATTRIBUTE_UNUSED, mirror::DexCache* dex_cache ATTRIBUTE_UNUSED, uint32_t field_idx ATTRIBUTE_UNUSED) { - // Not defined for ArtMember values other than ArtField or mirror::ArtMethod. + // Not defined for ArtMember values other than ArtField or ArtMethod. 
UNREACHABLE(); } @@ -147,10 +148,10 @@ inline bool CompilerDriver::CanAccessResolvedMember<ArtField>(mirror::Class* ref } template <> -inline bool CompilerDriver::CanAccessResolvedMember<mirror::ArtMethod>( +inline bool CompilerDriver::CanAccessResolvedMember<ArtMethod>( mirror::Class* referrer_class, mirror::Class* access_to, - mirror::ArtMethod* method, + ArtMethod* method, mirror::DexCache* dex_cache, uint32_t field_idx) { return referrer_class->CanAccessResolvedMethod(access_to, method, dex_cache, field_idx); @@ -217,7 +218,7 @@ inline std::pair<bool, bool> CompilerDriver::IsFastStaticField( inline bool CompilerDriver::IsClassOfStaticMethodAvailableToReferrer( mirror::DexCache* dex_cache, mirror::Class* referrer_class, - mirror::ArtMethod* resolved_method, uint16_t method_idx, uint32_t* storage_index) { + ArtMethod* resolved_method, uint16_t method_idx, uint32_t* storage_index) { std::pair<bool, bool> result = IsClassOfStaticMemberAvailableToReferrer( dex_cache, referrer_class, resolved_method, method_idx, storage_index); // Only the first member of `result` is meaningful, as there is no @@ -239,15 +240,14 @@ inline bool CompilerDriver::IsStaticFieldsClassInitialized(mirror::Class* referr return fields_class == referrer_class || fields_class->IsInitialized(); } -inline mirror::ArtMethod* CompilerDriver::ResolveMethod( +inline ArtMethod* CompilerDriver::ResolveMethod( ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) { DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile()); DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())); - mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod( - *mUnit->GetDexFile(), method_idx, dex_cache, class_loader, NullHandle<mirror::ArtMethod>(), - invoke_type); + ArtMethod* resolved_method = 
mUnit->GetClassLinker()->ResolveMethod( + *mUnit->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type); DCHECK_EQ(resolved_method == nullptr, soa.Self()->IsExceptionPending()); if (UNLIKELY(resolved_method == nullptr)) { // Clean up any exception left by type resolution. @@ -263,7 +263,7 @@ inline mirror::ArtMethod* CompilerDriver::ResolveMethod( } inline void CompilerDriver::GetResolvedMethodDexFileLocation( - mirror::ArtMethod* resolved_method, const DexFile** declaring_dex_file, + ArtMethod* resolved_method, const DexFile** declaring_dex_file, uint16_t* declaring_class_idx, uint16_t* declaring_method_idx) { mirror::Class* declaring_class = resolved_method->GetDeclaringClass(); *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile(); @@ -272,7 +272,7 @@ inline void CompilerDriver::GetResolvedMethodDexFileLocation( } inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex( - mirror::ArtMethod* resolved_method, InvokeType type) { + ArtMethod* resolved_method, InvokeType type) { if (type == kVirtual || type == kSuper) { return resolved_method->GetMethodIndex(); } else if (type == kInterface) { @@ -285,7 +285,7 @@ inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex( inline int CompilerDriver::IsFastInvoke( ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, - mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type, + mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type, MethodReference* target_method, const MethodReference* devirt_target, uintptr_t* direct_code, uintptr_t* direct_method) { // Don't try to fast-path if we don't understand the caller's class. 
@@ -305,10 +305,12 @@ inline int CompilerDriver::IsFastInvoke( (*invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal()); // For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of // the super class. + const size_t pointer_size = InstructionSetPointerSize(GetInstructionSet()); bool can_sharpen_super_based_on_type = same_dex_file && (*invoke_type == kSuper) && (referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) && resolved_method->GetMethodIndex() < methods_class->GetVTableLength() && - (methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method) && + (methods_class->GetVTableEntry( + resolved_method->GetMethodIndex(), pointer_size) == resolved_method) && !resolved_method->IsAbstract(); if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) { @@ -316,7 +318,8 @@ inline int CompilerDriver::IsFastInvoke( // dex cache, check that this resolved method is where we expect it. CHECK_EQ(target_method->dex_file, mUnit->GetDexFile()); DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())); - CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index), + CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod( + target_method->dex_method_index, pointer_size), resolved_method) << PrettyMethod(resolved_method); int stats_flags = kFlagMethodResolved; GetCodeAndMethodForDirectCall(/*out*/invoke_type, @@ -336,21 +339,18 @@ inline int CompilerDriver::IsFastInvoke( if ((*invoke_type == kVirtual || *invoke_type == kInterface) && devirt_target != nullptr) { // Post-verification callback recorded a more precise invoke target based on its type info. 
- mirror::ArtMethod* called_method; + ArtMethod* called_method; ClassLinker* class_linker = mUnit->GetClassLinker(); if (LIKELY(devirt_target->dex_file == mUnit->GetDexFile())) { - called_method = class_linker->ResolveMethod(*devirt_target->dex_file, - devirt_target->dex_method_index, dex_cache, - class_loader, NullHandle<mirror::ArtMethod>(), - kVirtual); + called_method = class_linker->ResolveMethod( + *devirt_target->dex_file, devirt_target->dex_method_index, dex_cache, class_loader, + nullptr, kVirtual); } else { StackHandleScope<1> hs(soa.Self()); - Handle<mirror::DexCache> target_dex_cache( - hs.NewHandle(class_linker->FindDexCache(*devirt_target->dex_file))); - called_method = class_linker->ResolveMethod(*devirt_target->dex_file, - devirt_target->dex_method_index, - target_dex_cache, class_loader, - NullHandle<mirror::ArtMethod>(), kVirtual); + auto target_dex_cache(hs.NewHandle(class_linker->FindDexCache(*devirt_target->dex_file))); + called_method = class_linker->ResolveMethod( + *devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache, + class_loader, nullptr, kVirtual); } CHECK(called_method != nullptr); CHECK(!called_method->IsAbstract()); @@ -389,7 +389,7 @@ inline int CompilerDriver::IsFastInvoke( } inline bool CompilerDriver::IsMethodsClassInitialized(mirror::Class* referrer_class, - mirror::ArtMethod* resolved_method) { + ArtMethod* resolved_method) { if (!resolved_method->IsStatic()) { return true; } diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 7cc5aae8c7..e963c12402 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -28,6 +28,7 @@ #endif #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/stl_util.h" #include "base/time_utils.h" #include "base/timing_logger.h" @@ -50,8 +51,8 @@ #include "runtime.h" #include "gc/accounting/card_table-inl.h" #include "gc/accounting/heap_bitmap.h" +#include "gc/space/image_space.h" #include 
"gc/space/space.h" -#include "mirror/art_method-inl.h" #include "mirror/class_loader.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" @@ -542,7 +543,7 @@ DexToDexCompilationLevel CompilerDriver::GetDexToDexCompilationlevel( } } -void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings) { +void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) { DCHECK(!Runtime::Current()->IsStarted()); jobject jclass_loader; const DexFile* dex_file; @@ -586,7 +587,7 @@ void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingL self->TransitionFromSuspendedToRunnable(); } -CompiledMethod* CompilerDriver::CompileMethod(Thread* self, mirror::ArtMethod* method) { +CompiledMethod* CompilerDriver::CompileMethod(Thread* self, ArtMethod* method) { const uint32_t method_idx = method->GetDexMethodIndex(); const uint32_t access_flags = method->GetAccessFlags(); const InvokeType invoke_type = method->GetInvokeType(); @@ -688,8 +689,8 @@ bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const return methods_to_compile_->find(tmp.c_str()) != methods_to_compile_->end(); } -static void ResolveExceptionsForMethod(MutableHandle<mirror::ArtMethod> method_handle, - std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve) +static void ResolveExceptionsForMethod( + ArtMethod* method_handle, std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = method_handle->GetCodeItem(); if (code_item == nullptr) { @@ -728,17 +729,14 @@ static void ResolveExceptionsForMethod(MutableHandle<mirror::ArtMethod> method_h static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - std::set<std::pair<uint16_t, const DexFile*>>* exceptions_to_resolve = + auto* exceptions_to_resolve = 
reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg); - StackHandleScope<1> hs(Thread::Current()); - MutableHandle<mirror::ArtMethod> method_handle(hs.NewHandle<mirror::ArtMethod>(nullptr)); - for (size_t i = 0; i < c->NumVirtualMethods(); ++i) { - method_handle.Assign(c->GetVirtualMethod(i)); - ResolveExceptionsForMethod(method_handle, *exceptions_to_resolve); + const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + for (auto& m : c->GetVirtualMethods(pointer_size)) { + ResolveExceptionsForMethod(&m, *exceptions_to_resolve); } - for (size_t i = 0; i < c->NumDirectMethods(); ++i) { - method_handle.Assign(c->GetDirectMethod(i)); - ResolveExceptionsForMethod(method_handle, *exceptions_to_resolve); + for (auto& m : c->GetDirectMethods(pointer_size)) { + ResolveExceptionsForMethod(&m, *exceptions_to_resolve); } return true; } @@ -826,6 +824,7 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c, // Make a copy of the handle so that we don't clobber it doing Assign. 
MutableHandle<mirror::Class> klass(hs.NewHandle(c.Get())); std::string temp; + const size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); while (!klass->IsObjectClass()) { const char* descriptor = klass->GetDescriptor(&temp); std::pair<std::unordered_set<std::string>::iterator, bool> result = @@ -839,6 +838,12 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c, MaybeAddToImageClasses(hs2.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)), image_classes); } + for (auto& m : c->GetVirtualMethods(pointer_size)) { + if (m.IsMiranda() || (true)) { + StackHandleScope<1> hs2(self); + MaybeAddToImageClasses(hs2.NewHandle(m.GetDeclaringClass()), image_classes); + } + } if (klass->IsArrayClass()) { StackHandleScope<1> hs2(self); MaybeAddToImageClasses(hs2.NewHandle(klass->GetComponentType()), image_classes); @@ -855,10 +860,7 @@ class ClinitImageUpdate { Thread* self, ClassLinker* linker, std::string* error_msg) { std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(image_class_descriptors, self, linker)); - if (res->art_method_class_ == nullptr) { - *error_msg = "Could not find ArtMethod class."; - return nullptr; - } else if (res->dex_cache_class_ == nullptr) { + if (res->dex_cache_class_ == nullptr) { *error_msg = "Could not find DexCache class."; return nullptr; } @@ -903,8 +905,6 @@ class ClinitImageUpdate { old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure"); // Find the interesting classes. - art_method_class_ = linker->LookupClass(self, "Ljava/lang/reflect/ArtMethod;", - ComputeModifiedUtf8Hash("Ljava/lang/reflect/ArtMethod;"), nullptr); dex_cache_class_ = linker->LookupClass(self, "Ljava/lang/DexCache;", ComputeModifiedUtf8Hash("Ljava/lang/DexCache;"), nullptr); @@ -922,7 +922,8 @@ class ClinitImageUpdate { data->image_classes_.push_back(klass); } else { // Check whether it is initialized and has a clinit. They must be kept, too. 
- if (klass->IsInitialized() && klass->FindClassInitializer() != nullptr) { + if (klass->IsInitialized() && klass->FindClassInitializer( + Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) { data->image_classes_.push_back(klass); } } @@ -950,9 +951,9 @@ class ClinitImageUpdate { VisitClinitClassesObject(object->GetClass()); } - // If it is not a dex cache or an ArtMethod, visit all references. + // If it is not a DexCache, visit all references. mirror::Class* klass = object->GetClass(); - if (klass != art_method_class_ && klass != dex_cache_class_) { + if (klass != dex_cache_class_) { object->VisitReferences<false /* visit class */>(*this, *this); } } @@ -960,7 +961,6 @@ class ClinitImageUpdate { mutable std::unordered_set<mirror::Object*> marked_objects_; std::unordered_set<std::string>* const image_class_descriptors_; std::vector<mirror::Class*> image_classes_; - const mirror::Class* art_method_class_; const mirror::Class* dex_cache_class_; Thread* const self_; const char* old_cause_; @@ -1334,7 +1334,7 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type, bool no_guarantee_of_dex_cache_entry, const mirror::Class* referrer_class, - mirror::ArtMethod* method, + ArtMethod* method, int* stats_flags, MethodReference* target_method, uintptr_t* direct_code, @@ -1347,6 +1347,8 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType *direct_method = 0; Runtime* const runtime = Runtime::Current(); gc::Heap* const heap = runtime->GetHeap(); + auto* cl = runtime->GetClassLinker(); + const auto pointer_size = cl->GetImagePointerSize(); bool use_dex_cache = GetCompilerOptions().GetCompilePic(); // Off by default const bool compiling_boot = heap->IsCompilingBoot(); // TODO This is somewhat hacky. We should refactor all of this invoke codepath. 
@@ -1375,7 +1377,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType if (runtime->UseJit()) { // If we are the JIT, then don't allow a direct call to the interpreter bridge since this will // never be updated even after we compile the method. - if (runtime->GetClassLinker()->IsQuickToInterpreterBridge( + if (cl->IsQuickToInterpreterBridge( reinterpret_cast<const void*>(compiler_->GetEntryPointOf(method)))) { use_dex_cache = true; } @@ -1389,8 +1391,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType is_in_image = IsImageClass(method->GetDeclaringClassDescriptor()); } else { is_in_image = instruction_set_ != kX86 && instruction_set_ != kX86_64 && - Runtime::Current()->GetHeap()->FindSpaceFromObject(method->GetDeclaringClass(), - false)->IsImageSpace(); + heap->FindSpaceFromObject(method->GetDeclaringClass(), false)->IsImageSpace(); } if (!is_in_image) { // We can only branch directly to Methods that are resolved in the DexCache. @@ -1403,14 +1404,14 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType bool must_use_direct_pointers = false; mirror::DexCache* dex_cache = declaring_class->GetDexCache(); if (target_method->dex_file == dex_cache->GetDexFile() && - !(runtime->UseJit() && dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) == nullptr)) { + !(runtime->UseJit() && dex_cache->GetResolvedMethod( + method->GetDexMethodIndex(), pointer_size) == nullptr)) { target_method->dex_method_index = method->GetDexMethodIndex(); } else { if (no_guarantee_of_dex_cache_entry) { // See if the method is also declared in this dex cache. 
- uint32_t dex_method_idx = - method->FindDexMethodIndexInOtherDexFile(*target_method->dex_file, - target_method->dex_method_index); + uint32_t dex_method_idx = method->FindDexMethodIndexInOtherDexFile( + *target_method->dex_file, target_method->dex_method_index); if (dex_method_idx != DexFile::kDexNoIndex) { target_method->dex_method_index = dex_method_idx; } else { @@ -1431,7 +1432,13 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType *type = sharp_type; } } else { - bool method_in_image = heap->FindSpaceFromObject(method, false)->IsImageSpace(); + auto* image_space = heap->GetImageSpace(); + bool method_in_image = false; + if (image_space != nullptr) { + const auto& method_section = image_space->GetImageHeader().GetMethodsSection(); + method_in_image = method_section.Contains( + reinterpret_cast<uint8_t*>(method) - image_space->Begin()); + } if (method_in_image || compiling_boot || runtime->UseJit()) { // We know we must be able to get to the method in the image, so use that pointer. // In the case where we are the JIT, we can always use direct pointers since we know where @@ -1469,21 +1476,16 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui int stats_flags = 0; ScopedObjectAccess soa(Thread::Current()); // Try to resolve the method and compiling method's class. - mirror::ArtMethod* resolved_method; - mirror::Class* referrer_class; StackHandleScope<3> hs(soa.Self()); Handle<mirror::DexCache> dex_cache( hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()))); Handle<mirror::ClassLoader> class_loader(hs.NewHandle( soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()))); - { - uint32_t method_idx = target_method->dex_method_index; - Handle<mirror::ArtMethod> resolved_method_handle(hs.NewHandle( - ResolveMethod(soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type))); - referrer_class = (resolved_method_handle.Get() != nullptr) - ? 
ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr; - resolved_method = resolved_method_handle.Get(); - } + uint32_t method_idx = target_method->dex_method_index; + ArtMethod* resolved_method = ResolveMethod( + soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type); + auto h_referrer_class = hs.NewHandle(resolved_method != nullptr ? + ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr); bool result = false; if (resolved_method != nullptr) { *vtable_idx = GetResolvedMethodVTableIndex(resolved_method, orig_invoke_type); @@ -1492,13 +1494,13 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui const MethodReference* devirt_target = mUnit->GetVerifiedMethod()->GetDevirtTarget(dex_pc); stats_flags = IsFastInvoke( - soa, dex_cache, class_loader, mUnit, referrer_class, resolved_method, + soa, dex_cache, class_loader, mUnit, h_referrer_class.Get(), resolved_method, invoke_type, target_method, devirt_target, direct_code, direct_method); result = stats_flags != 0; } else { // Devirtualization not enabled. Inline IsFastInvoke(), dropping the devirtualization parts. - if (UNLIKELY(referrer_class == nullptr) || - UNLIKELY(!referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(), + if (UNLIKELY(h_referrer_class.Get() == nullptr) || + UNLIKELY(!h_referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(), resolved_method, dex_cache.Get(), target_method->dex_method_index)) || *invoke_type == kSuper) { @@ -1506,8 +1508,9 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui } else { // Sharpening failed so generate a regular resolved method dispatch. 
stats_flags = kFlagMethodResolved; - GetCodeAndMethodForDirectCall(invoke_type, *invoke_type, false, referrer_class, resolved_method, - &stats_flags, target_method, direct_code, direct_method); + GetCodeAndMethodForDirectCall( + invoke_type, *invoke_type, false, h_referrer_class.Get(), resolved_method, &stats_flags, + target_method, direct_code, direct_method); result = true; } } @@ -1773,20 +1776,18 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag } if (resolve_fields_and_methods) { while (it.HasNextDirectMethod()) { - mirror::ArtMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), - dex_cache, class_loader, - NullHandle<mirror::ArtMethod>(), - it.GetMethodInvokeType(class_def)); + ArtMethod* method = class_linker->ResolveMethod( + dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, + it.GetMethodInvokeType(class_def)); if (method == nullptr) { CheckAndClearResolveException(soa.Self()); } it.Next(); } while (it.HasNextVirtualMethod()) { - mirror::ArtMethod* method = class_linker->ResolveMethod(dex_file, it.GetMemberIndex(), - dex_cache, class_loader, - NullHandle<mirror::ArtMethod>(), - it.GetMethodInvokeType(class_def)); + ArtMethod* method = class_linker->ResolveMethod( + dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, + it.GetMethodInvokeType(class_def)); if (method == nullptr) { CheckAndClearResolveException(soa.Self()); } diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index 2cc2409a34..68c905eb22 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -116,11 +116,11 @@ class CompilerDriver { TimingLogger* timings) LOCKS_EXCLUDED(Locks::mutator_lock_); - CompiledMethod* CompileMethod(Thread* self, mirror::ArtMethod*) + CompiledMethod* CompileMethod(Thread* self, ArtMethod*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED; // Compile a single Method. 
- void CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings) + void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); VerificationResults* GetVerificationResults() const { @@ -288,7 +288,7 @@ class CompilerDriver { // return DexFile::kDexNoIndex through `storage_index`. bool IsClassOfStaticMethodAvailableToReferrer(mirror::DexCache* dex_cache, mirror::Class* referrer_class, - mirror::ArtMethod* resolved_method, + ArtMethod* resolved_method, uint16_t method_idx, uint32_t* storage_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -303,7 +303,7 @@ class CompilerDriver { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a method. Returns null on failure, including incompatible class change. - mirror::ArtMethod* ResolveMethod( + ArtMethod* ResolveMethod( ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true) @@ -311,13 +311,13 @@ class CompilerDriver { // Get declaration location of a resolved field. void GetResolvedMethodDexFileLocation( - mirror::ArtMethod* resolved_method, const DexFile** declaring_dex_file, + ArtMethod* resolved_method, const DexFile** declaring_dex_file, uint16_t* declaring_class_idx, uint16_t* declaring_method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the index in the vtable of the method. uint16_t GetResolvedMethodVTableIndex( - mirror::ArtMethod* resolved_method, InvokeType type) + ArtMethod* resolved_method, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Can we fast-path an INVOKE? If no, returns 0. 
If yes, returns a non-zero opaque flags value @@ -325,7 +325,7 @@ class CompilerDriver { int IsFastInvoke( ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, - mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type, + mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type, MethodReference* target_method, const MethodReference* devirt_target, uintptr_t* direct_code, uintptr_t* direct_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -333,7 +333,7 @@ class CompilerDriver { // Is method's class initialized for an invoke? // For static invokes to determine whether we need to consider potential call to <clinit>(). // For non-static invokes, assuming a non-null reference, the class is always initialized. - bool IsMethodsClassInitialized(mirror::Class* referrer_class, mirror::ArtMethod* resolved_method) + bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the layout of dex cache arrays for a dex file. 
Returns invalid layout if the @@ -526,7 +526,7 @@ class CompilerDriver { InvokeType sharp_type, bool no_guarantee_of_dex_cache_entry, const mirror::Class* referrer_class, - mirror::ArtMethod* method, + ArtMethod* method, /*out*/int* stats_flags, MethodReference* target_method, uintptr_t* direct_code, uintptr_t* direct_method) diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index 5085f32aec..ba03f5a5d4 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -20,11 +20,11 @@ #include <stdio.h> #include <memory> +#include "art_method-inl.h" #include "class_linker-inl.h" #include "common_compiler_test.h" #include "dex_file.h" #include "gc/heap.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/dex_cache-inl.h" @@ -85,11 +85,12 @@ class CompilerDriverTest : public CommonCompilerTest { hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader))); mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader); CHECK(c != nullptr); - for (size_t j = 0; j < c->NumDirectMethods(); j++) { - MakeExecutable(c->GetDirectMethod(j)); + const auto pointer_size = class_linker->GetImagePointerSize(); + for (auto& m : c->GetDirectMethods(pointer_size)) { + MakeExecutable(&m); } - for (size_t j = 0; j < c->NumVirtualMethods(); j++) { - MakeExecutable(c->GetVirtualMethod(j)); + for (auto& m : c->GetVirtualMethods(pointer_size)) { + MakeExecutable(&m); } } } @@ -120,8 +121,10 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) { << " " << dex.GetTypeDescriptor(dex.GetTypeId(i)); } EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods()); + auto* cl = Runtime::Current()->GetClassLinker(); + auto pointer_size = cl->GetImagePointerSize(); for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) { - mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i); + ArtMethod* method = 
dex_cache->GetResolvedMethod(i, pointer_size); EXPECT_TRUE(method != nullptr) << "method_idx=" << i << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " " << dex.GetMethodName(dex.GetMethodId(i)); @@ -131,7 +134,7 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) { } EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields()); for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) { - ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(i, dex_cache); + ArtField* field = cl->GetResolvedField(i, dex_cache); EXPECT_TRUE(field != nullptr) << "field_idx=" << i << " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i)) << " " << dex.GetFieldName(dex.GetFieldId(i)); @@ -157,12 +160,15 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) { // Create a jobj_ of ConcreteClass, NOT AbstractClass. jclass c_class = env_->FindClass("ConcreteClass"); + jmethodID constructor = env_->GetMethodID(c_class, "<init>", "()V"); + jobject jobj_ = env_->NewObject(c_class, constructor); ASSERT_TRUE(jobj_ != nullptr); // Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception. 
env_->CallNonvirtualVoidMethod(jobj_, class_, mid_); + EXPECT_EQ(env_->ExceptionCheck(), JNI_TRUE); jthrowable exception = env_->ExceptionOccurred(); env_->ExceptionClear(); @@ -212,11 +218,10 @@ TEST_F(CompilerDriverMethodsTest, Selection) { std::unique_ptr<std::unordered_set<std::string>> expected(GetCompiledMethods()); - for (int32_t i = 0; static_cast<uint32_t>(i) < klass->NumDirectMethods(); i++) { - mirror::ArtMethod* m = klass->GetDirectMethod(i); - std::string name = PrettyMethod(m, true); - const void* code = - m->GetEntryPointFromQuickCompiledCodePtrSize(InstructionSetPointerSize(kRuntimeISA)); + const auto pointer_size = class_linker->GetImagePointerSize(); + for (auto& m : klass->GetDirectMethods(pointer_size)) { + std::string name = PrettyMethod(&m, true); + const void* code = m.GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); ASSERT_NE(code, nullptr); if (expected->find(name) != expected->end()) { expected->erase(name); diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc index f75638d517..4219d97411 100644 --- a/compiler/elf_writer.cc +++ b/compiler/elf_writer.cc @@ -16,6 +16,7 @@ #include "elf_writer.h" +#include "art_method-inl.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "dex_file-inl.h" @@ -23,7 +24,6 @@ #include "driver/compiler_driver.h" #include "elf_file.h" #include "invoke_type.h" -#include "mirror/art_method-inl.h" #include "mirror/object-inl.h" #include "oat.h" #include "scoped_thread_state_change.h" diff --git a/compiler/image_test.cc b/compiler/image_test.cc index eaf3489f8f..772cc80146 100644 --- a/compiler/image_test.cc +++ b/compiler/image_test.cc @@ -105,14 +105,16 @@ TEST_F(ImageTest, WriteRead) { << oat_file.GetFilename(); } + uint64_t image_file_size; { std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str())); ASSERT_TRUE(file.get() != nullptr); ImageHeader image_header; ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true); 
ASSERT_TRUE(image_header.IsValid()); - ASSERT_GE(image_header.GetImageBitmapOffset(), sizeof(image_header)); - ASSERT_NE(0U, image_header.GetImageBitmapSize()); + const auto& bitmap_section = image_header.GetImageSection(ImageHeader::kSectionImageBitmap); + ASSERT_GE(bitmap_section.Offset(), sizeof(image_header)); + ASSERT_NE(0U, bitmap_section.Size()); gc::Heap* heap = Runtime::Current()->GetHeap(); ASSERT_TRUE(!heap->GetContinuousSpaces().empty()); @@ -120,7 +122,8 @@ TEST_F(ImageTest, WriteRead) { ASSERT_FALSE(space->IsImageSpace()); ASSERT_TRUE(space != nullptr); ASSERT_TRUE(space->IsMallocSpace()); - ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->GetLength())); + + image_file_size = file->GetLength(); } ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr); @@ -166,6 +169,9 @@ TEST_F(ImageTest, WriteRead) { ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace()); gc::space::ImageSpace* image_space = heap->GetImageSpace(); + ASSERT_TRUE(image_space != nullptr); + ASSERT_LE(image_space->Size(), image_file_size); + image_space->VerifyImageAllocations(); uint8_t* image_begin = image_space->Begin(); uint8_t* image_end = image_space->End(); @@ -195,25 +201,23 @@ TEST_F(ImageTest, WriteRead) { TEST_F(ImageTest, ImageHeaderIsValid) { uint32_t image_begin = ART_BASE_ADDRESS; uint32_t image_size_ = 16 * KB; - uint32_t image_bitmap_offset = 0; - uint32_t image_bitmap_size = 0; uint32_t image_roots = ART_BASE_ADDRESS + (1 * KB); uint32_t oat_checksum = 0; uint32_t oat_file_begin = ART_BASE_ADDRESS + (4 * KB); // page aligned uint32_t oat_data_begin = ART_BASE_ADDRESS + (8 * KB); // page aligned uint32_t oat_data_end = ART_BASE_ADDRESS + (9 * KB); uint32_t oat_file_end = ART_BASE_ADDRESS + (10 * KB); + ImageSection sections[ImageHeader::kSectionCount]; ImageHeader image_header(image_begin, image_size_, - 0u, 0u, - image_bitmap_offset, - image_bitmap_size, + sections, image_roots, oat_checksum, oat_file_begin, oat_data_begin, oat_data_end, 
oat_file_end, + sizeof(void*), /*compile_pic*/false); ASSERT_TRUE(image_header.IsValid()); diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 195949bf3c..dd62d94e0f 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -23,6 +23,7 @@ #include <vector> #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/logging.h" #include "base/unix_file/fd_file.h" #include "class_linker-inl.h" @@ -43,11 +44,12 @@ #include "intern_table.h" #include "linear_alloc.h" #include "lock_word.h" -#include "mirror/art_method-inl.h" +#include "mirror/abstract_method.h" #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/dex_cache-inl.h" +#include "mirror/method.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "mirror/string-inl.h" @@ -58,10 +60,8 @@ #include "handle_scope-inl.h" #include "utils/dex_cache_arrays_layout-inl.h" -using ::art::mirror::ArtMethod; using ::art::mirror::Class; using ::art::mirror::DexCache; -using ::art::mirror::EntryPointFromInterpreter; using ::art::mirror::Object; using ::art::mirror::ObjectArray; using ::art::mirror::String; @@ -169,10 +169,11 @@ bool ImageWriter::Write(const std::string& image_filename, ElfWriter::GetOatElfInformation(oat_file.get(), &oat_loaded_size, &oat_data_offset); Thread::Current()->TransitionFromSuspendedToRunnable(); + CreateHeader(oat_loaded_size, oat_data_offset); + CopyAndFixupNativeData(); // TODO: heap validation can't handle these fix up passes. Runtime::Current()->GetHeap()->DisableObjectValidation(); - CopyAndFixupNativeData(); CopyAndFixupObjects(); Thread::Current()->TransitionFromRunnableToSuspended(kNative); @@ -195,9 +196,8 @@ bool ImageWriter::Write(const std::string& image_filename, return EXIT_FAILURE; } - // Write out the image + fields. 
- const auto write_count = image_header->GetImageSize() + image_header->GetArtFieldsSize(); - CHECK_EQ(image_end_, image_header->GetImageSize()); + // Write out the image + fields + methods. + const auto write_count = image_header->GetImageSize(); if (!image_file->WriteFully(image_->Begin(), write_count)) { PLOG(ERROR) << "Failed to write image file " << image_filename; image_file->Erase(); @@ -205,17 +205,16 @@ bool ImageWriter::Write(const std::string& image_filename, } // Write out the image bitmap at the page aligned start of the image end. - CHECK_ALIGNED(image_header->GetImageBitmapOffset(), kPageSize); + const auto& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap); + CHECK_ALIGNED(bitmap_section.Offset(), kPageSize); if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()), - image_header->GetImageBitmapSize(), - image_header->GetImageBitmapOffset())) { + bitmap_section.Size(), bitmap_section.Offset())) { PLOG(ERROR) << "Failed to write image file " << image_filename; image_file->Erase(); return false; } - CHECK_EQ(image_header->GetImageBitmapOffset() + image_header->GetImageBitmapSize(), - static_cast<size_t>(image_file->GetLength())); + CHECK_EQ(bitmap_section.End(), static_cast<size_t>(image_file->GetLength())); if (image_file->FlushCloseOrErase() != 0) { PLOG(ERROR) << "Failed to flush and close image file " << image_filename; return false; @@ -245,9 +244,16 @@ void ImageWriter::SetImageOffset(mirror::Object* object, } // The object is already deflated from when we set the bin slot. Just overwrite the lock word. 
object->SetLockWord(LockWord::FromForwardingAddress(offset), false); + DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u); DCHECK(IsImageOffsetAssigned(object)); } +void ImageWriter::UpdateImageOffset(mirror::Object* obj, uintptr_t offset) { + DCHECK(IsImageOffsetAssigned(obj)) << obj << " " << offset; + obj->SetLockWord(LockWord::FromForwardingAddress(offset), false); + DCHECK_EQ(obj->GetLockWord(false).ReadBarrierState(), 0u); +} + void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot bin_slot) { DCHECK(object != nullptr); DCHECK_NE(image_objects_offset_begin_, 0u); @@ -304,6 +310,7 @@ void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) { } object->SetLockWord(LockWord::FromForwardingAddress(static_cast<uint32_t>(bin_slot)), false); + DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u); DCHECK(IsImageBinSlotAssigned(object)); } @@ -324,16 +331,18 @@ void ImageWriter::PrepareDexCacheArraySlots() { auto strings_size = layout.StringsSize(dex_file->NumStringIds()); dex_cache_array_indexes_.Put( dex_cache->GetResolvedTypes(), - DexCacheArrayLocation {size + layout.TypesOffset(), types_size}); + DexCacheArrayLocation {size + layout.TypesOffset(), types_size, kBinRegular}); dex_cache_array_indexes_.Put( dex_cache->GetResolvedMethods(), - DexCacheArrayLocation {size + layout.MethodsOffset(), methods_size}); + DexCacheArrayLocation {size + layout.MethodsOffset(), methods_size, kBinArtMethodClean}); + AddMethodPointerArray(dex_cache->GetResolvedMethods()); dex_cache_array_indexes_.Put( dex_cache->GetResolvedFields(), - DexCacheArrayLocation {size + layout.FieldsOffset(), fields_size}); + DexCacheArrayLocation {size + layout.FieldsOffset(), fields_size, kBinArtField}); + pointer_arrays_.emplace(dex_cache->GetResolvedFields(), kBinArtField); dex_cache_array_indexes_.Put( dex_cache->GetStrings(), - DexCacheArrayLocation {size + layout.StringsOffset(), strings_size}); + DexCacheArrayLocation {size + 
layout.StringsOffset(), strings_size, kBinRegular}); size += layout.Size(); CHECK_EQ(layout.Size(), types_size + methods_size + fields_size + strings_size); } @@ -342,6 +351,23 @@ void ImageWriter::PrepareDexCacheArraySlots() { bin_slot_sizes_[kBinDexCacheArray] = size; } +void ImageWriter::AddMethodPointerArray(mirror::PointerArray* arr) { + DCHECK(arr != nullptr); + if (kIsDebugBuild) { + for (size_t i = 0, len = arr->GetLength(); i < len; i++) { + auto* method = arr->GetElementPtrSize<ArtMethod*>(i, target_ptr_size_); + if (method != nullptr && !method->IsRuntimeMethod()) { + auto* klass = method->GetDeclaringClass(); + CHECK(klass == nullptr || IsImageClass(klass)) << PrettyClass(klass) + << " should be an image class"; + } + } + } + // kBinArtMethodClean picked arbitrarily, just required to differentiate between ArtFields and + // ArtMethods. + pointer_arrays_.emplace(arr, kBinArtMethodClean); +} + void ImageWriter::AssignImageBinSlot(mirror::Object* object) { DCHECK(object != nullptr); size_t object_size = object->SizeOf(); @@ -393,6 +419,20 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) { bin = kBinClassVerified; mirror::Class* klass = object->AsClass(); + // Add non-embedded vtable to the pointer array table if there is one. 
+ auto* vtable = klass->GetVTable(); + if (vtable != nullptr) { + AddMethodPointerArray(vtable); + } + auto* iftable = klass->GetIfTable(); + if (iftable != nullptr) { + for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) { + if (iftable->GetMethodArrayCount(i) > 0) { + AddMethodPointerArray(iftable->GetMethodArray(i)); + } + } + } + if (klass->GetStatus() == Class::kStatusInitialized) { bin = kBinClassInitialized; @@ -417,26 +457,11 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) { } } } - } else if (object->IsArtMethod<kVerifyNone>()) { - mirror::ArtMethod* art_method = down_cast<ArtMethod*>(object); - if (art_method->IsNative()) { - bin = kBinArtMethodNative; - } else { - mirror::Class* declaring_class = art_method->GetDeclaringClass(); - if (declaring_class->GetStatus() != Class::kStatusInitialized) { - bin = kBinArtMethodNotInitialized; - } else { - // This is highly unlikely to dirty since there's no entry points to mutate. - bin = kBinArtMethodsManagedInitialized; - } - } } else if (object->GetClass<kVerifyNone>()->IsStringClass()) { bin = kBinString; // Strings are almost always immutable (except for object header). 
} else if (object->IsArrayInstance()) { mirror::Class* klass = object->GetClass<kVerifyNone>(); - auto* component_type = klass->GetComponentType(); - if (!component_type->IsPrimitive() || component_type->IsPrimitiveInt() || - component_type->IsPrimitiveLong()) { + if (klass->IsObjectArrayClass() || klass->IsIntArrayClass() || klass->IsLongArrayClass()) { auto it = dex_cache_array_indexes_.find(object); if (it != dex_cache_array_indexes_.end()) { bin = kBinDexCacheArray; @@ -451,6 +476,7 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) { size_t offset_delta = RoundUp(object_size, kObjectAlignment); // 64-bit alignment if (bin != kBinDexCacheArray) { + DCHECK(dex_cache_array_indexes_.find(object) == dex_cache_array_indexes_.end()) << object; current_offset = bin_slot_sizes_[bin]; // How many bytes the current bin is at (aligned). // Move the current bin size up to accomodate the object we just assigned a bin slot. bin_slot_sizes_[bin] += offset_delta; @@ -468,6 +494,15 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) { DCHECK_LT(image_end_, image_->Size()); } +bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const { + if (m->IsNative()) { + return true; + } + mirror::Class* declaring_class = m->GetDeclaringClass(); + // Initialized is highly unlikely to dirty since there's no entry points to mutate. 
+ return declaring_class == nullptr || declaring_class->GetStatus() != Class::kStatusInitialized; +} + bool ImageWriter::IsImageBinSlotAssigned(mirror::Object* object) const { DCHECK(object != nullptr); @@ -604,6 +639,9 @@ void ImageWriter::ComputeEagerResolvedStrings() { } bool ImageWriter::IsImageClass(Class* klass) { + if (klass == nullptr) { + return false; + } std::string temp; return compiler_driver_.IsImageClass(klass->GetDescriptor(&temp)); } @@ -619,6 +657,7 @@ void ImageWriter::PruneNonImageClasses() { } Runtime* runtime = Runtime::Current(); ClassLinker* class_linker = runtime->GetClassLinker(); + Thread* self = Thread::Current(); // Make a list of classes we would like to prune. std::set<std::string> non_image_classes; @@ -634,27 +673,45 @@ void ImageWriter::PruneNonImageClasses() { } // Clear references to removed classes from the DexCaches. - ArtMethod* resolution_method = runtime->GetResolutionMethod(); - ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock()); - size_t dex_cache_count = class_linker->GetDexCacheCount(); + const ArtMethod* resolution_method = runtime->GetResolutionMethod(); + size_t dex_cache_count; + { + ReaderMutexLock mu(self, *class_linker->DexLock()); + dex_cache_count = class_linker->GetDexCacheCount(); + } for (size_t idx = 0; idx < dex_cache_count; ++idx) { - DexCache* dex_cache = class_linker->GetDexCache(idx); + DexCache* dex_cache; + { + ReaderMutexLock mu(self, *class_linker->DexLock()); + dex_cache = class_linker->GetDexCache(idx); + } for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) { Class* klass = dex_cache->GetResolvedType(i); if (klass != nullptr && !IsImageClass(klass)) { dex_cache->SetResolvedType(i, nullptr); } } - for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) { - ArtMethod* method = dex_cache->GetResolvedMethod(i); - if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) { - dex_cache->SetResolvedMethod(i, resolution_method); + auto* resolved_methods = 
down_cast<mirror::PointerArray*>(dex_cache->GetResolvedMethods()); + for (size_t i = 0, len = resolved_methods->GetLength(); i < len; i++) { + auto* method = resolved_methods->GetElementPtrSize<ArtMethod*>(i, target_ptr_size_); + if (method != nullptr) { + auto* declaring_class = method->GetDeclaringClass(); + // Miranda methods may be held live by a class which was not an image class but have a + // declaring class which is an image class. Set it to the resolution method to be safe and + // prevent dangling pointers. + if (method->IsMiranda() || !IsImageClass(declaring_class)) { + resolved_methods->SetElementPtrSize(i, resolution_method, target_ptr_size_); + } else { + // Check that the class is still in the classes table. + DCHECK(class_linker->ClassInClassTable(declaring_class)) << "Class " + << PrettyClass(declaring_class) << " not in class linker table"; + } } } for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) { - ArtField* field = dex_cache->GetResolvedField(i, sizeof(void*)); + ArtField* field = dex_cache->GetResolvedField(i, target_ptr_size_); if (field != nullptr && !IsImageClass(field->GetDeclaringClass())) { - dex_cache->SetResolvedField(i, nullptr, sizeof(void*)); + dex_cache->SetResolvedField(i, nullptr, target_ptr_size_); } } // Clean the dex field. 
It might have been populated during the initialization phase, but @@ -757,19 +814,8 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const { } // build an Object[] of the roots needed to restore the runtime - Handle<ObjectArray<Object>> image_roots(hs.NewHandle( + auto image_roots(hs.NewHandle( ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax))); - image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod()); - image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod()); - image_roots->Set<false>(ImageHeader::kImtUnimplementedMethod, - runtime->GetImtUnimplementedMethod()); - image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt()); - image_roots->Set<false>(ImageHeader::kCalleeSaveMethod, - runtime->GetCalleeSaveMethod(Runtime::kSaveAll)); - image_roots->Set<false>(ImageHeader::kRefsOnlySaveMethod, - runtime->GetCalleeSaveMethod(Runtime::kRefsOnly)); - image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod, - runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs)); image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get()); image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots()); for (int i = 0; i < ImageHeader::kImageRootsMax; i++) { @@ -816,7 +862,7 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { // Walk static fields of a Class. 
if (h_obj->IsClass()) { size_t num_reference_static_fields = klass->NumReferenceStaticFields(); - MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset(); + MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset(target_ptr_size_); for (size_t i = 0; i < num_reference_static_fields; ++i) { mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset); if (value != nullptr) { @@ -825,21 +871,38 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { field_offset = MemberOffset(field_offset.Uint32Value() + sizeof(mirror::HeapReference<mirror::Object>)); } - // Visit and assign offsets for fields. - ArtField* fields[2] = { h_obj->AsClass()->GetSFields(), h_obj->AsClass()->GetIFields() }; - size_t num_fields[2] = { h_obj->AsClass()->NumStaticFields(), - h_obj->AsClass()->NumInstanceFields() }; + auto* as_klass = h_obj->AsClass(); + ArtField* fields[] = { as_klass->GetSFields(), as_klass->GetIFields() }; + size_t num_fields[] = { as_klass->NumStaticFields(), as_klass->NumInstanceFields() }; for (size_t i = 0; i < 2; ++i) { for (size_t j = 0; j < num_fields[i]; ++j) { auto* field = fields[i] + j; - auto it = art_field_reloc_.find(field); - CHECK(it == art_field_reloc_.end()) << "Field at index " << i << ":" << j + auto it = native_object_reloc_.find(field); + CHECK(it == native_object_reloc_.end()) << "Field at index " << i << ":" << j << " already assigned " << PrettyField(field); - art_field_reloc_.emplace(field, bin_slot_sizes_[kBinArtField]); + native_object_reloc_.emplace( + field, NativeObjectReloc { bin_slot_sizes_[kBinArtField], kBinArtField }); bin_slot_sizes_[kBinArtField] += sizeof(ArtField); } } + // Visit and assign offsets for methods. 
+ IterationRange<StrideIterator<ArtMethod>> method_arrays[] = { + as_klass->GetDirectMethods(target_ptr_size_), + as_klass->GetVirtualMethods(target_ptr_size_) + }; + for (auto& array : method_arrays) { + bool any_dirty = false; + size_t count = 0; + for (auto& m : array) { + any_dirty = any_dirty || WillMethodBeDirty(&m); + ++count; + } + for (auto& m : array) { + AssignMethodOffset(&m, any_dirty ? kBinArtMethodDirty : kBinArtMethodClean); + } + (any_dirty ? dirty_methods_ : clean_methods_) += count; + } } else if (h_obj->IsObjectArray()) { // Walk elements of an object array. int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength(); @@ -854,6 +917,14 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { } } +void ImageWriter::AssignMethodOffset(ArtMethod* method, Bin bin) { + auto it = native_object_reloc_.find(method); + CHECK(it == native_object_reloc_.end()) << "Method " << method << " already assigned " + << PrettyMethod(method); + native_object_reloc_.emplace(method, NativeObjectReloc { bin_slot_sizes_[bin], bin }); + bin_slot_sizes_[bin] += ArtMethod::ObjectSize(target_ptr_size_); +} + void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) { ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg); DCHECK(writer != nullptr); @@ -879,11 +950,12 @@ void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) { } void ImageWriter::CalculateNewObjectOffsets() { - Thread* self = Thread::Current(); + Thread* const self = Thread::Current(); StackHandleScope<1> hs(self); Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots())); - gc::Heap* heap = Runtime::Current()->GetHeap(); + auto* runtime = Runtime::Current(); + auto* heap = runtime->GetHeap(); DCHECK_EQ(0U, image_end_); // Leave space for the header, but do not write it yet, we need to @@ -896,6 +968,21 @@ void ImageWriter::CalculateNewObjectOffsets() { PrepareDexCacheArraySlots(); // Clear any pre-existing monitors which may have been in the monitor words, 
assign bin slots. heap->VisitObjects(WalkFieldsCallback, this); + // Write the image runtime methods. + image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod(); + image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod(); + image_methods_[ImageHeader::kImtUnimplementedMethod] = runtime->GetImtUnimplementedMethod(); + image_methods_[ImageHeader::kCalleeSaveMethod] = runtime->GetCalleeSaveMethod(Runtime::kSaveAll); + image_methods_[ImageHeader::kRefsOnlySaveMethod] = + runtime->GetCalleeSaveMethod(Runtime::kRefsOnly); + image_methods_[ImageHeader::kRefsAndArgsSaveMethod] = + runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs); + for (auto* m : image_methods_) { + CHECK(m != nullptr); + CHECK(m->IsRuntimeMethod()); + AssignMethodOffset(m, kBinArtMethodDirty); + } + // Calculate cumulative bin slot sizes. size_t previous_sizes = 0u; for (size_t i = 0; i != kBinSize; ++i) { @@ -913,7 +1000,14 @@ void ImageWriter::CalculateNewObjectOffsets() { image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots.Get())); - // Note that image_end_ is left at end of used mirror space + // Update the native relocations by adding their bin sums. + for (auto& pair : native_object_reloc_) { + auto& native_reloc = pair.second; + native_reloc.offset += image_objects_offset_begin_ + + bin_slot_previous_sizes_[native_reloc.bin_type]; + } + + // Note that image_end_ is left at end of used mirror object section. } void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) { @@ -922,47 +1016,87 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) { const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size; oat_data_begin_ = oat_file_begin + oat_data_offset; const uint8_t* oat_data_end = oat_data_begin_ + oat_file_->Size(); - // Write out sections. - size_t cur_pos = image_end_; - // Add fields. 
- auto fields_offset = cur_pos; - CHECK_EQ(image_objects_offset_begin_ + GetBinSizeSum(kBinArtField), fields_offset); - auto fields_size = bin_slot_sizes_[kBinArtField]; - cur_pos += fields_size; - // Return to write header at start of image with future location of image_roots. At this point, - // image_end_ is the size of the image (excluding bitmaps, ArtFields). - /* - const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * kObjectAlignment; - const size_t bitmap_bytes = RoundUp(image_end_, heap_bytes_per_bitmap_byte) / - heap_bytes_per_bitmap_byte; - */ + + // Create the image sections. + ImageSection sections[ImageHeader::kSectionCount]; + // Objects section + auto* objects_section = §ions[ImageHeader::kSectionObjects]; + *objects_section = ImageSection(0u, image_end_); + size_t cur_pos = objects_section->End(); + // Add field section. + auto* field_section = §ions[ImageHeader::kSectionArtFields]; + *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]); + CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtField], + field_section->Offset()); + cur_pos = field_section->End(); + // Add method section. + auto* methods_section = §ions[ImageHeader::kSectionArtMethods]; + *methods_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtMethodClean] + + bin_slot_sizes_[kBinArtMethodDirty]); + CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean], + methods_section->Offset()); + cur_pos = methods_section->End(); + // Finally bitmap section. 
const size_t bitmap_bytes = image_bitmap_->Size(); - auto bitmap_offset = RoundUp(cur_pos, kPageSize); - auto bitmap_size = RoundUp(bitmap_bytes, kPageSize); - cur_pos += bitmap_size; - new (image_->Begin()) ImageHeader(PointerToLowMemUInt32(image_begin_), - static_cast<uint32_t>(image_end_), - fields_offset, fields_size, - bitmap_offset, bitmap_size, - image_roots_address_, - oat_file_->GetOatHeader().GetChecksum(), - PointerToLowMemUInt32(oat_file_begin), - PointerToLowMemUInt32(oat_data_begin_), - PointerToLowMemUInt32(oat_data_end), - PointerToLowMemUInt32(oat_file_end), - compile_pic_); + auto* bitmap_section = §ions[ImageHeader::kSectionImageBitmap]; + *bitmap_section = ImageSection(RoundUp(cur_pos, kPageSize), RoundUp(bitmap_bytes, kPageSize)); + cur_pos = bitmap_section->End(); + if (kIsDebugBuild) { + size_t idx = 0; + for (auto& section : sections) { + LOG(INFO) << static_cast<ImageHeader::ImageSections>(idx) << " " << section; + ++idx; + } + LOG(INFO) << "Methods: clean=" << clean_methods_ << " dirty=" << dirty_methods_; + } + // Create the header. + new (image_->Begin()) ImageHeader( + PointerToLowMemUInt32(image_begin_), static_cast<uint32_t>(methods_section->End()), sections, + image_roots_address_, oat_file_->GetOatHeader().GetChecksum(), + PointerToLowMemUInt32(oat_file_begin), PointerToLowMemUInt32(oat_data_begin_), + PointerToLowMemUInt32(oat_data_end), PointerToLowMemUInt32(oat_file_end), target_ptr_size_, + compile_pic_); +} + +ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) { + auto it = native_object_reloc_.find(method); + CHECK(it != native_object_reloc_.end()) << PrettyMethod(method) << " @ " << method; + CHECK_GE(it->second.offset, image_end_) << "ArtMethods should be after Objects"; + return reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset); } void ImageWriter::CopyAndFixupNativeData() { - // Copy ArtFields to their locations and update the array for convenience. 
- auto fields_offset = image_objects_offset_begin_ + GetBinSizeSum(kBinArtField); - for (auto& pair : art_field_reloc_) { - pair.second += fields_offset; - auto* dest = image_->Begin() + pair.second; - DCHECK_GE(dest, image_->Begin() + image_end_); - memcpy(dest, pair.first, sizeof(ArtField)); - reinterpret_cast<ArtField*>(dest)->SetDeclaringClass( - down_cast<Class*>(GetImageAddress(pair.first->GetDeclaringClass()))); + // Copy ArtFields and methods to their locations and update the array for convenience. + for (auto& pair : native_object_reloc_) { + auto& native_reloc = pair.second; + if (native_reloc.bin_type == kBinArtField) { + auto* dest = image_->Begin() + native_reloc.offset; + DCHECK_GE(dest, image_->Begin() + image_end_); + memcpy(dest, pair.first, sizeof(ArtField)); + reinterpret_cast<ArtField*>(dest)->SetDeclaringClass( + GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass())); + } else { + CHECK(IsArtMethodBin(native_reloc.bin_type)) << native_reloc.bin_type; + auto* dest = image_->Begin() + native_reloc.offset; + DCHECK_GE(dest, image_->Begin() + image_end_); + CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first), + reinterpret_cast<ArtMethod*>(dest)); + } + } + // Fixup the image method roots. 
+ auto* image_header = reinterpret_cast<ImageHeader*>(image_->Begin()); + const auto& methods_section = image_header->GetMethodsSection(); + for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) { + auto* m = image_methods_[i]; + CHECK(m != nullptr); + auto it = native_object_reloc_.find(m); + CHECK(it != native_object_reloc_.end()) << "No fowarding for " << PrettyMethod(m); + auto& native_reloc = it->second; + CHECK(methods_section.Contains(native_reloc.offset)) << native_reloc.offset << " not in " + << methods_section; + CHECK(IsArtMethodBin(native_reloc.bin_type)) << native_reloc.bin_type; + auto* dest = reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset); + image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), dest); } } @@ -984,58 +1118,37 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) { reinterpret_cast<ImageWriter*>(arg)->CopyAndFixupObject(obj); } -bool ImageWriter::CopyAndFixupIfDexCacheFieldArray(mirror::Object* dst, mirror::Object* obj, - mirror::Class* klass) { - if (!klass->IsArrayClass()) { - return false; - } - auto* component_type = klass->GetComponentType(); - bool is_int_arr = component_type->IsPrimitiveInt(); - bool is_long_arr = component_type->IsPrimitiveLong(); - if (!is_int_arr && !is_long_arr) { - return false; - } - auto it = dex_cache_array_indexes_.find(obj); // Is this a dex cache array? - if (it == dex_cache_array_indexes_.end()) { - return false; - } - mirror::Array* arr = obj->AsArray(); - CHECK_EQ(reinterpret_cast<Object*>( - image_->Begin() + it->second.offset_ + image_objects_offset_begin_), dst); - dex_cache_array_indexes_.erase(it); - // Fixup int pointers for the field array. 
- CHECK(!arr->IsObjectArray()); +void ImageWriter::FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, + mirror::Class* klass, Bin array_type) { + CHECK(klass->IsArrayClass()); + CHECK(arr->IsIntArray() || arr->IsLongArray()) << PrettyClass(klass) << " " << arr; + // Fixup int and long pointers for the ArtMethod or ArtField arrays. const size_t num_elements = arr->GetLength(); - if (target_ptr_size_ == 4) { - // Will get fixed up by fixup object. - dst->SetClass(down_cast<mirror::Class*>( - GetImageAddress(mirror::IntArray::GetArrayClass()))); - } else { - DCHECK_EQ(target_ptr_size_, 8u); - dst->SetClass(down_cast<mirror::Class*>( - GetImageAddress(mirror::LongArray::GetArrayClass()))); - } - mirror::Array* dest_array = down_cast<mirror::Array*>(dst); - dest_array->SetLength(num_elements); + dst->SetClass(GetImageAddress(arr->GetClass())); + auto* dest_array = down_cast<mirror::PointerArray*>(dst); for (size_t i = 0, count = num_elements; i < count; ++i) { - ArtField* field = reinterpret_cast<ArtField*>(is_int_arr ? 
- arr->AsIntArray()->GetWithoutChecks(i) : arr->AsLongArray()->GetWithoutChecks(i)); - uint8_t* fixup_location = nullptr; - if (field != nullptr) { - auto it2 = art_field_reloc_.find(field); - CHECK(it2 != art_field_reloc_.end()) << "No relocation for field " << PrettyField(field); - fixup_location = image_begin_ + it2->second; - } - if (target_ptr_size_ == 4) { - down_cast<mirror::IntArray*>(dest_array)->SetWithoutChecks<kVerifyNone>( - i, static_cast<uint32_t>(reinterpret_cast<uint64_t>(fixup_location))); - } else { - down_cast<mirror::LongArray*>(dest_array)->SetWithoutChecks<kVerifyNone>( - i, reinterpret_cast<uint64_t>(fixup_location)); + auto* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_); + if (elem != nullptr) { + auto it = native_object_reloc_.find(elem); + if (it == native_object_reloc_.end()) { + if (IsArtMethodBin(array_type)) { + auto* method = reinterpret_cast<ArtMethod*>(elem); + LOG(FATAL) << "No relocation entry for ArtMethod " << PrettyMethod(method) << " @ " + << method << " idx=" << i << "/" << num_elements << " with declaring class " + << PrettyClass(method->GetDeclaringClass()); + } else { + CHECK_EQ(array_type, kBinArtField); + auto* field = reinterpret_cast<ArtField*>(elem); + LOG(FATAL) << "No relocation entry for ArtField " << PrettyField(field) << " @ " + << field << " idx=" << i << "/" << num_elements << " with declaring class " + << PrettyClass(field->GetDeclaringClass()); + } + } else { + elem = image_begin_ + it->second.offset; + } } + dest_array->SetElementPtrSize<false, true>(i, elem, target_ptr_size_); } - dst->SetLockWord(LockWord::Default(), false); - return true; } void ImageWriter::CopyAndFixupObject(Object* obj) { @@ -1043,19 +1156,8 @@ void ImageWriter::CopyAndFixupObject(Object* obj) { size_t offset = GetImageOffset(obj); auto* dst = reinterpret_cast<Object*>(image_->Begin() + offset); const uint8_t* src = reinterpret_cast<const uint8_t*>(obj); - size_t n; - mirror::Class* klass = obj->GetClass(); - if 
(CopyAndFixupIfDexCacheFieldArray(dst, obj, klass)) { - return; - } - if (klass->IsArtMethodClass()) { - // Size without pointer fields since we don't want to overrun the buffer if target art method - // is 32 bits but source is 64 bits. - n = mirror::ArtMethod::SizeWithoutPointerFields(target_ptr_size_); - } else { - n = obj->SizeOf(); - } + size_t n = obj->SizeOf(); DCHECK_LE(offset + n, image_->Size()); memcpy(dst, src, n); @@ -1114,23 +1216,51 @@ class FixupClassVisitor FINAL : public FixupVisitor { void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) { // Copy and fix up ArtFields in the class. - ArtField* fields[2] = { orig->AsClass()->GetSFields(), orig->AsClass()->GetIFields() }; - size_t num_fields[2] = { orig->AsClass()->NumStaticFields(), - orig->AsClass()->NumInstanceFields() }; - // Update the arrays. + ArtField* fields[2] = { orig->GetSFields(), orig->GetIFields() }; + size_t num_fields[2] = { orig->NumStaticFields(), orig->NumInstanceFields() }; + // Update the field arrays. for (size_t i = 0; i < 2; ++i) { if (num_fields[i] == 0) { CHECK(fields[i] == nullptr); continue; } - auto it = art_field_reloc_.find(fields[i]); - CHECK(it != art_field_reloc_.end()) << PrettyClass(orig->AsClass()) << " : " - << PrettyField(fields[i]); - auto* image_fields = reinterpret_cast<ArtField*>(image_begin_ + it->second); + auto it = native_object_reloc_.find(fields[i]); + CHECK(it != native_object_reloc_.end()) << PrettyClass(orig) << " : " << PrettyField(fields[i]); + auto* image_fields = reinterpret_cast<ArtField*>(image_begin_ + it->second.offset); if (i == 0) { - down_cast<Class*>(copy)->SetSFieldsUnchecked(image_fields); + copy->SetSFieldsUnchecked(image_fields); } else { - down_cast<Class*>(copy)->SetIFieldsUnchecked(image_fields); + copy->SetIFieldsUnchecked(image_fields); + } + } + // Update direct / virtual method arrays. 
+ auto* direct_methods = orig->GetDirectMethodsPtr(); + if (direct_methods != nullptr) { + auto it = native_object_reloc_.find(direct_methods); + CHECK(it != native_object_reloc_.end()) << PrettyClass(orig); + copy->SetDirectMethodsPtrUnchecked( + reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset)); + } + auto* virtual_methods = orig->GetVirtualMethodsPtr(); + if (virtual_methods != nullptr) { + auto it = native_object_reloc_.find(virtual_methods); + CHECK(it != native_object_reloc_.end()) << PrettyClass(orig); + copy->SetVirtualMethodsPtr( + reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset)); + } + // Fix up embedded tables. + if (orig->ShouldHaveEmbeddedImtAndVTable()) { + for (int32_t i = 0; i < orig->GetEmbeddedVTableLength(); ++i) { + auto it = native_object_reloc_.find(orig->GetEmbeddedVTableEntry(i, target_ptr_size_)); + CHECK(it != native_object_reloc_.end()) << PrettyClass(orig); + copy->SetEmbeddedVTableEntryUnchecked( + i, reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset), target_ptr_size_); + } + for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { + auto it = native_object_reloc_.find(orig->GetEmbeddedImTableEntry(i, target_ptr_size_)); + CHECK(it != native_object_reloc_.end()) << PrettyClass(orig); + copy->SetEmbeddedImTableEntry( + i, reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset), target_ptr_size_); } } FixupClassVisitor visitor(this, copy); @@ -1148,18 +1278,39 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) { DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig)); } } + auto* klass = orig->GetClass(); + if (klass->IsIntArrayClass() || klass->IsLongArrayClass()) { + // Is this a native dex cache array? + auto it = pointer_arrays_.find(down_cast<mirror::PointerArray*>(orig)); + if (it != pointer_arrays_.end()) { + // Should only need to fixup every pointer array exactly once. 
+ FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second); + pointer_arrays_.erase(it); + return; + } + CHECK(dex_cache_array_indexes_.find(orig) == dex_cache_array_indexes_.end()) + << "Should have been pointer array."; + } if (orig->IsClass()) { FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy)); } else { + if (klass == mirror::Method::StaticClass() || klass == mirror::Constructor::StaticClass()) { + // Need to go update the ArtMethod. + auto* dest = down_cast<mirror::AbstractMethod*>(copy); + auto* src = down_cast<mirror::AbstractMethod*>(orig); + ArtMethod* src_method = src->GetArtMethod(); + auto it = native_object_reloc_.find(src_method); + CHECK(it != native_object_reloc_.end()) << "Missing relocation for AbstractMethod.artMethod " + << PrettyMethod(src_method); + dest->SetArtMethod( + reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset)); + } FixupVisitor visitor(this, copy); orig->VisitReferences<true /*visit class*/>(visitor, visitor); } - if (orig->IsArtMethod<kVerifyNone>()) { - FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy)); - } } -const uint8_t* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) { +const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method, bool* quick_is_interpreted) { DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() && !method->IsImtUnimplementedMethod() && !method->IsAbstract()) << PrettyMethod(method); @@ -1171,27 +1322,31 @@ const uint8_t* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_ method->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_)); const uint8_t* quick_code = GetOatAddress(quick_oat_code_offset); *quick_is_interpreted = false; - if (quick_code != nullptr && - (!method->IsStatic() || method->IsConstructor() || method->GetDeclaringClass()->IsInitialized())) { + if (quick_code != nullptr && (!method->IsStatic() || method->IsConstructor() || + 
method->GetDeclaringClass()->IsInitialized())) { // We have code for a non-static or initialized method, just use the code. + DCHECK_GE(quick_code, oat_data_begin_); } else if (quick_code == nullptr && method->IsNative() && (!method->IsStatic() || method->GetDeclaringClass()->IsInitialized())) { // Non-static or initialized native method missing compiled code, use generic JNI version. quick_code = GetOatAddress(quick_generic_jni_trampoline_offset_); + DCHECK_GE(quick_code, oat_data_begin_); } else if (quick_code == nullptr && !method->IsNative()) { // We don't have code at all for a non-native method, use the interpreter. quick_code = GetOatAddress(quick_to_interpreter_bridge_offset_); *quick_is_interpreted = true; + DCHECK_GE(quick_code, oat_data_begin_); } else { CHECK(!method->GetDeclaringClass()->IsInitialized()); // We have code for a static method, but need to go through the resolution stub for class // initialization. quick_code = GetOatAddress(quick_resolution_trampoline_offset_); + DCHECK_GE(quick_code, oat_data_begin_); } return quick_code; } -const uint8_t* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) { +const uint8_t* ImageWriter::GetQuickEntryPoint(ArtMethod* method) { // Calculate the quick entry point following the same logic as FixupMethod() below. // The resolution method has a special trampoline to call. 
Runtime* runtime = Runtime::Current(); @@ -1213,50 +1368,57 @@ const uint8_t* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) { } } -void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) { +void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) { + memcpy(copy, orig, ArtMethod::ObjectSize(target_ptr_size_)); + + copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked())); + copy->SetDexCacheResolvedMethods(GetImageAddress(orig->GetDexCacheResolvedMethods())); + copy->SetDexCacheResolvedTypes(GetImageAddress(orig->GetDexCacheResolvedTypes())); + // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to // oat_begin_ - // For 64 bit targets we need to repack the current runtime pointer sized fields to the right - // locations. - // Copy all of the fields from the runtime methods to the target methods first since we did a - // bytewise copy earlier. - copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>( - orig->GetEntryPointFromInterpreterPtrSize(target_ptr_size_), target_ptr_size_); - copy->SetEntryPointFromJniPtrSize<kVerifyNone>( - orig->GetEntryPointFromJniPtrSize(target_ptr_size_), target_ptr_size_); - copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>( - orig->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_), target_ptr_size_); // The resolution method has a special trampoline to call. 
Runtime* runtime = Runtime::Current(); if (UNLIKELY(orig == runtime->GetResolutionMethod())) { - copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>( + copy->SetEntryPointFromQuickCompiledCodePtrSize( GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_); } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() || orig == runtime->GetImtUnimplementedMethod())) { - copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>( + copy->SetEntryPointFromQuickCompiledCodePtrSize( GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_); + } else if (UNLIKELY(orig->IsRuntimeMethod())) { + bool found_one = false; + for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) { + auto idx = static_cast<Runtime::CalleeSaveType>(i); + if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) { + found_one = true; + break; + } + } + CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig); + CHECK(copy->IsRuntimeMethod()); } else { // We assume all methods have code. If they don't currently then we set them to the use the // resolution trampoline. Abstract methods never have code and so we need to make sure their // use results in an AbstractMethodError. We use the interpreter to achieve this. 
if (UNLIKELY(orig->IsAbstract())) { - copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>( + copy->SetEntryPointFromQuickCompiledCodePtrSize( GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_); - copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>( + copy->SetEntryPointFromInterpreterPtrSize( reinterpret_cast<EntryPointFromInterpreter*>(const_cast<uint8_t*>( GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_); } else { bool quick_is_interpreted; const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted); - copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_); + copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_); // JNI entrypoint: if (orig->IsNative()) { // The native method's pointer is set to a stub to lookup via dlsym. // Note this is not the code_ pointer, that is handled above. - copy->SetEntryPointFromJniPtrSize<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_), - target_ptr_size_); + copy->SetEntryPointFromJniPtrSize( + GetOatAddress(jni_dlsym_lookup_offset_), target_ptr_size_); } // Interpreter entrypoint: @@ -1267,8 +1429,7 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) { EntryPointFromInterpreter* interpreter_entrypoint = reinterpret_cast<EntryPointFromInterpreter*>( const_cast<uint8_t*>(GetOatAddress(interpreter_code))); - copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>( - interpreter_entrypoint, target_ptr_size_); + copy->SetEntryPointFromInterpreterPtrSize(interpreter_entrypoint, target_ptr_size_); } } } @@ -1305,8 +1466,8 @@ size_t ImageWriter::GetBinSizeSum(ImageWriter::Bin up_to) const { ImageWriter::BinSlot::BinSlot(uint32_t lockword) : lockword_(lockword) { // These values may need to get updated if more bins are added to the enum Bin - static_assert(kBinBits == 4, "wrong number of bin bits"); - static_assert(kBinShift == 28, "wrong number of shift"); + static_assert(kBinBits == 3, 
"wrong number of bin bits"); + static_assert(kBinShift == 27, "wrong number of shift"); static_assert(sizeof(BinSlot) == sizeof(LockWord), "BinSlot/LockWord must have equal sizes"); DCHECK_LT(GetBin(), kBinSize); @@ -1326,13 +1487,4 @@ uint32_t ImageWriter::BinSlot::GetIndex() const { return lockword_ & ~kBinMask; } -void ImageWriter::FreeStringDataArray() { - if (string_data_array_ != nullptr) { - gc::space::LargeObjectSpace* los = Runtime::Current()->GetHeap()->GetLargeObjectsSpace(); - if (los != nullptr) { - los->Free(Thread::Current(), reinterpret_cast<mirror::Object*>(string_data_array_)); - } - } -} - } // namespace art diff --git a/compiler/image_writer.h b/compiler/image_writer.h index 5921732399..a35d6ad9c9 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -30,12 +30,13 @@ #include "base/macros.h" #include "driver/compiler_driver.h" #include "gc/space/space.h" +#include "lock_word.h" #include "mem_map.h" #include "oat_file.h" #include "mirror/dex_cache.h" #include "os.h" #include "safe_map.h" -#include "gc/space/space.h" +#include "utils.h" namespace art { @@ -53,18 +54,12 @@ class ImageWriter FINAL { quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic), target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())), bin_slot_sizes_(), bin_slot_previous_sizes_(), bin_slot_count_(), - string_data_array_(nullptr) { + dirty_methods_(0u), clean_methods_(0u) { CHECK_NE(image_begin, 0U); + std::fill(image_methods_, image_methods_ + arraysize(image_methods_), nullptr); } ~ImageWriter() { - // For interned strings a large array is allocated to hold all the character data and avoid - // overhead. However, no GC is run anymore at this point. As the array is likely large, it - // will be allocated in the large object space, where valgrind can track every single - // allocation. Not explicitly freeing that array will be recognized as a leak. 
- if (RUNNING_ON_VALGRIND != 0) { - FreeStringDataArray(); - } } bool PrepareImageAddressSpace(); @@ -73,14 +68,14 @@ class ImageWriter FINAL { return image_roots_address_ != 0u; } - mirror::Object* GetImageAddress(mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (object == nullptr) { - return nullptr; - } - return reinterpret_cast<mirror::Object*>(image_begin_ + GetImageOffset(object)); + template <typename T> + T* GetImageAddress(T* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return object == nullptr ? nullptr : + reinterpret_cast<T*>(image_begin_ + GetImageOffset(object)); } + ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::HeapReference<mirror::Object>* GetDexCacheArrayElementImageAddress( const DexFile* dex_file, uint32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { auto it = dex_cache_array_starts_.find(dex_file); @@ -90,11 +85,12 @@ class ImageWriter FINAL { } uint8_t* GetOatFileBegin() const { - return image_begin_ + RoundUp(image_end_ + bin_slot_sizes_[kBinArtField], kPageSize); + return image_begin_ + RoundUp( + image_end_ + bin_slot_sizes_[kBinArtField] + bin_slot_sizes_[kBinArtMethodDirty] + + bin_slot_sizes_[kBinArtMethodClean], kPageSize); } - bool Write(const std::string& image_filename, - const std::string& oat_filename, + bool Write(const std::string& image_filename, const std::string& oat_filename, const std::string& oat_location) LOCKS_EXCLUDED(Locks::mutator_lock_); @@ -124,11 +120,15 @@ class ImageWriter FINAL { kBinClassInitializedFinalStatics, // Class initializers have been run, no non-final statics kBinClassInitialized, // Class initializers have been run kBinClassVerified, // Class verified, but initializers haven't been run - kBinArtMethodNative, // Art method that is actually native - kBinArtMethodNotInitialized, // Art method with a declaring class that wasn't initialized // Add more bins here if we add more 
segregation code. - // Non mirror fields must be below. ArtFields should be always clean. + // Non mirror fields must be below. + // ArtFields should be always clean. kBinArtField, + // If the class is initialized, then the ArtMethods are probably clean. + kBinArtMethodClean, + // ArtMethods may be dirty if the class has native methods or a declaring class that isn't + // initialized. + kBinArtMethodDirty, kBinSize, // Number of bins which are for mirror objects. kBinMirrorCount = kBinArtField, @@ -138,9 +138,12 @@ class ImageWriter FINAL { static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1); // uint32 = typeof(lockword_) - static constexpr size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits; + // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK + // failures due to invalid read barrier bits during object field reads. + static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits - + LockWord::kReadBarrierStateSize; // 111000.....0 - static constexpr size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift; + static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift; // We use the lock word to store the bin # and bin index of the object in the image. 
// @@ -172,6 +175,8 @@ class ImageWriter FINAL { bool IsImageOffsetAssigned(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void UpdateImageOffset(mirror::Object* obj, uintptr_t offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PrepareDexCacheArraySlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AssignImageBinSlot(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -181,6 +186,8 @@ class ImageWriter FINAL { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void* GetImageAddressCallback(void* writer, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj); @@ -197,10 +204,12 @@ class ImageWriter FINAL { // With Quick, code is within the OatFile, as there are all in one // .o ELF object. DCHECK_LT(offset, oat_file_->Size()); - if (offset == 0u) { - return nullptr; - } - return oat_data_begin_ + offset; + DCHECK(oat_data_begin_ != nullptr); + return offset == 0u ? nullptr : oat_data_begin_ + offset; + } + + static bool IsArtMethodBin(Bin bin) { + return bin == kBinArtMethodClean || bin == kBinArtMethodDirty; } // Returns true if the class was in the original requested image classes list. 
@@ -257,21 +266,20 @@ class ImageWriter FINAL { static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CopyAndFixupObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool CopyAndFixupIfDexCacheFieldArray(mirror::Object* dst, mirror::Object* obj, - mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupMethod(mirror::ArtMethod* orig, mirror::ArtMethod* copy) + void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupClass(mirror::Class* orig, mirror::Class* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupObject(mirror::Object* orig, mirror::Object* copy) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass, + Bin array_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get quick code for non-resolution/imt_conflict/abstract method. - const uint8_t* GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) + const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const uint8_t* GetQuickEntryPoint(mirror::ArtMethod* method) + const uint8_t* GetQuickEntryPoint(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Patches references in OatFile to expect runtime addresses. @@ -280,8 +288,11 @@ class ImageWriter FINAL { // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins. size_t GetBinSizeSum(Bin up_to = kBinSize) const; - // Release the string_data_array_. - void FreeStringDataArray(); + // Return true if a method is likely to be dirtied at runtime. + bool WillMethodBeDirty(ArtMethod* m) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Assign the offset for an ArtMethod. 
+ void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const CompilerDriver& compiler_driver_; @@ -308,9 +319,14 @@ class ImageWriter FINAL { struct DexCacheArrayLocation { size_t offset_; size_t length_; + Bin bin_type_; }; SafeMap<mirror::Object*, DexCacheArrayLocation> dex_cache_array_indexes_; + // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need + // to keep track. These include vtable arrays, iftable arrays, and dex caches. + std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_; + // The start offsets of the dex cache arrays. SafeMap<const DexFile*, size_t> dex_cache_array_starts_; @@ -344,12 +360,21 @@ class ImageWriter FINAL { size_t bin_slot_previous_sizes_[kBinSize]; // Number of bytes in previous bins. size_t bin_slot_count_[kBinSize]; // Number of objects in a bin - // ArtField relocating map, ArtFields are allocated as array of structs but we want to have one - // entry per art field for convenience. - // ArtFields are placed right after the end of the image objects (aka sum of bin_slot_sizes_). - std::unordered_map<ArtField*, uintptr_t> art_field_reloc_; + // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to + // have one entry per art field for convenience. ArtFields are placed right after the end of the + // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields. + struct NativeObjectReloc { + uintptr_t offset; + Bin bin_type; + }; + std::unordered_map<void*, NativeObjectReloc> native_object_reloc_; + + // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image. + ArtMethod* image_methods_[ImageHeader::kImageMethodsCount]; - void* string_data_array_; // The backing for the interned strings. + // Counters for measurements, used for logging only. 
+ uint64_t dirty_methods_; + uint64_t clean_methods_; friend class FixupVisitor; friend class FixupClassVisitor; diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index 7ed70971a3..55fef9b721 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -16,6 +16,7 @@ #include "jit_compiler.h" +#include "art_method-inl.h" #include "arch/instruction_set.h" #include "arch/instruction_set_features.h" #include "base/time_utils.h" @@ -27,7 +28,6 @@ #include "driver/compiler_options.h" #include "jit/jit.h" #include "jit/jit_code_cache.h" -#include "mirror/art_method-inl.h" #include "oat_file-inl.h" #include "object_lock.h" #include "thread_list.h" @@ -54,7 +54,7 @@ extern "C" void jit_unload(void* handle) { delete reinterpret_cast<JitCompiler*>(handle); } -extern "C" bool jit_compile_method(void* handle, mirror::ArtMethod* method, Thread* self) +extern "C" bool jit_compile_method(void* handle, ArtMethod* method, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle); DCHECK(jit_compiler != nullptr); @@ -105,34 +105,33 @@ JitCompiler::JitCompiler() : total_time_(0) { JitCompiler::~JitCompiler() { } -bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) { +bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) { TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit)); const uint64_t start_time = NanoTime(); StackHandleScope<2> hs(self); self->AssertNoPendingException(); Runtime* runtime = Runtime::Current(); - Handle<mirror::ArtMethod> h_method(hs.NewHandle(method)); if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) { VLOG(jit) << "Already compiled " << PrettyMethod(method); return true; // Already compiled } - Handle<mirror::Class> h_class(hs.NewHandle(h_method->GetDeclaringClass())); + Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass())); { TimingLogger::ScopedTiming t2("Initializing", 
&logger); if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) { - VLOG(jit) << "JIT failed to initialize " << PrettyMethod(h_method.Get()); + VLOG(jit) << "JIT failed to initialize " << PrettyMethod(method); return false; } } const DexFile* dex_file = h_class->GetDexCache()->GetDexFile(); - MethodReference method_ref(dex_file, h_method->GetDexMethodIndex()); + MethodReference method_ref(dex_file, method->GetDexMethodIndex()); // Only verify if we don't already have verification results. if (verification_results_->GetVerifiedMethod(method_ref) == nullptr) { TimingLogger::ScopedTiming t2("Verifying", &logger); std::string error; - if (verifier::MethodVerifier::VerifyMethod(h_method.Get(), true, &error) == + if (verifier::MethodVerifier::VerifyMethod(method, true, &error) == verifier::MethodVerifier::kHardFailure) { - VLOG(jit) << "Not compile method " << PrettyMethod(h_method.Get()) + VLOG(jit) << "Not compile method " << PrettyMethod(method) << " due to verification failure " << error; return false; } @@ -140,7 +139,7 @@ bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) { CompiledMethod* compiled_method = nullptr; { TimingLogger::ScopedTiming t2("Compiling", &logger); - compiled_method = compiler_driver_->CompileMethod(self, h_method.Get()); + compiled_method = compiler_driver_->CompileMethod(self, method); } { TimingLogger::ScopedTiming t2("TrimMaps", &logger); @@ -154,16 +153,15 @@ bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) { // Don't add the method if we are supposed to be deoptimized. bool result = false; if (!runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) { - const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor( - h_method.Get()); + const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(method); if (code != nullptr) { // Already have some compiled code, just use this instead of linking. // TODO: Fix recompilation. 
- h_method->SetEntryPointFromQuickCompiledCode(code); + method->SetEntryPointFromQuickCompiledCode(code); result = true; } else { TimingLogger::ScopedTiming t2("MakeExecutable", &logger); - result = MakeExecutable(compiled_method, h_method.Get()); + result = MakeExecutable(compiled_method, method); } } // Remove the compiled method to save memory. @@ -205,7 +203,7 @@ uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_me return code_ptr; } -bool JitCompiler::AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method, +bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method, OatFile::OatMethod* out_method) { Runtime* runtime = Runtime::Current(); JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache(); @@ -261,7 +259,7 @@ bool JitCompiler::AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod return true; } -bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method) { +bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method) { CHECK(method != nullptr); CHECK(compiled_method != nullptr); OatFile::OatMethod oat_method(nullptr, 0); diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h index d9a5ac63b0..b0010e0eb2 100644 --- a/compiler/jit/jit_compiler.h +++ b/compiler/jit/jit_compiler.h @@ -28,11 +28,8 @@ namespace art { -class InstructionSetFeatures; - -namespace mirror { class ArtMethod; -} +class InstructionSetFeatures; namespace jit { @@ -40,11 +37,11 @@ class JitCompiler { public: static JitCompiler* Create(); virtual ~JitCompiler(); - bool CompileMethod(Thread* self, mirror::ArtMethod* method) + bool CompileMethod(Thread* self, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // This is in the compiler since the runtime doesn't have access to the compiled method // structures. 
- bool AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method, + bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method, OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); CompilerCallbacks* GetCompilerCallbacks() const; size_t GetTotalCompileTime() const { @@ -65,7 +62,7 @@ class JitCompiler { uint8_t* WriteMethodHeaderAndCode( const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end, const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map); - bool MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method) + bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(JitCompiler); diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc index eaf7872a05..09b6034ee8 100644 --- a/compiler/jni/jni_cfi_test_expected.inc +++ b/compiler/jni/jni_cfi_test_expected.inc @@ -85,8 +85,8 @@ static constexpr uint8_t expected_asm_kArm64[] = { 0xF7, 0x63, 0x08, 0xA9, 0xF9, 0x6B, 0x09, 0xA9, 0xFB, 0x73, 0x0A, 0xA9, 0xFD, 0x7B, 0x0B, 0xA9, 0xE8, 0x27, 0x02, 0x6D, 0xEA, 0x2F, 0x03, 0x6D, 0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xF5, 0x03, 0x12, 0xAA, - 0xE0, 0x03, 0x00, 0xB9, 0xE1, 0xC7, 0x00, 0xB9, 0xE0, 0xCB, 0x00, 0xBD, - 0xE2, 0xCF, 0x00, 0xB9, 0xE3, 0xD3, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, + 0xE0, 0x03, 0x00, 0xF9, 0xE1, 0xCB, 0x00, 0xB9, 0xE0, 0xCF, 0x00, 0xBD, + 0xE2, 0xD3, 0x00, 0xB9, 0xE3, 0xD7, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, 0xFF, 0x83, 0x00, 0x91, 0xF2, 0x03, 0x15, 0xAA, 0xF3, 0x53, 0x46, 0xA9, 0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9, 0xF9, 0x6B, 0x49, 0xA9, 0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9, 0xE8, 0x27, 0x42, 0x6D, @@ -138,11 +138,11 @@ static constexpr uint8_t expected_cfi_kArm64[] = { // 0x0000002c: .cfi_offset_extended: r78 at cfa-112 // 0x0000002c: .cfi_offset_extended: r79 at 
cfa-104 // 0x0000002c: mov x21, tr -// 0x00000030: str w0, [sp] -// 0x00000034: str w1, [sp, #196] -// 0x00000038: str s0, [sp, #200] -// 0x0000003c: str w2, [sp, #204] -// 0x00000040: str w3, [sp, #208] +// 0x00000030: str x0, [sp] +// 0x00000034: str w1, [sp, #200] +// 0x00000038: str s0, [sp, #204] +// 0x0000003c: str w2, [sp, #208] +// 0x00000040: str w3, [sp, #212] // 0x00000044: sub sp, sp, #0x20 (32) // 0x00000048: .cfi_def_cfa_offset: 224 // 0x00000048: add sp, sp, #0x20 (32) @@ -238,20 +238,20 @@ static constexpr uint8_t expected_asm_kX86_64[] = { 0x41, 0x57, 0x41, 0x56, 0x41, 0x55, 0x41, 0x54, 0x55, 0x53, 0x48, 0x83, 0xEC, 0x48, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x40, 0xF2, 0x44, 0x0F, 0x11, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x30, 0xF2, - 0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x89, 0x3C, 0x24, 0x89, 0xB4, 0x24, - 0x84, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x88, 0x00, 0x00, - 0x00, 0x89, 0x94, 0x24, 0x8C, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24, 0x90, - 0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20, 0xF2, - 0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, - 0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x10, - 0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C, 0x41, - 0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3, + 0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x48, 0x89, 0x3C, 0x24, 0x89, 0xB4, + 0x24, 0x88, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x8C, 0x00, + 0x00, 0x00, 0x89, 0x94, 0x24, 0x90, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24, + 0x94, 0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20, + 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C, + 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, + 0x10, 0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C, + 0x41, 0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3, }; static constexpr uint8_t expected_cfi_kX86_64[] = { 0x42, 0x0E, 0x10, 0x8F, 0x04, 0x42, 0x0E, 0x18, 
0x8E, 0x06, 0x42, 0x0E, 0x20, 0x8D, 0x08, 0x42, 0x0E, 0x28, 0x8C, 0x0A, 0x41, 0x0E, 0x30, 0x86, 0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x80, 0x01, 0x47, 0xA0, - 0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x65, 0x0E, + 0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x66, 0x0E, 0xA0, 0x01, 0x44, 0x0E, 0x80, 0x01, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47, 0xDF, 0x47, 0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E, 0x28, 0xC6, 0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E, @@ -285,47 +285,47 @@ static constexpr uint8_t expected_cfi_kX86_64[] = { // 0x00000023: .cfi_offset: r30 at cfa-80 // 0x00000023: movsd [rsp + 40], xmm12 // 0x0000002a: .cfi_offset: r29 at cfa-88 -// 0x0000002a: mov [rsp], edi -// 0x0000002d: mov [rsp + 132], esi -// 0x00000034: movss [rsp + 136], xmm0 -// 0x0000003d: mov [rsp + 140], edx -// 0x00000044: mov [rsp + 144], ecx -// 0x0000004b: addq rsp, -32 -// 0x0000004f: .cfi_def_cfa_offset: 160 -// 0x0000004f: addq rsp, 32 -// 0x00000053: .cfi_def_cfa_offset: 128 -// 0x00000053: .cfi_remember_state -// 0x00000053: movsd xmm12, [rsp + 40] -// 0x0000005a: .cfi_restore: r29 -// 0x0000005a: movsd xmm13, [rsp + 48] -// 0x00000061: .cfi_restore: r30 -// 0x00000061: movsd xmm14, [rsp + 56] -// 0x00000068: .cfi_restore: r31 -// 0x00000068: movsd xmm15, [rsp + 64] -// 0x0000006f: .cfi_restore: r32 -// 0x0000006f: addq rsp, 72 -// 0x00000073: .cfi_def_cfa_offset: 56 -// 0x00000073: pop rbx -// 0x00000074: .cfi_def_cfa_offset: 48 -// 0x00000074: .cfi_restore: r3 -// 0x00000074: pop rbp -// 0x00000075: .cfi_def_cfa_offset: 40 -// 0x00000075: .cfi_restore: r6 -// 0x00000075: pop r12 -// 0x00000077: .cfi_def_cfa_offset: 32 -// 0x00000077: .cfi_restore: r12 -// 0x00000077: pop r13 -// 0x00000079: .cfi_def_cfa_offset: 24 -// 0x00000079: .cfi_restore: r13 -// 0x00000079: pop r14 -// 0x0000007b: .cfi_def_cfa_offset: 16 -// 0x0000007b: .cfi_restore: r14 -// 0x0000007b: pop r15 -// 0x0000007d: 
.cfi_def_cfa_offset: 8 -// 0x0000007d: .cfi_restore: r15 -// 0x0000007d: ret -// 0x0000007e: .cfi_restore_state -// 0x0000007e: .cfi_def_cfa_offset: 128 +// 0x0000002a: movq [rsp], rdi +// 0x0000002e: mov [rsp + 136], esi +// 0x00000035: movss [rsp + 140], xmm0 +// 0x0000003e: mov [rsp + 144], edx +// 0x00000045: mov [rsp + 148], ecx +// 0x0000004c: addq rsp, -32 +// 0x00000050: .cfi_def_cfa_offset: 160 +// 0x00000050: addq rsp, 32 +// 0x00000054: .cfi_def_cfa_offset: 128 +// 0x00000054: .cfi_remember_state +// 0x00000054: movsd xmm12, [rsp + 40] +// 0x0000005b: .cfi_restore: r29 +// 0x0000005b: movsd xmm13, [rsp + 48] +// 0x00000062: .cfi_restore: r30 +// 0x00000062: movsd xmm14, [rsp + 56] +// 0x00000069: .cfi_restore: r31 +// 0x00000069: movsd xmm15, [rsp + 64] +// 0x00000070: .cfi_restore: r32 +// 0x00000070: addq rsp, 72 +// 0x00000074: .cfi_def_cfa_offset: 56 +// 0x00000074: pop rbx +// 0x00000075: .cfi_def_cfa_offset: 48 +// 0x00000075: .cfi_restore: r3 +// 0x00000075: pop rbp +// 0x00000076: .cfi_def_cfa_offset: 40 +// 0x00000076: .cfi_restore: r6 +// 0x00000076: pop r12 +// 0x00000078: .cfi_def_cfa_offset: 32 +// 0x00000078: .cfi_restore: r12 +// 0x00000078: pop r13 +// 0x0000007a: .cfi_def_cfa_offset: 24 +// 0x0000007a: .cfi_restore: r13 +// 0x0000007a: pop r14 +// 0x0000007c: .cfi_def_cfa_offset: 16 +// 0x0000007c: .cfi_restore: r14 +// 0x0000007c: pop r15 +// 0x0000007e: .cfi_def_cfa_offset: 8 +// 0x0000007e: .cfi_restore: r15 +// 0x0000007e: ret +// 0x0000007f: .cfi_restore_state +// 0x0000007f: .cfi_def_cfa_offset: 128 static constexpr uint8_t expected_asm_kMips[] = { 0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB8, 0xAF, @@ -400,7 +400,7 @@ static constexpr uint8_t expected_cfi_kMips[] = { // 0x0000006c: .cfi_restore: r31 // 0x0000006c: addiu r29, r29, 64 // 0x00000070: .cfi_def_cfa_offset: 0 -// 0x00000070: jalr r0, r31 +// 0x00000070: jr r31 // 0x00000074: nop // 0x00000078: .cfi_restore_state // 0x00000078: .cfi_def_cfa_offset: 64 
@@ -409,8 +409,8 @@ static constexpr uint8_t expected_asm_kMips64[] = { 0xA0, 0xFF, 0xBD, 0x67, 0x58, 0x00, 0xBF, 0xFF, 0x50, 0x00, 0xBE, 0xFF, 0x48, 0x00, 0xBC, 0xFF, 0x40, 0x00, 0xB7, 0xFF, 0x38, 0x00, 0xB6, 0xFF, 0x30, 0x00, 0xB5, 0xFF, 0x28, 0x00, 0xB4, 0xFF, 0x20, 0x00, 0xB3, 0xFF, - 0x18, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xAF, 0x64, 0x00, 0xA5, 0xAF, - 0x68, 0x00, 0xAE, 0xE7, 0x6C, 0x00, 0xA7, 0xAF, 0x70, 0x00, 0xA8, 0xAF, + 0x18, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x68, 0x00, 0xA5, 0xAF, + 0x6C, 0x00, 0xAE, 0xE7, 0x70, 0x00, 0xA7, 0xAF, 0x74, 0x00, 0xA8, 0xAF, 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x18, 0x00, 0xB2, 0xDF, 0x20, 0x00, 0xB3, 0xDF, 0x28, 0x00, 0xB4, 0xDF, 0x30, 0x00, 0xB5, 0xDF, 0x38, 0x00, 0xB6, 0xDF, 0x40, 0x00, 0xB7, 0xDF, 0x48, 0x00, 0xBC, 0xDF, @@ -445,11 +445,11 @@ static constexpr uint8_t expected_cfi_kMips64[] = { // 0x00000024: .cfi_offset: r19 at cfa-64 // 0x00000024: sd r18, +24(r29) // 0x00000028: .cfi_offset: r18 at cfa-72 -// 0x00000028: sw r4, +0(r29) -// 0x0000002c: sw r5, +100(r29) -// 0x00000030: swc1 f14, +104(r29) -// 0x00000034: sw r7, +108(r29) -// 0x00000038: sw r8, +112(r29) +// 0x00000028: sd r4, +0(r29) +// 0x0000002c: sw r5, +104(r29) +// 0x00000030: swc1 f14, +108(r29) +// 0x00000034: sw r7, +112(r29) +// 0x00000038: sw r8, +116(r29) // 0x0000003c: daddiu r29, r29, -32 // 0x00000040: .cfi_def_cfa_offset: 128 // 0x00000040: daddiu r29, r29, 32 @@ -479,4 +479,3 @@ static constexpr uint8_t expected_cfi_kMips64[] = { // 0x00000070: nop // 0x00000074: .cfi_restore_state // 0x00000074: .cfi_def_cfa_offset: 96 - diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index 4186891a2a..e98e5724d0 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -18,6 +18,7 @@ #include <math.h> +#include "art_method-inl.h" #include "class_linker.h" #include "common_compiler_test.h" #include "dex_file.h" @@ -25,7 +26,6 @@ #include 
"indirect_reference_table.h" #include "jni_internal.h" #include "mem_map.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/object_array-inl.h" @@ -65,12 +65,9 @@ class JniCompilerTest : public CommonCompilerTest { hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader))); // Compile the native method before starting the runtime mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader); - mirror::ArtMethod* method; - if (direct) { - method = c->FindDirectMethod(method_name, method_sig); - } else { - method = c->FindVirtualMethod(method_name, method_sig); - } + const auto pointer_size = class_linker_->GetImagePointerSize(); + ArtMethod* method = direct ? c->FindDirectMethod(method_name, method_sig, pointer_size) : + c->FindVirtualMethod(method_name, method_sig, pointer_size); ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig; if (check_generic_jni_) { method->SetEntryPointFromQuickCompiledCode(class_linker_->GetRuntimeQuickGenericJniStub()); diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc index d3690b271f..9d2732aa2b 100644 --- a/compiler/jni/quick/arm/calling_convention_arm.cc +++ b/compiler/jni/quick/arm/calling_convention_arm.cc @@ -257,8 +257,7 @@ ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const { size_t ArmJniCallingConvention::FrameSize() { // Method*, LR and callee save area size, local reference segment state - size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) + - (2 + CalleeSaveRegisters().size()) * kFramePointerSize; + size_t frame_data_size = kArmPointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize; // References plus 2 words for HandleScope header size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount()); // Plus return value spill area size diff --git 
a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc index 4344c90b98..b094747470 100644 --- a/compiler/jni/quick/arm64/calling_convention_arm64.cc +++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc @@ -99,8 +99,8 @@ ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() { FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() { CHECK(IsCurrentParamOnStack()); FrameOffset result = - FrameOffset(displacement_.Int32Value() + // displacement - sizeof(StackReference<mirror::ArtMethod>) + // Method ref + FrameOffset(displacement_.Int32Value() + // displacement + kFramePointerSize + // Method ref (itr_slots_ * sizeof(uint32_t))); // offset into in args return result; } @@ -206,7 +206,7 @@ ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const { size_t Arm64JniCallingConvention::FrameSize() { // Method*, callee save area size, local reference segment state - size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) + + size_t frame_data_size = kFramePointerSize + CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t); // References plus 2 words for HandleScope header size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount()); diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc index 2e146c4527..bb8136bfe1 100644 --- a/compiler/jni/quick/calling_convention.cc +++ b/compiler/jni/quick/calling_convention.cc @@ -131,7 +131,7 @@ size_t JniCallingConvention::ReferenceCount() const { FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const { size_t references_size = handle_scope_pointer_size_ * ReferenceCount(); // size excluding header - return FrameOffset(HandleerencesOffset().Int32Value() + references_size); + return FrameOffset(HandleReferencesOffset().Int32Value() + references_size); } FrameOffset 
JniCallingConvention::ReturnValueSaveLocation() const { @@ -228,7 +228,7 @@ bool JniCallingConvention::IsCurrentParamALong() { FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() { CHECK(IsCurrentParamAReference()); CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset()); - int result = HandleerencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_; + int result = HandleReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_; CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value()); return FrameOffset(result); } diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h index 0c64a36455..c9b595aeea 100644 --- a/compiler/jni/quick/calling_convention.h +++ b/compiler/jni/quick/calling_convention.h @@ -171,7 +171,7 @@ class CallingConvention { if (IsStatic()) { param++; // 0th argument must skip return value at start of the shorty } else if (param == 0) { - return frame_pointer_size_; // this argument + return sizeof(mirror::HeapReference<mirror::Object>); // this argument } size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[param])); if (result >= 1 && result < 4) { @@ -196,7 +196,7 @@ class CallingConvention { unsigned int itr_float_and_doubles_; // Space for frames below this on the stack. FrameOffset displacement_; - // The size of a reference. + // The size of a pointer. const size_t frame_pointer_size_; // The size of a reference entry within the handle scope. 
const size_t handle_scope_pointer_size_; @@ -320,12 +320,13 @@ class JniCallingConvention : public CallingConvention { // Position of handle scope and interior fields FrameOffset HandleScopeOffset() const { - return FrameOffset(this->displacement_.Int32Value() + sizeof(StackReference<mirror::ArtMethod>)); + return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_); // above Method reference } FrameOffset HandleScopeLinkOffset() const { - return FrameOffset(HandleScopeOffset().Int32Value() + HandleScope::LinkOffset(frame_pointer_size_)); + return FrameOffset(HandleScopeOffset().Int32Value() + + HandleScope::LinkOffset(frame_pointer_size_)); } FrameOffset HandleScopeNumRefsOffset() const { @@ -333,7 +334,7 @@ class JniCallingConvention : public CallingConvention { HandleScope::NumberOfReferencesOffset(frame_pointer_size_)); } - FrameOffset HandleerencesOffset() const { + FrameOffset HandleReferencesOffset() const { return FrameOffset(HandleScopeOffset().Int32Value() + HandleScope::ReferencesOffset(frame_pointer_size_)); } diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index a06303d23e..0347c5e8c1 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -21,6 +21,7 @@ #include <vector> #include <fstream> +#include "art_method.h" #include "base/logging.h" #include "base/macros.h" #include "calling_convention.h" @@ -31,7 +32,6 @@ #include "driver/compiler_options.h" #include "entrypoints/quick/quick_entrypoints.h" #include "jni_env_ext.h" -#include "mirror/art_method.h" #include "utils/assembler.h" #include "utils/managed_register.h" #include "utils/arm/managed_register_arm.h" @@ -117,18 +117,18 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, if (is_64_bit_target) { __ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(), - Thread::TopHandleScopeOffset<8>(), - mr_conv->InterproceduralScratchRegister()); + Thread::TopHandleScopeOffset<8>(), + 
mr_conv->InterproceduralScratchRegister()); __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(), - main_jni_conv->HandleScopeOffset(), - mr_conv->InterproceduralScratchRegister()); + main_jni_conv->HandleScopeOffset(), + mr_conv->InterproceduralScratchRegister()); } else { __ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(), - Thread::TopHandleScopeOffset<4>(), - mr_conv->InterproceduralScratchRegister()); + Thread::TopHandleScopeOffset<4>(), + mr_conv->InterproceduralScratchRegister()); __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(), - main_jni_conv->HandleScopeOffset(), - mr_conv->InterproceduralScratchRegister()); + main_jni_conv->HandleScopeOffset(), + mr_conv->InterproceduralScratchRegister()); } // 3. Place incoming reference arguments into handle scope @@ -138,10 +138,10 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); // Check handle scope offset is within frame CHECK_LT(handle_scope_offset.Uint32Value(), frame_size); - // Note this LoadRef() already includes the heap poisoning negation. + // Note this LoadRef() doesn't need heap poisoning since its from the ArtMethod. // Note this LoadRef() does not include read barrier. It will be handled below. 
__ LoadRef(main_jni_conv->InterproceduralScratchRegister(), - mr_conv->MethodRegister(), mirror::ArtMethod::DeclaringClassOffset()); + mr_conv->MethodRegister(), ArtMethod::DeclaringClassOffset(), false); __ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false); __ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister()); main_jni_conv->Next(); // in handle scope so move to next argument @@ -251,12 +251,11 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, if (main_jni_conv->IsCurrentParamOnStack()) { FrameOffset out_off = main_jni_conv->CurrentParamStackOffset(); __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset, - mr_conv->InterproceduralScratchRegister(), - false); + mr_conv->InterproceduralScratchRegister(), false); } else { ManagedRegister out_reg = main_jni_conv->CurrentParamRegister(); __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset, - ManagedRegister::NoRegister(), false); + ManagedRegister::NoRegister(), false); } main_jni_conv->Next(); } @@ -264,10 +263,10 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, __ GetCurrentThread(main_jni_conv->CurrentParamRegister()); if (is_64_bit_target) { __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start64), - main_jni_conv->InterproceduralScratchRegister()); + main_jni_conv->InterproceduralScratchRegister()); } else { __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start32), - main_jni_conv->InterproceduralScratchRegister()); + main_jni_conv->InterproceduralScratchRegister()); } } else { __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(), @@ -347,15 +346,15 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset(); if (is_64_bit_target) { __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(), - main_jni_conv->InterproceduralScratchRegister()); + 
main_jni_conv->InterproceduralScratchRegister()); } else { __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(), - main_jni_conv->InterproceduralScratchRegister()); + main_jni_conv->InterproceduralScratchRegister()); } } // 9. Plant call to native code associated with method. - MemberOffset jni_entrypoint_offset = mirror::ArtMethod::EntryPointFromJniOffset( + MemberOffset jni_entrypoint_offset = ArtMethod::EntryPointFromJniOffset( InstructionSetPointerSize(instruction_set)); __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset, mr_conv->InterproceduralScratchRegister()); diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc index aefbf06fd7..4e716b54de 100644 --- a/compiler/jni/quick/mips/calling_convention_mips.cc +++ b/compiler/jni/quick/mips/calling_convention_mips.cc @@ -148,7 +148,7 @@ ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const { size_t MipsJniCallingConvention::FrameSize() { // Method*, LR and callee save area size, local reference segment state - size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) + + size_t frame_data_size = kMipsPointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize; // References plus 2 words for HandleScope header size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount()); diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc index d446867d32..3a11bcfe9c 100644 --- a/compiler/jni/quick/mips64/calling_convention_mips64.cc +++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc @@ -84,9 +84,9 @@ ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() { FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() { CHECK(IsCurrentParamOnStack()); FrameOffset result = - FrameOffset(displacement_.Int32Value() + // displacement - 
sizeof(StackReference<mirror::ArtMethod>) + // Method ref - (itr_slots_ * sizeof(uint32_t))); // offset into in args + FrameOffset(displacement_.Int32Value() + // displacement + kFramePointerSize + // Method ref + (itr_slots_ * sizeof(uint32_t))); // offset into in args return result; } @@ -149,7 +149,7 @@ ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const { size_t Mips64JniCallingConvention::FrameSize() { // Mehtod* and callee save area size, local reference segment state - size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) + + size_t frame_data_size = kFramePointerSize + CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t); // References plus 2 words for HandleScope header size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount()); diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc index 499dd7cf58..322caca41f 100644 --- a/compiler/jni/quick/x86/calling_convention_x86.cc +++ b/compiler/jni/quick/x86/calling_convention_x86.cc @@ -180,7 +180,7 @@ uint32_t X86JniCallingConvention::CoreSpillMask() const { size_t X86JniCallingConvention::FrameSize() { // Method*, return address and callee save area size, local reference segment state - size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) + + size_t frame_data_size = kX86PointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize; // References plus 2 words for HandleScope header size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount()); diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc index 7e92d12ce8..9c7eab1cc7 100644 --- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc +++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc @@ -97,9 +97,9 @@ ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() { } FrameOffset 
X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() { - return FrameOffset(displacement_.Int32Value() + // displacement - sizeof(StackReference<mirror::ArtMethod>) + // Method ref - (itr_slots_ * sizeof(uint32_t))); // offset into in args + return FrameOffset(displacement_.Int32Value() + // displacement + kX86_64PointerSize + // Method ref + itr_slots_ * sizeof(uint32_t)); // offset into in args } const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() { @@ -149,7 +149,7 @@ uint32_t X86_64JniCallingConvention::FpSpillMask() const { size_t X86_64JniCallingConvention::FrameSize() { // Method*, return address and callee save area size, local reference segment state - size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) + + size_t frame_data_size = kX86_64PointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize; // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount()); diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc index b17cbca2d2..d0104300d3 100644 --- a/compiler/linker/arm/relative_patcher_thumb2.cc +++ b/compiler/linker/arm/relative_patcher_thumb2.cc @@ -16,8 +16,8 @@ #include "linker/arm/relative_patcher_thumb2.h" +#include "art_method.h" #include "compiled_method.h" -#include "mirror/art_method.h" #include "utils/arm/assembler_thumb2.h" namespace art { @@ -80,7 +80,7 @@ std::vector<uint8_t> Thumb2RelativePatcher::CompileThunkCode() { arm::Thumb2Assembler assembler; assembler.LoadFromOffset( arm::kLoadWord, arm::PC, arm::R0, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value()); + ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value()); assembler.bkpt(0); std::vector<uint8_t> thunk_code(assembler.CodeSize()); MemoryRegion code(thunk_code.data(), 
thunk_code.size()); diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc index 72ddf07089..ee48789ad2 100644 --- a/compiler/linker/arm64/relative_patcher_arm64.cc +++ b/compiler/linker/arm64/relative_patcher_arm64.cc @@ -17,9 +17,9 @@ #include "linker/arm64/relative_patcher_arm64.h" #include "arch/arm64/instruction_set_features_arm64.h" +#include "art_method.h" #include "compiled_method.h" #include "driver/compiler_driver.h" -#include "mirror/art_method.h" #include "utils/arm64/assembler_arm64.h" #include "oat.h" #include "output_stream.h" @@ -158,6 +158,8 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, uint32_t insn = GetInsn(code, literal_offset); uint32_t pc_insn_offset = patch.PcInsnOffset(); uint32_t disp = target_offset - ((patch_offset - literal_offset + pc_insn_offset) & ~0xfffu); + bool wide = (insn & 0x40000000) != 0; + uint32_t shift = wide ? 3u : 2u; if (literal_offset == pc_insn_offset) { // Check it's an ADRP with imm == 0 (unset). DCHECK_EQ((insn & 0xffffffe0u), 0x90000000u) @@ -173,7 +175,7 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, uint32_t out_disp = thunk_offset - patch_offset; DCHECK_EQ(out_disp & 3u, 0u); DCHECK((out_disp >> 27) == 0u || (out_disp >> 27) == 31u); // 28-bit signed. - insn = (out_disp & 0x0fffffffu) >> 2; + insn = (out_disp & 0x0fffffffu) >> shift; insn |= 0x14000000; // B <thunk> uint32_t back_disp = -out_disp; @@ -194,7 +196,8 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, // Write the new ADRP (or B to the erratum 843419 thunk). SetInsn(code, literal_offset, insn); } else { - DCHECK_EQ(insn & 0xfffffc00, 0xb9400000); // LDR 32-bit with imm12 == 0 (unset). + // LDR 32-bit or 64-bit with imm12 == 0 (unset). 
+ DCHECK_EQ(insn & 0xbffffc00, 0xb9400000) << insn; if (kIsDebugBuild) { uint32_t adrp = GetInsn(code, pc_insn_offset); if ((adrp & 0x9f000000u) != 0x90000000u) { @@ -216,7 +219,7 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, CHECK_EQ(adrp & 0x9f00001fu, // Check that pc_insn_offset points 0x90000000 | ((insn >> 5) & 0x1fu)); // to ADRP with matching register. } - uint32_t imm12 = (disp & 0xfffu) >> 2; + uint32_t imm12 = (disp & 0xfffu) >> shift; insn = (insn & ~(0xfffu << 10)) | (imm12 << 10); SetInsn(code, literal_offset, insn); } @@ -226,7 +229,7 @@ std::vector<uint8_t> Arm64RelativePatcher::CompileThunkCode() { // The thunk just uses the entry point in the ArtMethod. This works even for calls // to the generic JNI and interpreter trampolines. arm64::Arm64Assembler assembler; - Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArm64PointerSize).Int32Value()); assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0)); // Ensure we emit the literal pool. 
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index a871a82d95..074775633f 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -15,6 +15,7 @@ */ #include "arch/instruction_set_features.h" +#include "art_method-inl.h" #include "class_linker.h" #include "common_compiler_test.h" #include "compiled_method.h" @@ -26,7 +27,6 @@ #include "driver/compiler_driver.h" #include "driver/compiler_options.h" #include "entrypoints/quick/quick_entrypoints.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" @@ -41,7 +41,7 @@ class OatTest : public CommonCompilerTest { protected: static const bool kCompile = false; // DISABLED_ due to the time to compile libcore - void CheckMethod(mirror::ArtMethod* method, + void CheckMethod(ArtMethod* method, const OatFile::OatMethod& oat_method, const DexFile& dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -140,16 +140,18 @@ TEST_F(OatTest, WriteRead) { ASSERT_TRUE(oat_dex_file != nullptr); CHECK_EQ(dex_file.GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum()); ScopedObjectAccess soa(Thread::Current()); + auto pointer_size = class_linker->GetImagePointerSize(); for (size_t i = 0; i < dex_file.NumClassDefs(); i++) { const DexFile::ClassDef& class_def = dex_file.GetClassDef(i); const uint8_t* class_data = dex_file.GetClassData(class_def); + size_t num_virtual_methods = 0; if (class_data != nullptr) { ClassDataItemIterator it(dex_file, class_data); num_virtual_methods = it.NumVirtualMethods(); } + const char* descriptor = dex_file.GetClassDescriptor(class_def); - StackHandleScope<1> hs(soa.Self()); mirror::Class* klass = class_linker->FindClass(soa.Self(), descriptor, NullHandle<mirror::ClassLoader>()); @@ -159,14 +161,19 @@ TEST_F(OatTest, WriteRead) { oat_class.GetType()) << descriptor; size_t method_index = 0; - for (size_t j = 0; j < klass->NumDirectMethods(); j++, method_index++) { - 
CheckMethod(klass->GetDirectMethod(j), - oat_class.GetOatMethod(method_index), dex_file); + for (auto& m : klass->GetDirectMethods(pointer_size)) { + CheckMethod(&m, oat_class.GetOatMethod(method_index), dex_file); + ++method_index; } - for (size_t j = 0; j < num_virtual_methods; j++, method_index++) { - CheckMethod(klass->GetVirtualMethod(j), - oat_class.GetOatMethod(method_index), dex_file); + size_t visited_virtuals = 0; + for (auto& m : klass->GetVirtualMethods(pointer_size)) { + if (!m.IsMiranda()) { + CheckMethod(&m, oat_class.GetOatMethod(method_index), dex_file); + ++method_index; + ++visited_virtuals; + } } + EXPECT_EQ(visited_virtuals, num_virtual_methods); } } diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 15b4017816..633bf64171 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -19,6 +19,7 @@ #include <zlib.h> #include "arch/arm64/instruction_set_features_arm64.h" +#include "art_method-inl.h" #include "base/allocator.h" #include "base/bit_vector.h" #include "base/stl_util.h" @@ -33,7 +34,6 @@ #include "gc/space/space.h" #include "image_writer.h" #include "linker/relative_patcher.h" -#include "mirror/art_method-inl.h" #include "mirror/array.h" #include "mirror/class_loader.h" #include "mirror/dex_cache-inl.h" @@ -620,10 +620,9 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor { ScopedObjectAccessUnchecked soa(Thread::Current()); StackHandleScope<1> hs(soa.Self()); Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file_))); - mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, it.GetMemberIndex(), dex_cache, - NullHandle<mirror::ClassLoader>(), - NullHandle<mirror::ArtMethod>(), - invoke_type); + ArtMethod* method = linker->ResolveMethod( + *dex_file_, it.GetMemberIndex(), dex_cache, NullHandle<mirror::ClassLoader>(), nullptr, + invoke_type); if (method == nullptr) { LOG(ERROR) << "Unexpected failure to resolve a method: " << PrettyMethod(it.GetMemberIndex(), 
*dex_file_, true); @@ -755,8 +754,8 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { uint32_t target_offset = GetTargetOffset(patch); PatchCodeAddress(&patched_code_, patch.LiteralOffset(), target_offset); } else if (patch.Type() == kLinkerPatchMethod) { - mirror::ArtMethod* method = GetTargetMethod(patch); - PatchObjectAddress(&patched_code_, patch.LiteralOffset(), method); + ArtMethod* method = GetTargetMethod(patch); + PatchMethodAddress(&patched_code_, patch.LiteralOffset(), method); } else if (patch.Type() == kLinkerPatchType) { mirror::Class* type = GetTargetType(patch); PatchObjectAddress(&patched_code_, patch.LiteralOffset(), type); @@ -794,12 +793,13 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { << PrettyMethod(it.GetMemberIndex(), *dex_file_) << " to " << out_->GetLocation(); } - mirror::ArtMethod* GetTargetMethod(const LinkerPatch& patch) + ArtMethod* GetTargetMethod(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { MethodReference ref = patch.TargetMethod(); mirror::DexCache* dex_cache = (dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(*ref.dex_file); - mirror::ArtMethod* method = dex_cache->GetResolvedMethod(ref.dex_method_index); + ArtMethod* method = dex_cache->GetResolvedMethod( + ref.dex_method_index, class_linker_->GetImagePointerSize()); CHECK(method != nullptr); return method; } @@ -810,7 +810,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { (target_it != writer_->method_offset_map_.map.end()) ? target_it->second : 0u; // If there's no compiled code, point to the correct trampoline. 
if (UNLIKELY(target_offset == 0)) { - mirror::ArtMethod* target = GetTargetMethod(patch); + ArtMethod* target = GetTargetMethod(patch); DCHECK(target != nullptr); size_t size = GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet()); const void* oat_code_offset = target->GetEntryPointFromQuickCompiledCodePtrSize(size); @@ -865,6 +865,23 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { data[3] = (address >> 24) & 0xffu; } + void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // NOTE: Direct method pointers across oat files don't use linker patches. However, direct + // type pointers across oat files do. (TODO: Investigate why.) + if (writer_->image_writer_ != nullptr) { + method = writer_->image_writer_->GetImageMethodAddress(method); + } + // Note: We only patch ArtMethods to low 4gb since thats where the image is. + uint32_t address = PointerToLowMemUInt32(method); + DCHECK_LE(offset + 4, code->size()); + uint8_t* data = &(*code)[offset]; + data[0] = address & 0xffu; + data[1] = (address >> 8) & 0xffu; + data[2] = (address >> 16) & 0xffu; + data[3] = (address >> 24) & 0xffu; + } + void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t address = writer_->image_writer_ == nullptr ? 
target_offset : diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index a5c6f23343..58416ee93b 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -665,9 +665,8 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction, *dex_compilation_unit_->GetDexFile()))); Handle<mirror::ClassLoader> class_loader(hs.NewHandle( soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader()))); - mirror::ArtMethod* resolved_method = compiler_driver_->ResolveMethod( - soa, dex_cache, class_loader, dex_compilation_unit_, method_idx, - optimized_invoke_type); + ArtMethod* resolved_method = compiler_driver_->ResolveMethod( + soa, dex_cache, class_loader, dex_compilation_unit_, method_idx, optimized_invoke_type); if (resolved_method == nullptr) { MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedMethod); diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 0e776b31f7..a5d5305836 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -114,18 +114,24 @@ size_t CodeGenerator::GetCacheOffset(uint32_t index) { return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue(); } +size_t CodeGenerator::GetCachePointerOffset(uint32_t index) { + auto pointer_size = InstructionSetPointerSize(GetInstructionSet()); + return mirror::Array::DataOffset(pointer_size).Uint32Value() + pointer_size * index; +} + void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) { Initialize(); if (!is_leaf) { MarkNotLeaf(); } + const bool is_64_bit = Is64BitInstructionSet(GetInstructionSet()); InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs() + GetGraph()->GetTemporariesVRegSlots() + 1 /* filler */, 0, /* the baseline compiler does not have live registers at slow path */ 0, /* the baseline compiler does not have live registers at slow path */ GetGraph()->GetMaximumNumberOfOutVRegs() - + 1 /* current 
method */, + + (is_64_bit ? 2 : 1) /* current method */, GetGraph()->GetBlocks()); CompileInternal(allocator, /* is_baseline */ true); } @@ -270,7 +276,8 @@ int32_t CodeGenerator::GetStackSlot(HLocal* local) const { uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs(); if (reg_number >= number_of_locals) { // Local is a parameter of the method. It is stored in the caller's frame. - return GetFrameSize() + kVRegSize // ART method + // TODO: Share this logic with StackVisitor::GetVRegOffsetFromQuickCode. + return GetFrameSize() + InstructionSetPointerSize(GetInstructionSet()) // ART method + (reg_number - number_of_locals) * kVRegSize; } else { // Local is a temporary in this method. It is stored in this method's frame. diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index bdbd571133..c6317f18d3 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -145,7 +145,7 @@ class CodeGenerator { size_t GetStackSlotOfParameter(HParameterValue* parameter) const { // Note that this follows the current calling convention. return GetFrameSize() - + kVRegSize // Art method + + InstructionSetPointerSize(GetInstructionSet()) // Art method + parameter->GetIndex() * kVRegSize; } @@ -266,6 +266,8 @@ class CodeGenerator { // Note: this method assumes we always have the same pointer size, regardless // of the architecture. static size_t GetCacheOffset(uint32_t index); + // Pointer variant for ArtMethod and ArtField arrays. 
+ size_t GetCachePointerOffset(uint32_t index); void EmitParallelMoves(Location from1, Location to1, @@ -469,11 +471,13 @@ class CallingConvention { CallingConvention(const C* registers, size_t number_of_registers, const F* fpu_registers, - size_t number_of_fpu_registers) + size_t number_of_fpu_registers, + size_t pointer_size) : registers_(registers), number_of_registers_(number_of_registers), fpu_registers_(fpu_registers), - number_of_fpu_registers_(number_of_fpu_registers) {} + number_of_fpu_registers_(number_of_fpu_registers), + pointer_size_(pointer_size) {} size_t GetNumberOfRegisters() const { return number_of_registers_; } size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; } @@ -490,8 +494,8 @@ class CallingConvention { size_t GetStackOffsetOf(size_t index) const { // We still reserve the space for parameters passed by registers. - // Add one for the method pointer. - return (index + 1) * kVRegSize; + // Add space for the method pointer. + return pointer_size_ + index * kVRegSize; } private: @@ -499,6 +503,7 @@ class CallingConvention { const size_t number_of_registers_; const F* fpu_registers_; const size_t number_of_fpu_registers_; + const size_t pointer_size_; DISALLOW_COPY_AND_ASSIGN(CallingConvention); }; diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 13775fed40..2b1131d65f 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -17,13 +17,13 @@ #include "code_generator_arm.h" #include "arch/arm/instruction_set_features_arm.h" +#include "art_method.h" #include "entrypoints/quick/quick_entrypoints.h" #include "gc/accounting/card_table.h" #include "intrinsics.h" #include "intrinsics_arm.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" -#include "mirror/class.h" +#include "mirror/class-inl.h" #include "thread.h" #include "utils/arm/assembler_arm.h" #include "utils/arm/managed_register_arm.h" @@ -1312,8 +1312,8 
@@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) { } Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>(); - uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() + - invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry); + uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( + invoke->GetVTableIndex(), kArmPointerSize).Uint32Value(); LocationSummary* locations = invoke->GetLocations(); Location receiver = locations->InAt(0); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); @@ -1326,7 +1326,7 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) { } codegen_->MaybeRecordImplicitNullCheck(invoke); // temp = temp->GetMethodAt(method_offset); - uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArmWordSize).Int32Value(); __ LoadFromOffset(kLoadWord, temp, temp, method_offset); // LR = temp->GetEntryPoint(); @@ -1346,8 +1346,8 @@ void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) { void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) { // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError. 
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>(); - uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() + - (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry); + uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset( + invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value(); LocationSummary* locations = invoke->GetLocations(); Location receiver = locations->InAt(0); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); @@ -1365,7 +1365,7 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) } codegen_->MaybeRecordImplicitNullCheck(invoke); // temp = temp->GetImtEntryAt(method_offset); - uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArmWordSize).Int32Value(); __ LoadFromOffset(kLoadWord, temp, temp, method_offset); // LR = temp->GetEntryPoint(); @@ -3796,12 +3796,12 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) { DCHECK(!cls->CanCallRuntime()); DCHECK(!cls->MustGenerateClinitCheck()); codegen_->LoadCurrentMethod(out); - __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()); + __ LoadFromOffset(kLoadWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value()); } else { DCHECK(cls->CanCallRuntime()); codegen_->LoadCurrentMethod(out); __ LoadFromOffset( - kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()); + kLoadWord, out, out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()); __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())); SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM( @@ -3858,7 +3858,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) { Register out = 
load->GetLocations()->Out().AsRegister<Register>(); codegen_->LoadCurrentMethod(out); - __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()); + __ LoadFromOffset(kLoadWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value()); __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value()); __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex())); __ cmp(out, ShifterOperand(0)); @@ -4081,7 +4081,7 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset()); // LR = temp[offset_of_quick_compiled_code] __ LoadFromOffset(kLoadWord, LR, temp, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + ArtMethod::EntryPointFromQuickCompiledCodeOffset( kArmWordSize).Int32Value()); // LR() __ blx(LR); @@ -4091,14 +4091,13 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, if (!invoke->IsRecursive()) { // temp = temp->dex_cache_resolved_methods_; __ LoadFromOffset( - kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()); + kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()); // temp = temp[index_in_cache] __ LoadFromOffset( kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())); // LR = temp[offset_of_quick_compiled_code] - __ LoadFromOffset(kLoadWord, LR, temp, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( - kArmWordSize).Int32Value()); + __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset( + kArmWordSize).Int32Value()); // LR() __ blx(LR); } else { diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h index 1a498e1148..c410fa80ba 100644 --- a/compiler/optimizing/code_generator_arm.h +++ b/compiler/optimizing/code_generator_arm.h @@ -54,7 +54,8 @@ class 
InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegis : CallingConvention(kRuntimeParameterCoreRegisters, kRuntimeParameterCoreRegistersLength, kRuntimeParameterFpuRegisters, - kRuntimeParameterFpuRegistersLength) {} + kRuntimeParameterFpuRegistersLength, + kArmPointerSize) {} private: DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention); @@ -72,7 +73,8 @@ class InvokeDexCallingConvention : public CallingConvention<Register, SRegister> : CallingConvention(kParameterCoreRegisters, kParameterCoreRegistersLength, kParameterFpuRegisters, - kParameterFpuRegistersLength) {} + kParameterFpuRegistersLength, + kArmPointerSize) {} private: DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention); diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 0222f93da4..55ef66fa99 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -17,6 +17,7 @@ #include "code_generator_arm64.h" #include "arch/arm64/instruction_set_features_arm64.h" +#include "art_method.h" #include "common_arm64.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/quick/quick_entrypoints_enum.h" @@ -24,8 +25,7 @@ #include "intrinsics.h" #include "intrinsics_arm64.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" -#include "mirror/class.h" +#include "mirror/class-inl.h" #include "offsets.h" #include "thread.h" #include "utils/arm64/assembler_arm64.h" @@ -65,7 +65,6 @@ using helpers::WRegisterFrom; using helpers::XRegisterFrom; using helpers::ARM64EncodableConstantOrRegister; -static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>); static constexpr int kCurrentMethodStackOffset = 0; inline Condition ARM64Condition(IfCondition cond) { @@ -968,7 +967,7 @@ void CodeGeneratorARM64::StoreRelease(Primitive::Type type, void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) { DCHECK(RequiresCurrentMethod()); - 
DCHECK(current_method.IsW()); + CHECK(current_method.IsX()); __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset)); } @@ -1940,12 +1939,12 @@ void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) { void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) { // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError. - Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0)); - uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() + - (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry); + Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0)); + uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset( + invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value(); Location receiver = invoke->GetLocations()->InAt(0); Offset class_offset = mirror::Object::ClassOffset(); - Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize); + Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize); // The register ip1 is required to be used for the hidden argument in // art_quick_imt_conflict_trampoline, so prevent VIXL from using it. 
@@ -1957,16 +1956,16 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok // temp = object->GetClass(); if (receiver.IsStackSlot()) { - __ Ldr(temp, StackOperandFrom(receiver)); - __ Ldr(temp, HeapOperand(temp, class_offset)); + __ Ldr(temp.W(), StackOperandFrom(receiver)); + __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset)); } else { - __ Ldr(temp, HeapOperandFrom(receiver, class_offset)); + __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset)); } codegen_->MaybeRecordImplicitNullCheck(invoke); // temp = temp->GetImtEntryAt(method_offset); - __ Ldr(temp, HeapOperand(temp, method_offset)); + __ Ldr(temp, MemOperand(temp, method_offset)); // lr = temp->GetEntryPoint(); - __ Ldr(lr, HeapOperand(temp, entry_point)); + __ Ldr(lr, MemOperand(temp, entry_point.Int32Value())); // lr(); __ Blr(lr); DCHECK(!codegen_->IsLeafMethod()); @@ -2007,8 +2006,7 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codege void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) { // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention. 
DCHECK(temp.Is(kArtMethodRegister)); - size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() + - invoke->GetDexMethodIndex() * kHeapRefSize; + size_t index_in_cache = GetCachePointerOffset(invoke->GetDexMethodIndex()); // TODO: Implement all kinds of calls: // 1) boot -> boot @@ -2019,23 +2017,24 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok if (invoke->IsStringInit()) { // temp = thread->string_init_entrypoint - __ Ldr(temp, HeapOperand(tr, invoke->GetStringInitOffset())); + __ Ldr(temp.X(), MemOperand(tr, invoke->GetStringInitOffset())); // LR = temp->entry_point_from_quick_compiled_code_; - __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( - kArm64WordSize))); + __ Ldr(lr, MemOperand( + temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value())); // lr() __ Blr(lr); } else { // temp = method; - LoadCurrentMethod(temp); + LoadCurrentMethod(temp.X()); if (!invoke->IsRecursive()) { // temp = temp->dex_cache_resolved_methods_; - __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset())); + __ Ldr(temp.W(), MemOperand(temp.X(), + ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())); // temp = temp[index_in_cache]; - __ Ldr(temp, HeapOperand(temp, index_in_cache)); + __ Ldr(temp.X(), MemOperand(temp, index_in_cache)); // lr = temp->entry_point_from_quick_compiled_code_; - __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( - kArm64WordSize))); + __ Ldr(lr, MemOperand(temp.X(), ArtMethod::EntryPointFromQuickCompiledCodeOffset( + kArm64WordSize).Int32Value())); // lr(); __ Blr(lr); } else { @@ -2056,7 +2055,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir } BlockPoolsScope block_pools(GetVIXLAssembler()); - Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0)); + Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0)); 
codegen_->GenerateStaticOrDirectCall(invoke, temp); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } @@ -2068,27 +2067,27 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { LocationSummary* locations = invoke->GetLocations(); Location receiver = locations->InAt(0); - Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0)); - size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() + - invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry); + Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0)); + size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( + invoke->GetVTableIndex(), kArm64PointerSize).SizeValue(); Offset class_offset = mirror::Object::ClassOffset(); - Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize); + Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize); BlockPoolsScope block_pools(GetVIXLAssembler()); // temp = object->GetClass(); if (receiver.IsStackSlot()) { - __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex())); - __ Ldr(temp, HeapOperand(temp, class_offset)); + __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex())); + __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset)); } else { DCHECK(receiver.IsRegister()); - __ Ldr(temp, HeapOperandFrom(receiver, class_offset)); + __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset)); } codegen_->MaybeRecordImplicitNullCheck(invoke); // temp = temp->GetMethodAt(method_offset); - __ Ldr(temp, HeapOperand(temp, method_offset)); + __ Ldr(temp, MemOperand(temp, method_offset)); // lr = temp->GetEntryPoint(); - __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue())); + __ Ldr(lr, MemOperand(temp, entry_point.SizeValue())); // lr(); __ Blr(lr); DCHECK(!codegen_->IsLeafMethod()); @@ -2107,12 +2106,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) { if (cls->IsReferrersClass()) { DCHECK(!cls->CanCallRuntime()); 
DCHECK(!cls->MustGenerateClinitCheck()); - codegen_->LoadCurrentMethod(out); - __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset())); + codegen_->LoadCurrentMethod(out.X()); + __ Ldr(out, MemOperand(out.X(), ArtMethod::DeclaringClassOffset().Int32Value())); } else { DCHECK(cls->CanCallRuntime()); - codegen_->LoadCurrentMethod(out); - __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset())); + codegen_->LoadCurrentMethod(out.X()); + __ Ldr(out, MemOperand(out.X(), ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64( @@ -2159,8 +2158,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) { codegen_->AddSlowPath(slow_path); Register out = OutputRegister(load); - codegen_->LoadCurrentMethod(out); - __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset())); + codegen_->LoadCurrentMethod(out.X()); + __ Ldr(out, MemOperand(out.X(), ArtMethod::DeclaringClassOffset().Int32Value())); __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset())); __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); __ Cbz(out, slow_path->GetEntryLabel()); @@ -2288,7 +2287,7 @@ void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) { locations->SetOut(LocationFrom(x0)); locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1))); CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, - void*, uint32_t, int32_t, mirror::ArtMethod*>(); + void*, uint32_t, int32_t, ArtMethod*>(); } void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) { @@ -2296,17 +2295,16 @@ void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) { InvokeRuntimeCallingConvention calling_convention; Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt); 
DCHECK(type_index.Is(w0)); - Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot); - DCHECK(current_method.Is(w2)); - codegen_->LoadCurrentMethod(current_method); + Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimLong); + DCHECK(current_method.Is(x2)); + codegen_->LoadCurrentMethod(current_method.X()); __ Mov(type_index, instruction->GetTypeIndex()); codegen_->InvokeRuntime( GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(), instruction, instruction->GetDexPc(), nullptr); - CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, - void*, uint32_t, int32_t, mirror::ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>(); } void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { @@ -2316,7 +2314,7 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0))); locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1))); locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); } void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) { @@ -2325,14 +2323,14 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) DCHECK(type_index.Is(w0)); Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot); DCHECK(current_method.Is(w1)); - codegen_->LoadCurrentMethod(current_method); + codegen_->LoadCurrentMethod(current_method.X()); __ Mov(type_index, instruction->GetTypeIndex()); codegen_->InvokeRuntime( GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(), instruction, instruction->GetDexPc(), nullptr); - 
CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); } void LocationsBuilderARM64::VisitNot(HNot* instruction) { diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index 8aeea5400f..3486cdebec 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -45,7 +45,7 @@ static const vixl::FPRegister kParameterFPRegisters[] = { static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters); const vixl::Register tr = vixl::x18; // Thread Register -static const vixl::Register kArtMethodRegister = vixl::w0; // Method register on invoke. +static const vixl::Register kArtMethodRegister = vixl::x0; // Method register on invoke. const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1); const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31); @@ -94,7 +94,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<vixl::Register, : CallingConvention(kRuntimeParameterCoreRegisters, kRuntimeParameterCoreRegistersLength, kRuntimeParameterFpuRegisters, - kRuntimeParameterFpuRegistersLength) {} + kRuntimeParameterFpuRegistersLength, + kArm64PointerSize) {} Location GetReturnLocation(Primitive::Type return_type); @@ -108,7 +109,8 @@ class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl : CallingConvention(kParameterCoreRegisters, kParameterCoreRegistersLength, kParameterFPRegisters, - kParameterFPRegistersLength) {} + kParameterFPRegistersLength, + kArm64PointerSize) {} Location GetReturnLocation(Primitive::Type return_type) { return ARM64ReturnLocation(return_type); diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 2848a48a64..60fd29bf74 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ 
b/compiler/optimizing/code_generator_x86.cc @@ -16,6 +16,7 @@ #include "code_generator_x86.h" +#include "art_method.h" #include "code_generator_utils.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/quick/quick_entrypoints_enum.h" @@ -23,8 +24,7 @@ #include "intrinsics.h" #include "intrinsics_x86.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" -#include "mirror/class.h" +#include "mirror/class-inl.h" #include "thread.h" #include "utils/assembler.h" #include "utils/stack_checks.h" @@ -1275,8 +1275,8 @@ void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) { void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) { Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>(); - uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() + - invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry); + uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( + invoke->GetVTableIndex(), kX86PointerSize).Uint32Value(); LocationSummary* locations = invoke->GetLocations(); Location receiver = locations->InAt(0); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); @@ -1292,7 +1292,7 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) { __ movl(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); __ call(Address( - temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value())); + temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value())); DCHECK(!codegen_->IsLeafMethod()); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); @@ -1307,8 +1307,8 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) { void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) { // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError. 
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>(); - uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() + - (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry); + uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset( + invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value(); LocationSummary* locations = invoke->GetLocations(); Location receiver = locations->InAt(0); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); @@ -1328,7 +1328,7 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) // temp = temp->GetImtEntryAt(method_offset); __ movl(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); - __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset( kX86WordSize).Int32Value())); DCHECK(!codegen_->IsLeafMethod()); @@ -3207,18 +3207,19 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, __ fs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset())); // (temp + offset_of_quick_compiled_code)() __ call(Address( - temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value())); + temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value())); } else { // temp = method; LoadCurrentMethod(temp); if (!invoke->IsRecursive()) { // temp = temp->dex_cache_resolved_methods_; - __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())); + __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())); // temp = temp[index_in_cache] - __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()))); + __ movl(temp, Address(temp, + CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex()))); // (temp + 
offset_of_quick_compiled_code)() __ call(Address(temp, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value())); + ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value())); } else { __ call(GetFrameEntryLabel()); } @@ -4278,11 +4279,11 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) { DCHECK(!cls->CanCallRuntime()); DCHECK(!cls->MustGenerateClinitCheck()); codegen_->LoadCurrentMethod(out); - __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value())); + __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value())); } else { DCHECK(cls->CanCallRuntime()); codegen_->LoadCurrentMethod(out); - __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); + __ movl(out, Address(out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86( @@ -4337,7 +4338,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) { Register out = load->GetLocations()->Out().AsRegister<Register>(); codegen_->LoadCurrentMethod(out); - __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value())); + __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value())); __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value())); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); __ testl(out, out); diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h index 5a5a37b3fe..43214fe7d5 100644 --- a/compiler/optimizing/code_generator_x86.h +++ b/compiler/optimizing/code_generator_x86.h @@ -52,7 +52,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmReg : CallingConvention(kRuntimeParameterCoreRegisters, kRuntimeParameterCoreRegistersLength, 
kRuntimeParameterFpuRegisters, - kRuntimeParameterFpuRegistersLength) {} + kRuntimeParameterFpuRegistersLength, + kX86PointerSize) {} private: DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention); @@ -64,7 +65,8 @@ class InvokeDexCallingConvention : public CallingConvention<Register, XmmRegiste kParameterCoreRegisters, kParameterCoreRegistersLength, kParameterFpuRegisters, - kParameterFpuRegistersLength) {} + kParameterFpuRegistersLength, + kX86PointerSize) {} RegisterPair GetRegisterPairAt(size_t argument_index) { DCHECK_LT(argument_index + 1, GetNumberOfRegisters()); diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index e633970279..b0174b9b16 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -16,14 +16,14 @@ #include "code_generator_x86_64.h" +#include "art_method.h" #include "code_generator_utils.h" #include "entrypoints/quick/quick_entrypoints.h" #include "gc/accounting/card_table.h" #include "intrinsics.h" #include "intrinsics_x86_64.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" -#include "mirror/class.h" +#include "mirror/class-inl.h" #include "mirror/object_reference.h" #include "thread.h" #include "utils/assembler.h" @@ -374,18 +374,19 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo // temp = thread->string_init_entrypoint __ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset())); // (temp + offset_of_quick_compiled_code)() - __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset( kX86_64WordSize).SizeValue())); } else { // temp = method; LoadCurrentMethod(temp); if (!invoke->IsRecursive()) { // temp = temp->dex_cache_resolved_methods_; - __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue())); + __ movl(temp, Address(temp, 
ArtMethod::DexCacheResolvedMethodsOffset().SizeValue())); // temp = temp[index_in_cache] - __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()))); + __ movq(temp, Address( + temp, CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex()))); // (temp + offset_of_quick_compiled_code)() - __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset( kX86_64WordSize).SizeValue())); } else { __ call(&frame_entry_label_); @@ -545,7 +546,7 @@ void CodeGeneratorX86_64::GenerateFrameEntry() { } } - __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI)); + __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI)); } void CodeGeneratorX86_64::GenerateFrameExit() { @@ -585,7 +586,7 @@ void CodeGeneratorX86_64::Bind(HBasicBlock* block) { void CodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) { DCHECK(RequiresCurrentMethod()); - __ movl(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset)); + __ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset)); } Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const { @@ -1383,8 +1384,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) } CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>(); - size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() + - invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry); + size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( + invoke->GetVTableIndex(), kX86_64PointerSize).SizeValue(); LocationSummary* locations = invoke->GetLocations(); Location receiver = locations->InAt(0); size_t class_offset = mirror::Object::ClassOffset().SizeValue(); @@ -1397,9 +1398,9 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) } codegen_->MaybeRecordImplicitNullCheck(invoke); // temp 
= temp->GetMethodAt(method_offset); - __ movl(temp, Address(temp, method_offset)); + __ movq(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); - __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset( kX86_64WordSize).SizeValue())); DCHECK(!codegen_->IsLeafMethod()); @@ -1415,8 +1416,8 @@ void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) { void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) { // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError. CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>(); - uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() + - (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry); + uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset( + invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value(); LocationSummary* locations = invoke->GetLocations(); Location receiver = locations->InAt(0); size_t class_offset = mirror::Object::ClassOffset().SizeValue(); @@ -1434,9 +1435,9 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo } codegen_->MaybeRecordImplicitNullCheck(invoke); // temp = temp->GetImtEntryAt(method_offset); - __ movl(temp, Address(temp, method_offset)); + __ movq(temp, Address(temp, method_offset)); // call temp->GetEntryPoint(); - __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset( kX86_64WordSize).SizeValue())); DCHECK(!codegen_->IsLeafMethod()); @@ -4125,11 +4126,11 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) { DCHECK(!cls->CanCallRuntime()); DCHECK(!cls->MustGenerateClinitCheck()); codegen_->LoadCurrentMethod(out); - __ movl(out, Address(out, 
mirror::ArtMethod::DeclaringClassOffset().Int32Value())); + __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value())); } else { DCHECK(cls->CanCallRuntime()); codegen_->LoadCurrentMethod(out); - __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); + __ movl(out, Address(out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); @@ -4174,7 +4175,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) { CpuRegister out = load->GetLocations()->Out().AsRegister<CpuRegister>(); codegen_->LoadCurrentMethod(CpuRegister(out)); - __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value())); + __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value())); __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value())); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); __ testl(out, out); diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index 480ea6b9c9..4be401a0fa 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -50,7 +50,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatR : CallingConvention(kRuntimeParameterCoreRegisters, kRuntimeParameterCoreRegistersLength, kRuntimeParameterFpuRegisters, - kRuntimeParameterFpuRegistersLength) {} + kRuntimeParameterFpuRegistersLength, + kX86_64PointerSize) {} private: DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention); @@ -62,7 +63,8 @@ class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegis kParameterCoreRegisters, kParameterCoreRegistersLength, kParameterFloatRegisters, - 
kParameterFloatRegistersLength) {} + kParameterFloatRegistersLength, + kX86_64PointerSize) {} private: DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention); diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index d88424cb5e..8253a43389 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -16,6 +16,7 @@ #include "inliner.h" +#include "art_method-inl.h" #include "builder.h" #include "class_linker.h" #include "constant_folding.h" @@ -23,7 +24,6 @@ #include "driver/compiler_driver-inl.h" #include "driver/dex_compilation_unit.h" #include "instruction_simplifier.h" -#include "mirror/art_method-inl.h" #include "mirror/class_loader.h" #include "mirror/dex_cache.h" #include "nodes.h" @@ -81,11 +81,10 @@ bool HInliner::TryInline(HInvoke* invoke_instruction, hs.NewHandle(caller_compilation_unit_.GetClassLinker()->FindDexCache(caller_dex_file))); Handle<mirror::ClassLoader> class_loader(hs.NewHandle( soa.Decode<mirror::ClassLoader*>(caller_compilation_unit_.GetClassLoader()))); - Handle<mirror::ArtMethod> resolved_method(hs.NewHandle( - compiler_driver_->ResolveMethod( - soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type))); + ArtMethod* resolved_method(compiler_driver_->ResolveMethod( + soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type)); - if (resolved_method.Get() == nullptr) { + if (resolved_method == nullptr) { VLOG(compiler) << "Method cannot be resolved " << PrettyMethod(method_index, caller_dex_file); return false; } @@ -149,7 +148,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction, return true; } -bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method, +bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, HInvoke* invoke_instruction, uint32_t method_index, bool can_use_dex_cache) const { @@ -172,6 +171,7 @@ bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method, graph_->GetArena(), 
caller_dex_file, method_index, + compiler_driver_->GetInstructionSet(), graph_->IsDebuggable(), graph_->GetCurrentInstructionId()); diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h index 1dbc7d392b..831bdf22a0 100644 --- a/compiler/optimizing/inliner.h +++ b/compiler/optimizing/inliner.h @@ -48,7 +48,7 @@ class HInliner : public HOptimization { private: bool TryInline(HInvoke* invoke_instruction, uint32_t method_index, InvokeType invoke_type) const; - bool TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method, + bool TryBuildAndInline(ArtMethod* resolved_method, HInvoke* invoke_instruction, uint32_t method_index, bool can_use_dex_cache) const; diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index dccfe9a0ca..db35b8f767 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -17,11 +17,11 @@ #include "intrinsics_arm.h" #include "arch/arm/instruction_set_features_arm.h" +#include "art_method.h" #include "code_generator_arm.h" #include "entrypoints/quick/quick_entrypoints.h" #include "intrinsics.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" #include "mirror/string.h" #include "thread.h" #include "utils/arm/assembler_arm.h" diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 2c4fab0465..957373f6f9 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -17,12 +17,12 @@ #include "intrinsics_arm64.h" #include "arch/arm64/instruction_set_features_arm64.h" +#include "art_method.h" #include "code_generator_arm64.h" #include "common_arm64.h" #include "entrypoints/quick/quick_entrypoints.h" #include "intrinsics.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" #include "mirror/string.h" #include "thread.h" #include "utils/arm64/assembler_arm64.h" diff --git a/compiler/optimizing/intrinsics_x86.cc 
b/compiler/optimizing/intrinsics_x86.cc index 28b7a07cf9..989dd0df30 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -19,11 +19,11 @@ #include <limits> #include "arch/x86/instruction_set_features_x86.h" +#include "art_method.h" #include "code_generator_x86.h" #include "entrypoints/quick/quick_entrypoints.h" #include "intrinsics.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" #include "mirror/string.h" #include "thread.h" #include "utils/x86/assembler_x86.h" diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 0efa714a23..c245cb646f 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -19,11 +19,11 @@ #include <limits> #include "arch/x86_64/instruction_set_features_x86_64.h" +#include "art_method-inl.h" #include "code_generator_x86_64.h" #include "entrypoints/quick/quick_entrypoints.h" #include "intrinsics.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" #include "mirror/string.h" #include "thread.h" #include "utils/x86_64/assembler_x86_64.h" diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 77b587e74f..ef60d7680b 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -120,6 +120,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { HGraph(ArenaAllocator* arena, const DexFile& dex_file, uint32_t method_idx, + InstructionSet instruction_set, bool debuggable = false, int start_instruction_id = 0) : arena_(arena), @@ -137,6 +138,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { current_instruction_id_(start_instruction_id), dex_file_(dex_file), method_idx_(method_idx), + instruction_set_(instruction_set), cached_null_constant_(nullptr), cached_int_constants_(std::less<int32_t>(), arena->Adapter()), cached_float_constants_(std::less<int32_t>(), arena->Adapter()), @@ -359,6 +361,8 @@ class HGraph : public 
ArenaObject<kArenaAllocMisc> { // The method index in the dex file. const uint32_t method_idx_; + const InstructionSet instruction_set_; + // Cached constants. HNullConstant* cached_null_constant_; ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_; diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc index 7aea249c42..b0d1433667 100644 --- a/compiler/optimizing/optimizing_cfi_test.cc +++ b/compiler/optimizing/optimizing_cfi_test.cc @@ -31,7 +31,7 @@ namespace art { // Run the tests only on host. #ifndef HAVE_ANDROID_OS -class OptimizingCFITest : public CFITest { +class OptimizingCFITest : public CFITest { public: // Enable this flag to generate the expected outputs. static constexpr bool kGenerateExpected = false; diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc index 2125f6eb01..9ccc0113f6 100644 --- a/compiler/optimizing/optimizing_cfi_test_expected.inc +++ b/compiler/optimizing/optimizing_cfi_test_expected.inc @@ -32,7 +32,7 @@ static constexpr uint8_t expected_cfi_kThumb2[] = { // 0x00000012: .cfi_def_cfa_offset: 64 static constexpr uint8_t expected_asm_kArm64[] = { - 0xE0, 0x0F, 0x1C, 0xB8, 0xF3, 0xD3, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9, + 0xE0, 0x0F, 0x1C, 0xF8, 0xF3, 0xD3, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9, 0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF3, 0xD3, 0x42, 0xA9, 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6, }; @@ -41,7 +41,7 @@ static constexpr uint8_t expected_cfi_kArm64[] = { 0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49, 0x44, 0xD3, 0xD4, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40, }; -// 0x00000000: str w0, [sp, #-64]! +// 0x00000000: str x0, [sp, #-64]! 
// 0x00000004: .cfi_def_cfa_offset: 64 // 0x00000004: stp x19, x20, [sp, #40] // 0x00000008: .cfi_offset: r19 at cfa-24 @@ -99,13 +99,13 @@ static constexpr uint8_t expected_cfi_kX86[] = { static constexpr uint8_t expected_asm_kX86_64[] = { 0x55, 0x53, 0x48, 0x83, 0xEC, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, - 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x89, 0x3C, 0x24, 0xF2, - 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, - 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3, + 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x48, 0x89, 0x3C, 0x24, + 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, + 0x24, 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3, }; static constexpr uint8_t expected_cfi_kX86_64[] = { 0x41, 0x0E, 0x10, 0x86, 0x04, 0x41, 0x0E, 0x18, 0x83, 0x06, 0x44, 0x0E, - 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x43, 0x0A, 0x47, 0xDD, 0x47, + 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x44, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x44, 0x0E, 0x18, 0x41, 0x0E, 0x10, 0xC3, 0x41, 0x0E, 0x08, 0xC6, 0x41, 0x0B, 0x0E, 0x40, }; @@ -121,21 +121,20 @@ static constexpr uint8_t expected_cfi_kX86_64[] = { // 0x0000000d: .cfi_offset: r30 at cfa-32 // 0x0000000d: movsd [rsp + 24], xmm12 // 0x00000014: .cfi_offset: r29 at cfa-40 -// 0x00000014: mov [rsp], edi -// 0x00000017: .cfi_remember_state -// 0x00000017: movsd xmm12, [rsp + 24] -// 0x0000001e: .cfi_restore: r29 -// 0x0000001e: movsd xmm13, [rsp + 32] -// 0x00000025: .cfi_restore: r30 -// 0x00000025: addq rsp, 40 -// 0x00000029: .cfi_def_cfa_offset: 24 -// 0x00000029: pop rbx -// 0x0000002a: .cfi_def_cfa_offset: 16 -// 0x0000002a: .cfi_restore: r3 -// 0x0000002a: pop rbp -// 0x0000002b: .cfi_def_cfa_offset: 8 -// 0x0000002b: .cfi_restore: r6 -// 0x0000002b: ret -// 0x0000002c: .cfi_restore_state -// 0x0000002c: .cfi_def_cfa_offset: 64 - +// 0x00000014: movq [rsp], rdi +// 0x00000018: .cfi_remember_state +// 0x00000018: movsd xmm12, [rsp + 24] +// 0x0000001f: .cfi_restore: 
r29 +// 0x0000001f: movsd xmm13, [rsp + 32] +// 0x00000026: .cfi_restore: r30 +// 0x00000026: addq rsp, 40 +// 0x0000002a: .cfi_def_cfa_offset: 24 +// 0x0000002a: pop rbx +// 0x0000002b: .cfi_def_cfa_offset: 16 +// 0x0000002b: .cfi_restore: r3 +// 0x0000002b: pop rbp +// 0x0000002c: .cfi_def_cfa_offset: 8 +// 0x0000002c: .cfi_restore: r6 +// 0x0000002c: ret +// 0x0000002d: .cfi_restore_state +// 0x0000002d: .cfi_def_cfa_offset: 64 diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 8bb5d8ebae..c7b2c67019 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -19,6 +19,7 @@ #include <fstream> #include <stdint.h> +#include "art_method-inl.h" #include "base/arena_allocator.h" #include "base/dumpable.h" #include "base/timing_logger.h" @@ -44,7 +45,6 @@ #include "intrinsics.h" #include "licm.h" #include "jni/quick/jni_compiler.h" -#include "mirror/art_method-inl.h" #include "nodes.h" #include "prepare_for_register_allocation.h" #include "reference_type_propagation.h" @@ -196,7 +196,7 @@ class OptimizingCompiler FINAL : public Compiler { return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file); } - uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE + uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize( InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet()))); @@ -514,7 +514,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite ArenaAllocator arena(Runtime::Current()->GetArenaPool()); HGraph* graph = new (&arena) HGraph( - &arena, dex_file, method_idx, compiler_driver->GetCompilerOptions().GetDebuggable()); + &arena, dex_file, method_idx, compiler_driver->GetInstructionSet(), + compiler_driver->GetCompilerOptions().GetDebuggable()); // 
For testing purposes, we put a special marker on method names that should be compiled // with this compiler. This makes sure we're not regressing. diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h index 4f8ec65e43..3ef96faab3 100644 --- a/compiler/optimizing/optimizing_unit_test.h +++ b/compiler/optimizing/optimizing_unit_test.h @@ -74,7 +74,8 @@ void RemoveSuspendChecks(HGraph* graph) { inline HGraph* CreateGraph(ArenaAllocator* allocator) { return new (allocator) HGraph( - allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1); + allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1, kRuntimeISA, + false); } // Create a control-flow graph from Dex instructions. diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 12b1c2b9bd..e93e06118c 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -16,7 +16,7 @@ #include "reference_type_propagation.h" -#include "class_linker.h" +#include "class_linker-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" #include "scoped_thread_state_change.h" diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index f53f846326..5f439c86d9 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -71,7 +71,9 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator, physical_fp_register_intervals_.SetSize(codegen->GetNumberOfFloatingPointRegisters()); // Always reserve for the current method and the graph's max out registers. // TODO: compute it instead. - reserved_out_slots_ = 1 + codegen->GetGraph()->GetMaximumNumberOfOutVRegs(); + // ArtMethod* takes 2 vregs for 64 bits. 
+ reserved_out_slots_ = InstructionSetPointerSize(codegen->GetInstructionSet()) / kVRegSize + + codegen->GetGraph()->GetMaximumNumberOfOutVRegs(); } bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED, diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index 1da0563264..cbbc116033 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -378,7 +378,7 @@ static dwarf::Reg DWARFReg(SRegister reg) { return dwarf::Reg::ArmFp(static_cast<int>(reg)); } -constexpr size_t kFramePointerSize = 4; +constexpr size_t kFramePointerSize = kArmPointerSize; void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, const std::vector<ManagedRegister>& callee_save_regs, @@ -415,7 +415,7 @@ void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, StoreToOffset(kStoreWord, R0, SP, 0); // Write out entry spills. - int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>); + int32_t offset = frame_size + kFramePointerSize; for (size_t i = 0; i < entry_spills.size(); ++i) { ArmManagedRegister reg = entry_spills.at(i).AsArm(); if (reg.IsNoRegister()) { @@ -528,13 +528,13 @@ void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src, StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); } -void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, - MemberOffset offs) { +void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, + bool poison_reference) { ArmManagedRegister dst = mdest.AsArm(); CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst; LoadFromOffset(kLoadWord, dst.AsCoreRegister(), base.AsArm().AsCoreRegister(), offs.Int32Value()); - if (kPoisonHeapReferences) { + if (kPoisonHeapReferences && poison_reference) { rsb(dst.AsCoreRegister(), dst.AsCoreRegister(), ShifterOperand(0)); } } diff --git a/compiler/utils/arm/assembler_arm.h 
b/compiler/utils/arm/assembler_arm.h index ce4c741918..c673c6b81e 100644 --- a/compiler/utils/arm/assembler_arm.h +++ b/compiler/utils/arm/assembler_arm.h @@ -693,9 +693,10 @@ class ArmAssembler : public Assembler { void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE; - void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; + void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; - void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE; + void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, + bool poison_reference) OVERRIDE; void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc index 3ee79a103f..7d98a30ff3 100644 --- a/compiler/utils/arm64/assembler_arm64.cc +++ b/compiler/utils/arm64/assembler_arm64.cc @@ -293,14 +293,14 @@ void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) { LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value()); } -void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, - MemberOffset offs) { +void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs, + bool poison_reference) { Arm64ManagedRegister dst = m_dst.AsArm64(); Arm64ManagedRegister base = m_base.AsArm64(); CHECK(dst.IsXRegister() && base.IsXRegister()); LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(), offs.Int32Value()); - if (kPoisonHeapReferences) { + if (kPoisonHeapReferences && poison_reference) { WRegister ref_reg = dst.AsOverlappingWRegister(); ___ Neg(reg_w(ref_reg), vixl::Operand(reg_w(ref_reg))); } @@ -535,7 +535,7 @@ void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scrat Arm64ManagedRegister scratch = m_scratch.AsArm64(); CHECK(scratch.IsXRegister()) << scratch; // Call *(*(SP + base) + offset) - 
LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, base.Int32Value()); + LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value()); LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value()); ___ Blr(reg_x(scratch.AsXRegister())); } @@ -544,8 +544,9 @@ void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegiste UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant"; } -void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs, - ManagedRegister m_in_reg, bool null_allowed) { +void Arm64Assembler::CreateHandleScopeEntry( + ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg, + bool null_allowed) { Arm64ManagedRegister out_reg = m_out_reg.AsArm64(); Arm64ManagedRegister in_reg = m_in_reg.AsArm64(); // For now we only hold stale handle scope entries in x registers. @@ -571,7 +572,7 @@ void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffs } void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset, - ManagedRegister m_scratch, bool null_allowed) { + ManagedRegister m_scratch, bool null_allowed) { Arm64ManagedRegister scratch = m_scratch.AsArm64(); CHECK(scratch.IsXRegister()) << scratch; if (null_allowed) { @@ -590,7 +591,7 @@ void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset han } void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg, - ManagedRegister m_in_reg) { + ManagedRegister m_in_reg) { Arm64ManagedRegister out_reg = m_out_reg.AsArm64(); Arm64ManagedRegister in_reg = m_in_reg.AsArm64(); CHECK(out_reg.IsXRegister()) << out_reg; @@ -706,7 +707,7 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, // Increase frame to required size. 
DCHECK_ALIGNED(frame_size, kStackAlignment); - DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>)); + DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize); IncreaseFrameSize(frame_size); // Save callee-saves. @@ -720,13 +721,12 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, DCHECK(core_reg_list.IncludesAliasOf(reg_x(ETR))); ___ Mov(reg_x(ETR), reg_x(TR)); - // Write StackReference<Method>. + // Write ArtMethod* DCHECK(X0 == method_reg.AsArm64().AsXRegister()); - DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>)); - StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0); + StoreToOffset(X0, SP, 0); // Write out entry spills - int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>); + int32_t offset = frame_size + kArm64PointerSize; for (size_t i = 0; i < entry_spills.size(); ++i) { Arm64ManagedRegister reg = entry_spills.at(i).AsArm64(); if (reg.IsNoRegister()) { @@ -768,7 +768,7 @@ void Arm64Assembler::RemoveFrame(size_t frame_size, // For now we only check that the size of the frame is large enough to hold spills and method // reference. - DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>)); + DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize); DCHECK_ALIGNED(frame_size, kStackAlignment); // Note: This is specific to JNI method frame. diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h index b1b66ed49a..fa9faed66b 100644 --- a/compiler/utils/arm64/assembler_arm64.h +++ b/compiler/utils/arm64/assembler_arm64.h @@ -113,8 +113,9 @@ class Arm64Assembler FINAL : public Assembler { // Load routines. 
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE; void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE; - void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; - void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE; + void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; + void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, + bool poison_reference) OVERRIDE; void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE; diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h index 2e3a47bb91..672e1503be 100644 --- a/compiler/utils/assembler.h +++ b/compiler/utils/assembler.h @@ -434,8 +434,10 @@ class Assembler { virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size); virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size); - virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0; - virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) = 0; + virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0; + // If poison_reference is true and kPoisonReference is true, then we negate the read reference. 
+ virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, + bool poison_reference) = 0; virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0; diff --git a/compiler/utils/dex_cache_arrays_layout-inl.h b/compiler/utils/dex_cache_arrays_layout-inl.h index a71eeceafb..fec981a03c 100644 --- a/compiler/utils/dex_cache_arrays_layout-inl.h +++ b/compiler/utils/dex_cache_arrays_layout-inl.h @@ -25,12 +25,6 @@ #include "mirror/array-inl.h" #include "primitive.h" -namespace mirror { -class ArtMethod; -class Class; -class String; -} // namespace mirror - namespace art { inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file) @@ -40,7 +34,7 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const Dex strings_offset_(methods_offset_ + MethodsSize(dex_file->NumMethodIds())), fields_offset_(strings_offset_ + StringsSize(dex_file->NumStringIds())), size_(fields_offset_ + FieldsSize(dex_file->NumFieldIds())) { - DCHECK(pointer_size == 4u || pointer_size == 8u); + DCHECK(ValidPointerSize(pointer_size)) << pointer_size; } inline size_t DexCacheArraysLayout::TypeOffset(uint32_t type_idx) const { @@ -52,12 +46,11 @@ inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const { } inline size_t DexCacheArraysLayout::MethodOffset(uint32_t method_idx) const { - return methods_offset_ + ElementOffset( - sizeof(mirror::HeapReference<mirror::ArtMethod>), method_idx); + return methods_offset_ + ElementOffset(pointer_size_, method_idx); } inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const { - return ArraySize(sizeof(mirror::HeapReference<mirror::ArtMethod>), num_elements); + return ArraySize(pointer_size_, num_elements); } inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const { diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc index e769489479..e55b461127 100644 --- 
a/compiler/utils/mips/assembler_mips.cc +++ b/compiler/utils/mips/assembler_mips.cc @@ -696,13 +696,13 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) { LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value()); } -void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, - MemberOffset offs) { +void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, + bool poison_reference) { MipsManagedRegister dest = mdest.AsMips(); CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()); LoadFromOffset(kLoadWord, dest.AsCoreRegister(), base.AsMips().AsCoreRegister(), offs.Int32Value()); - if (kPoisonHeapReferences) { + if (kPoisonHeapReferences && poison_reference) { Subu(dest.AsCoreRegister(), ZERO, dest.AsCoreRegister()); } } diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h index 34713e1305..7b0fc39d17 100644 --- a/compiler/utils/mips/assembler_mips.h +++ b/compiler/utils/mips/assembler_mips.h @@ -189,9 +189,10 @@ class MipsAssembler FINAL : public Assembler { void LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) OVERRIDE; - void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; + void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; - void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) OVERRIDE; + void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, + bool poison_reference) OVERRIDE; void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE; diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc index b95e436897..a8b55d1097 100644 --- a/compiler/utils/mips64/assembler_mips64.cc +++ b/compiler/utils/mips64/assembler_mips64.cc @@ -601,10 +601,10 @@ void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, } // Write out Method*. 
- StoreToOffset(kStoreWord, method_reg.AsMips64().AsGpuRegister(), SP, 0); + StoreToOffset(kStoreDoubleword, method_reg.AsMips64().AsGpuRegister(), SP, 0); // Write out entry spills. - int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>); + int32_t offset = frame_size + kFramePointerSize; for (size_t i = 0; i < entry_spills.size(); ++i) { Mips64ManagedRegister reg = entry_spills.at(i).AsMips64(); ManagedRegisterSpill spill = entry_spills.at(i); @@ -750,12 +750,13 @@ void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) { LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), SP, src.Int32Value()); } -void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) { +void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, + bool poison_reference) { Mips64ManagedRegister dest = mdest.AsMips64(); CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister()); LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), base.AsMips64().AsGpuRegister(), offs.Int32Value()); - if (kPoisonHeapReferences) { + if (kPoisonHeapReferences && poison_reference) { Subu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister()); } } @@ -1004,7 +1005,7 @@ void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscr Mips64ManagedRegister scratch = mscratch.AsMips64(); CHECK(scratch.IsGpuRegister()) << scratch; // Call *(*(SP + base) + offset) - LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(), + LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, base.Int32Value()); LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), scratch.AsGpuRegister(), offset.Int32Value()); diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h index 95ba967646..38419abbac 100644 --- a/compiler/utils/mips64/assembler_mips64.h +++ b/compiler/utils/mips64/assembler_mips64.h @@ -188,9 +188,10 @@ class Mips64Assembler FINAL : public Assembler 
{ void LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) OVERRIDE; - void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; + void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; - void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) OVERRIDE; + void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, + bool poison_reference) OVERRIDE; void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE; diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 7e7520066d..390d46ede4 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -1724,9 +1724,9 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, } // return address then method on stack. - int32_t adjust = frame_size - (gpr_count * kFramePointerSize) - - sizeof(StackReference<mirror::ArtMethod>) /*method*/ - - kFramePointerSize /*return address*/; + int32_t adjust = frame_size - gpr_count * kFramePointerSize - + kFramePointerSize /*method*/ - + kFramePointerSize /*return address*/; addl(ESP, Immediate(-adjust)); cfi_.AdjustCFAOffset(adjust); pushl(method_reg.AsX86().AsCpuRegister()); @@ -1750,12 +1750,11 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, } } -void X86Assembler::RemoveFrame(size_t frame_size, - const std::vector<ManagedRegister>& spill_regs) { +void X86Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& spill_regs) { CHECK_ALIGNED(frame_size, kStackAlignment); cfi_.RememberState(); - int adjust = frame_size - (spill_regs.size() * kFramePointerSize) - - sizeof(StackReference<mirror::ArtMethod>); + // -kFramePointerSize for ArtMethod*. 
+ int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize; addl(ESP, Immediate(adjust)); cfi_.AdjustCFAOffset(-adjust); for (size_t i = 0; i < spill_regs.size(); ++i) { @@ -1904,18 +1903,18 @@ void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, } } -void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) { +void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) { X86ManagedRegister dest = mdest.AsX86(); CHECK(dest.IsCpuRegister()); movl(dest.AsCpuRegister(), Address(ESP, src)); } -void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, - MemberOffset offs) { +void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, + bool poison_reference) { X86ManagedRegister dest = mdest.AsX86(); CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); - if (kPoisonHeapReferences) { + if (kPoisonHeapReferences && poison_reference) { negl(dest.AsCpuRegister()); } } diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index 5319dacab7..1c1c023711 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -538,9 +538,10 @@ class X86Assembler FINAL : public Assembler { void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE; - void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; + void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; - void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE; + void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, + bool poison_reference) OVERRIDE; void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index feceecac68..ac95c7127a 100644 --- 
a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -2388,9 +2388,9 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, } } - DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>)); + DCHECK_EQ(kX86_64PointerSize, kFramePointerSize); - movl(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister()); + movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister()); for (size_t i = 0; i < entry_spills.size(); ++i) { ManagedRegisterSpill spill = entry_spills.at(i); @@ -2590,18 +2590,18 @@ void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> sr } } -void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) { +void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) { X86_64ManagedRegister dest = mdest.AsX86_64(); CHECK(dest.IsCpuRegister()); movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src)); } -void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, - MemberOffset offs) { +void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, + bool poison_reference) { X86_64ManagedRegister dest = mdest.AsX86_64(); CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs)); - if (kPoisonHeapReferences) { + if (kPoisonHeapReferences && poison_reference) { negl(dest.AsCpuRegister()); } } @@ -2667,8 +2667,7 @@ void X86_64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t s } } -void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch) { +void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) { X86_64ManagedRegister scratch = mscratch.AsX86_64(); CHECK(scratch.IsCpuRegister()); movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src)); @@ -2693,9 +2692,8 @@ void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset<8> 
thr_offs, gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister()); } -void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch, - size_t size) { +void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, + size_t size) { X86_64ManagedRegister scratch = mscratch.AsX86_64(); if (scratch.IsCpuRegister() && size == 8) { Load(scratch, src, 4); @@ -2834,7 +2832,7 @@ void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister(); - movl(scratch, Address(CpuRegister(RSP), base)); + movq(scratch, Address(CpuRegister(RSP), base)); call(Address(scratch, offset)); } diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index 7daf994900..6b2b65d6c1 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -668,7 +668,8 @@ class X86_64Assembler FINAL : public Assembler { void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; - void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE; + void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, + bool poison_reference) OVERRIDE; void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc index dcffe35113..b86bc85489 100644 --- a/compiler/utils/x86_64/assembler_x86_64_test.cc +++ b/compiler/utils/x86_64/assembler_x86_64_test.cc @@ -1127,7 +1127,7 @@ std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBU ssize_t displacement = static_cast<ssize_t>(frame_size) - (spill_regs.size() * 8 + 8); str << "subq $" << displacement << ", %rsp\n"; // 3) Store method reference. 
- str << "movl %edi, (%rsp)\n"; + str << "movq %rdi, (%rsp)\n"; // 4) Entry spills. str << "movq %rax, " << frame_size + 0 << "(%rsp)\n"; str << "movq %rbx, " << frame_size + 8 << "(%rsp)\n"; diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index e0f367ed8b..7fa4328c9b 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -35,6 +35,7 @@ #define ATRACE_TAG ATRACE_TAG_DALVIK #include <cutils/trace.h> +#include "art_method-inl.h" #include "arch/instruction_set_features.h" #include "arch/mips/instruction_set_features_mips.h" #include "base/dumpable.h" @@ -61,7 +62,6 @@ #include "image_writer.h" #include "interpreter/unstarted_runtime.h" #include "leb128.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/object-inl.h" diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc index 1056fe15e0..f32488133c 100644 --- a/imgdiag/imgdiag.cc +++ b/imgdiag/imgdiag.cc @@ -24,13 +24,13 @@ #include <set> #include <map> +#include "art_method-inl.h" #include "base/unix_file/fd_file.h" #include "base/stringprintf.h" #include "gc/space/image_space.h" #include "gc/heap.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" -#include "mirror/art_method-inl.h" #include "image.h" #include "scoped_thread_state_change.h" #include "os.h" @@ -171,9 +171,8 @@ class ImgDiagDumper { const uint8_t* image_begin_unaligned = boot_image_header.GetImageBegin(); const uint8_t* image_mirror_end_unaligned = image_begin_unaligned + - boot_image_header.GetImageSize(); - const uint8_t* image_end_unaligned = image_mirror_end_unaligned + - boot_image_header.GetArtFieldsSize(); + boot_image_header.GetImageSection(ImageHeader::kSectionObjects).Size(); + const uint8_t* image_end_unaligned = image_begin_unaligned + boot_image_header.GetImageSize(); // Adjust range to nearest page const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize); @@ -335,7 +334,7 @@ class ImgDiagDumper { std::map<mirror::Class*, std::string> 
class_to_descriptor_map; std::map<off_t /* field offset */, int /* count */> art_method_field_dirty_count; - std::vector<mirror::ArtMethod*> art_method_dirty_objects; + std::vector<ArtMethod*> art_method_dirty_objects; std::map<off_t /* field offset */, int /* count */> class_field_dirty_count; std::vector<mirror::Class*> class_dirty_objects; @@ -437,7 +436,7 @@ class ImgDiagDumper { class_dirty_objects.push_back(obj_as_class); } else if (strcmp(descriptor.c_str(), "Ljava/lang/reflect/ArtMethod;") == 0) { // this is an ArtMethod - mirror::ArtMethod* art_method = reinterpret_cast<mirror::ArtMethod*>(remote_obj); + ArtMethod* art_method = reinterpret_cast<ArtMethod*>(remote_obj); // print the fields that are dirty for (size_t i = 0; i < obj->SizeOf(); ++i) { @@ -533,7 +532,7 @@ class ImgDiagDumper { const auto& dirty_objects_list = dirty_objects_by_class[klass]; for (mirror::Object* obj : dirty_objects_list) { // remote method - auto art_method = reinterpret_cast<mirror::ArtMethod*>(obj); + auto art_method = reinterpret_cast<ArtMethod*>(obj); // remote class mirror::Class* remote_declaring_class = @@ -551,7 +550,7 @@ class ImgDiagDumper { art_method->GetEntryPointFromJniPtrSize(pointer_size)) << ", "; os << " entryPointFromInterpreter: " << reinterpret_cast<const void*>( - art_method->GetEntryPointFromInterpreterPtrSize<kVerifyNone>(pointer_size)) + art_method->GetEntryPointFromInterpreterPtrSize(pointer_size)) << ", "; os << " entryPointFromQuickCompiledCode: " << reinterpret_cast<const void*>( @@ -623,7 +622,7 @@ class ImgDiagDumper { os << " field contents:\n"; for (mirror::Object* obj : art_method_false_dirty_objects) { // local method - auto art_method = reinterpret_cast<mirror::ArtMethod*>(obj); + auto art_method = reinterpret_cast<ArtMethod*>(obj); // local class mirror::Class* declaring_class = art_method->GetDeclaringClass(); @@ -634,7 +633,7 @@ class ImgDiagDumper { art_method->GetEntryPointFromJniPtrSize(pointer_size)) << ", "; os << " 
entryPointFromInterpreter: " << reinterpret_cast<const void*>( - art_method->GetEntryPointFromInterpreterPtrSize<kVerifyNone>(pointer_size)) + art_method->GetEntryPointFromInterpreterPtrSize(pointer_size)) << ", "; os << " entryPointFromQuickCompiledCode: " << reinterpret_cast<const void*>( diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index a67ef8f0b7..3712f1aa75 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -27,6 +27,7 @@ #include "arch/instruction_set_features.h" #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "class_linker-inl.h" @@ -41,7 +42,6 @@ #include "image.h" #include "indenter.h" #include "mapping_table.h" -#include "mirror/art_method-inl.h" #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" @@ -64,14 +64,16 @@ namespace art { -const char* image_roots_descriptions_[] = { +const char* image_methods_descriptions_[] = { "kResolutionMethod", "kImtConflictMethod", "kImtUnimplementedMethod", - "kDefaultImt", "kCalleeSaveMethod", "kRefsOnlySaveMethod", "kRefsAndArgsSaveMethod", +}; + +const char* image_roots_descriptions_[] = { "kDexCaches", "kClassRoots", }; @@ -494,7 +496,7 @@ class OatDumper { return oat_file_.GetOatHeader().GetInstructionSet(); } - const void* GetQuickOatCode(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const void* GetQuickOatCode(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { for (size_t i = 0; i < oat_dex_files_.size(); i++) { const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i]; CHECK(oat_dex_file != nullptr); @@ -1311,12 +1313,9 @@ class OatDumper { Handle<mirror::DexCache> dex_cache( hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file))); DCHECK(options_.class_loader_ != nullptr); - return verifier::MethodVerifier::VerifyMethodAndDump(soa.Self(), os, dex_method_idx, dex_file, - dex_cache, - *options_.class_loader_, - &class_def, 
code_item, - NullHandle<mirror::ArtMethod>(), - method_access_flags); + return verifier::MethodVerifier::VerifyMethodAndDump( + soa.Self(), os, dex_method_idx, dex_file, dex_cache, *options_.class_loader_, &class_def, + code_item, nullptr, method_access_flags); } return nullptr; @@ -1378,8 +1377,12 @@ class ImageDumper { os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n"; - os << "IMAGE BITMAP OFFSET: " << reinterpret_cast<void*>(image_header_.GetImageBitmapOffset()) - << " SIZE: " << reinterpret_cast<void*>(image_header_.GetImageBitmapSize()) << "\n\n"; + os << "IMAGE SIZE: " << image_header_.GetImageSize() << "\n\n"; + + for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) { + auto section = static_cast<ImageHeader::ImageSections>(i); + os << "IMAGE SECTION " << section << ": " << image_header_.GetImageSection(section) << "\n\n"; + } os << "OAT CHECKSUM: " << StringPrintf("0x%08x\n\n", image_header_.GetOatChecksum()); @@ -1399,7 +1402,8 @@ class ImageDumper { os << "ROOTS: " << reinterpret_cast<void*>(image_header_.GetImageRoots()) << "\n"; Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); std::ostream indent1_os(&indent1_filter); - CHECK_EQ(arraysize(image_roots_descriptions_), size_t(ImageHeader::kImageRootsMax)); + static_assert(arraysize(image_roots_descriptions_) == + static_cast<size_t>(ImageHeader::kImageRootsMax), "sizes must match"); for (int i = 0; i < ImageHeader::kImageRootsMax; i++) { ImageHeader::ImageRoot image_root = static_cast<ImageHeader::ImageRoot>(i); const char* image_root_description = image_roots_descriptions_[i]; @@ -1434,6 +1438,16 @@ class ImageDumper { } } } + + os << "METHOD ROOTS\n"; + static_assert(arraysize(image_methods_descriptions_) == + static_cast<size_t>(ImageHeader::kImageMethodsCount), "sizes must match"); + for (int i = 0; i < ImageHeader::kImageMethodsCount; i++) { + auto image_root = static_cast<ImageHeader::ImageMethod>(i); + const char* description = 
image_methods_descriptions_[i]; + auto* image_method = image_header_.GetImageMethod(image_root); + indent1_os << StringPrintf("%s: %p\n", description, image_method); + } } os << "\n"; @@ -1493,12 +1507,37 @@ class ImageDumper { Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); std::ostream indent_os(&indent_filter); os_ = &indent_os; + + // Mark dex caches. + dex_cache_arrays_.clear(); + { + ReaderMutexLock mu(self, *class_linker->DexLock()); + for (size_t i = 0; i < class_linker->GetDexCacheCount(); ++i) { + auto* dex_cache = class_linker->GetDexCache(i); + dex_cache_arrays_.insert(dex_cache->GetResolvedFields()); + dex_cache_arrays_.insert(dex_cache->GetResolvedMethods()); + } + } ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); for (const auto& space : spaces) { if (space->IsImageSpace()) { - gc::space::ImageSpace* image_space = space->AsImageSpace(); + auto* image_space = space->AsImageSpace(); + // Dump the normal objects before ArtMethods. image_space->GetLiveBitmap()->Walk(ImageDumper::Callback, this); indent_os << "\n"; + // TODO: Dump fields. + // Dump methods after. + const auto& methods_section = image_header_.GetMethodsSection(); + const auto pointer_size = + InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet()); + const auto method_size = ArtMethod::ObjectSize(pointer_size); + for (size_t pos = 0; pos < methods_section.Size(); pos += method_size) { + auto* method = reinterpret_cast<ArtMethod*>( + image_space->Begin() + pos + methods_section.Offset()); + indent_os << method << " " << " ArtMethod: " << PrettyMethod(method) << "\n"; + DumpMethod(method, this, indent_os); + indent_os << "\n"; + } } } // Dump the large objects separately. 
@@ -1515,11 +1554,16 @@ class ImageDumper { stats_.file_bytes = file->GetLength(); } size_t header_bytes = sizeof(ImageHeader); + const auto& bitmap_section = image_header_.GetImageSection(ImageHeader::kSectionImageBitmap); + const auto& field_section = image_header_.GetImageSection(ImageHeader::kSectionArtFields); + const auto& method_section = image_header_.GetMethodsSection(); stats_.header_bytes = header_bytes; size_t alignment_bytes = RoundUp(header_bytes, kObjectAlignment) - header_bytes; stats_.alignment_bytes += alignment_bytes; - stats_.alignment_bytes += image_header_.GetImageBitmapOffset() - image_header_.GetImageSize(); - stats_.bitmap_bytes += image_header_.GetImageBitmapSize(); + stats_.alignment_bytes += bitmap_section.Offset() - image_header_.GetImageSize(); + stats_.bitmap_bytes += bitmap_section.Size(); + stats_.art_field_bytes += field_section.Size(); + stats_.art_method_bytes += method_section.Size(); stats_.Dump(os); os << "\n"; @@ -1541,9 +1585,6 @@ class ImageDumper { } else if (type->IsClassClass()) { mirror::Class* klass = value->AsClass(); os << StringPrintf("%p Class: %s\n", klass, PrettyDescriptor(klass).c_str()); - } else if (type->IsArtMethodClass()) { - mirror::ArtMethod* method = value->AsArtMethod(); - os << StringPrintf("%p Method: %s\n", method, PrettyMethod(method).c_str()); } else { os << StringPrintf("%p %s\n", value, PrettyDescriptor(type).c_str()); } @@ -1618,7 +1659,7 @@ class ImageDumper { return image_space_.Contains(object); } - const void* GetQuickOatCodeBegin(mirror::ArtMethod* m) + const void* GetQuickOatCodeBegin(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize( InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet())); @@ -1631,7 +1672,7 @@ class ImageDumper { return quick_code; } - uint32_t GetQuickOatCodeSize(mirror::ArtMethod* m) + uint32_t GetQuickOatCodeSize(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const 
uint32_t* oat_code_begin = reinterpret_cast<const uint32_t*>(GetQuickOatCodeBegin(m)); if (oat_code_begin == nullptr) { @@ -1640,7 +1681,7 @@ class ImageDumper { return oat_code_begin[-1]; } - const void* GetQuickOatCodeEnd(mirror::ArtMethod* m) + const void* GetQuickOatCodeEnd(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const uint8_t* oat_code_begin = reinterpret_cast<const uint8_t*>(GetQuickOatCodeBegin(m)); if (oat_code_begin == nullptr) { @@ -1649,8 +1690,7 @@ class ImageDumper { return oat_code_begin + GetQuickOatCodeSize(m); } - static void Callback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static void Callback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(obj != nullptr); DCHECK(arg != nullptr); ImageDumper* state = reinterpret_cast<ImageDumper*>(arg); @@ -1672,9 +1712,6 @@ class ImageDumper { mirror::Class* klass = obj->AsClass(); os << StringPrintf("%p: java.lang.Class \"%s\" (", obj, PrettyDescriptor(klass).c_str()) << klass->GetStatus() << ")\n"; - } else if (obj->IsArtMethod()) { - os << StringPrintf("%p: java.lang.reflect.ArtMethod %s\n", obj, - PrettyMethod(obj->AsArtMethod()).c_str()); } else if (obj_class->IsStringClass()) { os << StringPrintf("%p: java.lang.String %s\n", obj, PrintableString(obj->AsString()->ToModifiedUtf8().c_str()).c_str()); @@ -1684,10 +1721,11 @@ class ImageDumper { Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); std::ostream indent_os(&indent_filter); DumpFields(indent_os, obj, obj_class); + const auto image_pointer_size = + InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet()); if (obj->IsObjectArray()) { - mirror::ObjectArray<mirror::Object>* obj_array = obj->AsObjectArray<mirror::Object>(); - int32_t length = obj_array->GetLength(); - for (int32_t i = 0; i < length; i++) { + auto* obj_array = obj->AsObjectArray<mirror::Object>(); + for (int32_t i = 0, length = obj_array->GetLength(); i < length; 
i++) { mirror::Object* value = obj_array->Get(i); size_t run = 0; for (int32_t j = i + 1; j < length; j++) { @@ -1719,85 +1757,117 @@ class ImageDumper { PrintField(indent2_os, &sfields[i], sfields[i].GetDeclaringClass()); } } - } else if (obj->IsArtMethod()) { - const size_t image_pointer_size = InstructionSetPointerSize( - state->oat_dumper_->GetOatInstructionSet()); - mirror::ArtMethod* method = obj->AsArtMethod(); - if (method->IsNative()) { - DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method); - DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method); - bool first_occurrence; - const void* quick_oat_code = state->GetQuickOatCodeBegin(method); - uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method); - state->ComputeOatSize(quick_oat_code, &first_occurrence); - if (first_occurrence) { - state->stats_.native_to_managed_code_bytes += quick_oat_code_size; - } - if (quick_oat_code != method->GetEntryPointFromQuickCompiledCodePtrSize( - image_pointer_size)) { - indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code); - } - } else if (method->IsAbstract() || method->IsCalleeSaveMethod() || - method->IsResolutionMethod() || method->IsImtConflictMethod() || - method->IsImtUnimplementedMethod() || method->IsClassInitializer()) { - DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method); - DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method); - } else { - const DexFile::CodeItem* code_item = method->GetCodeItem(); - size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2; - state->stats_.dex_instruction_bytes += dex_instruction_bytes; - - bool first_occurrence; - size_t gc_map_bytes = - state->ComputeOatSize(method->GetNativeGcMap(image_pointer_size), &first_occurrence); - if (first_occurrence) { - state->stats_.gc_map_bytes += gc_map_bytes; - } - - size_t pc_mapping_table_bytes = - 
state->ComputeOatSize(method->GetMappingTable(image_pointer_size), &first_occurrence); - if (first_occurrence) { - state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes; - } - - size_t vmap_table_bytes = - state->ComputeOatSize(method->GetVmapTable(image_pointer_size), &first_occurrence); - if (first_occurrence) { - state->stats_.vmap_table_bytes += vmap_table_bytes; + } else { + auto it = state->dex_cache_arrays_.find(obj); + if (it != state->dex_cache_arrays_.end()) { + const auto& field_section = state->image_header_.GetImageSection( + ImageHeader::kSectionArtFields); + const auto& method_section = state->image_header_.GetMethodsSection(); + auto* arr = down_cast<mirror::PointerArray*>(obj); + for (int32_t i = 0, length = arr->GetLength(); i < length; i++) { + void* elem = arr->GetElementPtrSize<void*>(i, image_pointer_size); + size_t run = 0; + for (int32_t j = i + 1; j < length && + elem == arr->GetElementPtrSize<void*>(j, image_pointer_size); j++, run++) { } + if (run == 0) { + indent_os << StringPrintf("%d: ", i); + } else { + indent_os << StringPrintf("%d to %zd: ", i, i + run); + i = i + run; + } + auto offset = reinterpret_cast<uint8_t*>(elem) - state->image_space_.Begin(); + std::string msg; + if (field_section.Contains(offset)) { + msg = PrettyField(reinterpret_cast<ArtField*>(elem)); + } else if (method_section.Contains(offset)) { + msg = PrettyMethod(reinterpret_cast<ArtMethod*>(elem)); + } else { + msg = "Unknown type"; + } + indent_os << StringPrintf("%p %s\n", elem, msg.c_str()); } + } + } + std::string temp; + state->stats_.Update(obj_class->GetDescriptor(&temp), object_bytes); + } - const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method); - const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method); - uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method); - state->ComputeOatSize(quick_oat_code_begin, &first_occurrence); - if (first_occurrence) { - state->stats_.managed_code_bytes += quick_oat_code_size; 
- if (method->IsConstructor()) { - if (method->IsStatic()) { - state->stats_.class_initializer_code_bytes += quick_oat_code_size; - } else if (dex_instruction_bytes > kLargeConstructorDexBytes) { - state->stats_.large_initializer_code_bytes += quick_oat_code_size; - } - } else if (dex_instruction_bytes > kLargeMethodDexBytes) { - state->stats_.large_method_code_bytes += quick_oat_code_size; + void DumpMethod(ArtMethod* method, ImageDumper* state, std::ostream& indent_os) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(method != nullptr); + const auto image_pointer_size = + InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet()); + if (method->IsNative()) { + DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method); + DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method); + bool first_occurrence; + const void* quick_oat_code = state->GetQuickOatCodeBegin(method); + uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method); + state->ComputeOatSize(quick_oat_code, &first_occurrence); + if (first_occurrence) { + state->stats_.native_to_managed_code_bytes += quick_oat_code_size; + } + if (quick_oat_code != method->GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size)) { + indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code); + } + } else if (method->IsAbstract() || method->IsCalleeSaveMethod() || + method->IsResolutionMethod() || method->IsImtConflictMethod() || + method->IsImtUnimplementedMethod() || method->IsClassInitializer()) { + DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method); + DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method); + } else { + const DexFile::CodeItem* code_item = method->GetCodeItem(); + size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2; + state->stats_.dex_instruction_bytes += dex_instruction_bytes; + + bool first_occurrence; + size_t gc_map_bytes 
= state->ComputeOatSize( + method->GetNativeGcMap(image_pointer_size), &first_occurrence); + if (first_occurrence) { + state->stats_.gc_map_bytes += gc_map_bytes; + } + + size_t pc_mapping_table_bytes = state->ComputeOatSize( + method->GetMappingTable(image_pointer_size), &first_occurrence); + if (first_occurrence) { + state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes; + } + + size_t vmap_table_bytes = state->ComputeOatSize( + method->GetVmapTable(image_pointer_size), &first_occurrence); + if (first_occurrence) { + state->stats_.vmap_table_bytes += vmap_table_bytes; + } + + const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method); + const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method); + uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method); + state->ComputeOatSize(quick_oat_code_begin, &first_occurrence); + if (first_occurrence) { + state->stats_.managed_code_bytes += quick_oat_code_size; + if (method->IsConstructor()) { + if (method->IsStatic()) { + state->stats_.class_initializer_code_bytes += quick_oat_code_size; + } else if (dex_instruction_bytes > kLargeConstructorDexBytes) { + state->stats_.large_initializer_code_bytes += quick_oat_code_size; } + } else if (dex_instruction_bytes > kLargeMethodDexBytes) { + state->stats_.large_method_code_bytes += quick_oat_code_size; } - state->stats_.managed_code_bytes_ignoring_deduplication += quick_oat_code_size; + } + state->stats_.managed_code_bytes_ignoring_deduplication += quick_oat_code_size; - indent_os << StringPrintf("OAT CODE: %p-%p\n", quick_oat_code_begin, quick_oat_code_end); - indent_os << StringPrintf("SIZE: Dex Instructions=%zd GC=%zd Mapping=%zd\n", - dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes); + indent_os << StringPrintf("OAT CODE: %p-%p\n", quick_oat_code_begin, quick_oat_code_end); + indent_os << StringPrintf("SIZE: Dex Instructions=%zd GC=%zd Mapping=%zd\n", + dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes); - size_t 
total_size = dex_instruction_bytes + gc_map_bytes + pc_mapping_table_bytes + - vmap_table_bytes + quick_oat_code_size + object_bytes; + size_t total_size = dex_instruction_bytes + gc_map_bytes + pc_mapping_table_bytes + + vmap_table_bytes + quick_oat_code_size + ArtMethod::ObjectSize(image_pointer_size); - double expansion = - static_cast<double>(quick_oat_code_size) / static_cast<double>(dex_instruction_bytes); - state->stats_.ComputeOutliers(total_size, expansion, method); - } + double expansion = + static_cast<double>(quick_oat_code_size) / static_cast<double>(dex_instruction_bytes); + state->stats_.ComputeOutliers(total_size, expansion, method); } - std::string temp; - state->stats_.Update(obj_class->GetDescriptor(&temp), object_bytes); } std::set<const void*> already_seen_; @@ -1820,6 +1890,8 @@ class ImageDumper { size_t header_bytes; size_t object_bytes; + size_t art_field_bytes; + size_t art_method_bytes; size_t bitmap_bytes; size_t alignment_bytes; @@ -1837,7 +1909,7 @@ class ImageDumper { size_t dex_instruction_bytes; - std::vector<mirror::ArtMethod*> method_outlier; + std::vector<ArtMethod*> method_outlier; std::vector<size_t> method_outlier_size; std::vector<double> method_outlier_expansion; std::vector<std::pair<std::string, size_t>> oat_dex_file_sizes; @@ -1847,6 +1919,8 @@ class ImageDumper { file_bytes(0), header_bytes(0), object_bytes(0), + art_field_bytes(0), + art_method_bytes(0), bitmap_bytes(0), alignment_bytes(0), managed_code_bytes(0), @@ -1891,7 +1965,7 @@ class ImageDumper { return (static_cast<double>(size) / static_cast<double>(object_bytes)) * 100; } - void ComputeOutliers(size_t total_size, double expansion, mirror::ArtMethod* method) { + void ComputeOutliers(size_t total_size, double expansion, ArtMethod* method) { method_outlier_size.push_back(total_size); method_outlier_expansion.push_back(expansion); method_outlier.push_back(method); @@ -2004,16 +2078,21 @@ class ImageDumper { << "art_file_bytes = header_bytes + object_bytes + 
alignment_bytes\n"; Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); std::ostream indent_os(&indent_filter); - indent_os << StringPrintf("header_bytes = %8zd (%2.0f%% of art file bytes)\n" - "object_bytes = %8zd (%2.0f%% of art file bytes)\n" - "bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n" - "alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n", + indent_os << StringPrintf("header_bytes = %8zd (%2.0f%% of art file bytes)\n" + "object_bytes = %8zd (%2.0f%% of art file bytes)\n" + "art_field_bytes = %8zd (%2.0f%% of art file bytes)\n" + "art_method_bytes = %8zd (%2.0f%% of art file bytes)\n" + "bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n" + "alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n", header_bytes, PercentOfFileBytes(header_bytes), object_bytes, PercentOfFileBytes(object_bytes), + art_field_bytes, PercentOfFileBytes(art_field_bytes), + art_method_bytes, PercentOfFileBytes(art_method_bytes), bitmap_bytes, PercentOfFileBytes(bitmap_bytes), alignment_bytes, PercentOfFileBytes(alignment_bytes)) << std::flush; - CHECK_EQ(file_bytes, bitmap_bytes + header_bytes + object_bytes + alignment_bytes); + CHECK_EQ(file_bytes, header_bytes + object_bytes + art_field_bytes + art_method_bytes + + bitmap_bytes + alignment_bytes); } os << "object_bytes breakdown:\n"; @@ -2093,6 +2172,7 @@ class ImageDumper { const ImageHeader& image_header_; std::unique_ptr<OatDumper> oat_dumper_; OatDumperOptions* oat_dumper_options_; + std::set<mirror::Object*> dex_cache_arrays_; DISALLOW_COPY_AND_ASSIGN(ImageDumper); }; diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index ef84a1717c..007125cfbe 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -25,6 +25,7 @@ #include <vector> #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/dumpable.h" #include "base/scoped_flock.h" #include "base/stringpiece.h" @@ -35,8 +36,9 @@ #include "elf_file_impl.h" #include "gc/space/image_space.h" #include "image.h" -#include 
"mirror/art_method-inl.h" +#include "mirror/abstract_method.h" #include "mirror/object-inl.h" +#include "mirror/method.h" #include "mirror/reference.h" #include "noop_compiler_callbacks.h" #include "offsets.h" @@ -120,7 +122,7 @@ bool PatchOat::Patch(const std::string& image_location, off_t delta, } ImageHeader image_header; if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header), - sizeof(image_header), 0)) { + sizeof(image_header), 0)) { LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath(); return false; } @@ -416,12 +418,22 @@ bool PatchOat::ReplaceOatFileWithSymlink(const std::string& input_oat_filename, } void PatchOat::PatchArtFields(const ImageHeader* image_header) { - const size_t art_field_size = image_header->GetArtFieldsSize(); - const size_t art_field_offset = image_header->GetArtFieldsOffset(); - for (size_t pos = 0; pos < art_field_size; pos += sizeof(ArtField)) { - auto* field = reinterpret_cast<ArtField*>(heap_->Begin() + art_field_offset + pos); - auto* dest_field = RelocatedCopyOf(field); - dest_field->SetDeclaringClass(RelocatedAddressOfPointer(field->GetDeclaringClass())); + const auto& section = image_header->GetImageSection(ImageHeader::kSectionArtFields); + for (size_t pos = 0; pos < section.Size(); pos += sizeof(ArtField)) { + auto* src = reinterpret_cast<ArtField*>(heap_->Begin() + section.Offset() + pos); + auto* dest = RelocatedCopyOf(src); + dest->SetDeclaringClass(RelocatedAddressOfPointer(src->GetDeclaringClass())); + } +} + +void PatchOat::PatchArtMethods(const ImageHeader* image_header) { + const auto& section = image_header->GetMethodsSection(); + const size_t pointer_size = InstructionSetPointerSize(isa_); + size_t method_size = ArtMethod::ObjectSize(pointer_size); + for (size_t pos = 0; pos < section.Size(); pos += method_size) { + auto* src = reinterpret_cast<ArtMethod*>(heap_->Begin() + section.Offset() + pos); + auto* dest = RelocatedCopyOf(src); + FixupMethod(src, 
dest); } } @@ -431,31 +443,35 @@ void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots for (size_t i = 0, count = dex_caches->GetLength(); i < count; ++i) { auto* dex_cache = dex_caches->GetWithoutChecks(i); auto* fields = dex_cache->GetResolvedFields(); - if (fields == nullptr) { - continue; + if (fields != nullptr) { + CHECK(!fields->IsObjectArray()); + CHECK(fields->IsArrayInstance()); + FixupNativePointerArray(fields); } - CHECK(!fields->IsObjectArray()); - CHECK(fields->IsArrayInstance()); - auto* component_type = fields->GetClass()->GetComponentType(); - if (component_type->IsPrimitiveInt()) { - mirror::IntArray* arr = fields->AsIntArray(); - mirror::IntArray* copy_arr = down_cast<mirror::IntArray*>(RelocatedCopyOf(arr)); - for (size_t j = 0, count2 = arr->GetLength(); j < count2; ++j) { - auto f = arr->GetWithoutChecks(j); - if (f != 0) { - copy_arr->SetWithoutChecks<false>(j, f + delta_); - } - } - } else { - CHECK(component_type->IsPrimitiveLong()); - mirror::LongArray* arr = fields->AsLongArray(); - mirror::LongArray* copy_arr = down_cast<mirror::LongArray*>(RelocatedCopyOf(arr)); - for (size_t j = 0, count2 = arr->GetLength(); j < count2; ++j) { - auto f = arr->GetWithoutChecks(j); - if (f != 0) { - copy_arr->SetWithoutChecks<false>(j, f + delta_); - } - } + auto* methods = dex_cache->GetResolvedMethods(); + if (methods != nullptr) { + CHECK(!methods->IsObjectArray()); + CHECK(methods->IsArrayInstance()); + FixupNativePointerArray(methods); + } + } +} + +void PatchOat::FixupNativePointerArray(mirror::PointerArray* object) { + if (object->IsIntArray()) { + mirror::IntArray* arr = object->AsIntArray(); + mirror::IntArray* copy_arr = down_cast<mirror::IntArray*>(RelocatedCopyOf(arr)); + for (size_t j = 0, count2 = arr->GetLength(); j < count2; ++j) { + copy_arr->SetWithoutChecks<false>( + j, RelocatedAddressOfIntPointer(arr->GetWithoutChecks(j))); + } + } else { + CHECK(object->IsLongArray()); + mirror::LongArray* arr = 
object->AsLongArray(); + mirror::LongArray* copy_arr = down_cast<mirror::LongArray*>(RelocatedCopyOf(arr)); + for (size_t j = 0, count2 = arr->GetLength(); j < count2; ++j) { + copy_arr->SetWithoutChecks<false>( + j, RelocatedAddressOfIntPointer(arr->GetWithoutChecks(j))); } } } @@ -470,6 +486,9 @@ bool PatchOat::PatchImage() { // Patch and update ArtFields. PatchArtFields(image_header); + // Patch and update ArtMethods. + PatchArtMethods(image_header); + // Patch dex file int/long arrays which point to ArtFields. PatchDexFileArrays(img_roots); @@ -545,40 +564,63 @@ void PatchOat::VisitObject(mirror::Object* object) { } PatchOat::PatchVisitor visitor(this, copy); object->VisitReferences<true, kVerifyNone>(visitor, visitor); - if (object->IsArtMethod<kVerifyNone>()) { - FixupMethod(down_cast<mirror::ArtMethod*>(object), down_cast<mirror::ArtMethod*>(copy)); - } else if (object->IsClass<kVerifyNone>()) { - mirror::Class* klass = down_cast<mirror::Class*>(object); - down_cast<mirror::Class*>(copy)->SetSFieldsUnchecked( - RelocatedAddressOfPointer(klass->GetSFields())); - down_cast<mirror::Class*>(copy)->SetIFieldsUnchecked( - RelocatedAddressOfPointer(klass->GetIFields())); + if (object->IsClass<kVerifyNone>()) { + auto* klass = object->AsClass(); + auto* copy_klass = down_cast<mirror::Class*>(copy); + copy_klass->SetSFieldsUnchecked(RelocatedAddressOfPointer(klass->GetSFields())); + copy_klass->SetIFieldsUnchecked(RelocatedAddressOfPointer(klass->GetIFields())); + copy_klass->SetDirectMethodsPtrUnchecked( + RelocatedAddressOfPointer(klass->GetDirectMethodsPtr())); + copy_klass->SetVirtualMethodsPtr(RelocatedAddressOfPointer(klass->GetVirtualMethodsPtr())); + auto* vtable = klass->GetVTable(); + if (vtable != nullptr) { + FixupNativePointerArray(vtable); + } + auto* iftable = klass->GetIfTable(); + if (iftable != nullptr) { + for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) { + if (iftable->GetMethodArrayCount(i) > 0) { + auto* method_array = 
iftable->GetMethodArray(i); + CHECK(method_array != nullptr); + FixupNativePointerArray(method_array); + } + } + } + if (klass->ShouldHaveEmbeddedImtAndVTable()) { + const size_t pointer_size = InstructionSetPointerSize(isa_); + for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) { + copy_klass->SetEmbeddedVTableEntryUnchecked(i, RelocatedAddressOfPointer( + klass->GetEmbeddedVTableEntry(i, pointer_size)), pointer_size); + } + for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { + copy_klass->SetEmbeddedImTableEntry(i, RelocatedAddressOfPointer( + klass->GetEmbeddedImTableEntry(i, pointer_size)), pointer_size); + } + } + } + if (object->GetClass() == mirror::Method::StaticClass() || + object->GetClass() == mirror::Constructor::StaticClass()) { + // Need to go update the ArtMethod. + auto* dest = down_cast<mirror::AbstractMethod*>(copy); + auto* src = down_cast<mirror::AbstractMethod*>(object); + dest->SetArtMethod(RelocatedAddressOfPointer(src->GetArtMethod())); } } -void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) { +void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) { const size_t pointer_size = InstructionSetPointerSize(isa_); + copy->CopyFrom(object, pointer_size); // Just update the entry points if it looks like we should. 
// TODO: sanity check all the pointers' values - uintptr_t quick= reinterpret_cast<uintptr_t>( - object->GetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(pointer_size)); - if (quick != 0) { - copy->SetEntryPointFromQuickCompiledCodePtrSize(reinterpret_cast<void*>(quick + delta_), - pointer_size); - } - uintptr_t interpreter = reinterpret_cast<uintptr_t>( - object->GetEntryPointFromInterpreterPtrSize<kVerifyNone>(pointer_size)); - if (interpreter != 0) { - copy->SetEntryPointFromInterpreterPtrSize( - reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_), pointer_size); - } - - uintptr_t native_method = reinterpret_cast<uintptr_t>( - object->GetEntryPointFromJniPtrSize(pointer_size)); - if (native_method != 0) { - copy->SetEntryPointFromJniPtrSize(reinterpret_cast<void*>(native_method + delta_), - pointer_size); - } + copy->SetDeclaringClass(RelocatedAddressOfPointer(object->GetDeclaringClass())); + copy->SetDexCacheResolvedMethods(RelocatedAddressOfPointer(object->GetDexCacheResolvedMethods())); + copy->SetDexCacheResolvedTypes(RelocatedAddressOfPointer(object->GetDexCacheResolvedTypes())); + copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer( + object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size); + copy->SetEntryPointFromInterpreterPtrSize(RelocatedAddressOfPointer( + object->GetEntryPointFromInterpreterPtrSize(pointer_size)), pointer_size); + copy->SetEntryPointFromJniPtrSize(RelocatedAddressOfPointer( + object->GetEntryPointFromJniPtrSize(pointer_size)), pointer_size); } bool PatchOat::Patch(File* input_oat, off_t delta, File* output_oat, TimingLogger* timings, diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h index 8f16f6b283..7b9c8bd508 100644 --- a/patchoat/patchoat.h +++ b/patchoat/patchoat.h @@ -28,14 +28,15 @@ namespace art { +class ArtMethod; class ImageHeader; class OatHeader; namespace mirror { class Object; +class PointerArray; class Reference; class Class; -class 
ArtMethod; } // namespace mirror class PatchOat { @@ -99,7 +100,9 @@ class PatchOat { void VisitObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) + void FixupMethod(ArtMethod* object, ArtMethod* copy) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FixupNativePointerArray(mirror::PointerArray* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool InHeap(mirror::Object*); @@ -112,6 +115,7 @@ class PatchOat { bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PatchArtFields(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PatchArtMethods(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -133,8 +137,28 @@ class PatchOat { template <typename T> T* RelocatedAddressOfPointer(T* obj) { - return obj == nullptr ? nullptr : - reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(obj) + delta_); + if (obj == nullptr) { + return obj; + } + auto ret = reinterpret_cast<uintptr_t>(obj) + delta_; + // Trim off high bits in case negative relocation with 64 bit patchoat. + if (InstructionSetPointerSize(isa_) == sizeof(uint32_t)) { + ret = static_cast<uintptr_t>(static_cast<uint32_t>(ret)); + } + return reinterpret_cast<T*>(ret); + } + + template <typename T> + T RelocatedAddressOfIntPointer(T obj) { + if (obj == 0) { + return obj; + } + T ret = obj + delta_; + // Trim off high bits in case negative relocation with 64 bit patchoat. + if (InstructionSetPointerSize(isa_) == 4) { + ret = static_cast<T>(static_cast<uint32_t>(ret)); + } + return ret; } // Look up the oat header from any elf file. 
diff --git a/runtime/Android.mk b/runtime/Android.mk index a4fa24d43c..b38f9bc9a5 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -20,6 +20,7 @@ include art/build/Android.common_build.mk LIBART_COMMON_SRC_FILES := \ art_field.cc \ + art_method.cc \ atomic.cc.arm \ barrier.cc \ base/allocator.cc \ @@ -102,7 +103,6 @@ LIBART_COMMON_SRC_FILES := \ mem_map.cc \ memory_region.cc \ mirror/abstract_method.cc \ - mirror/art_method.cc \ mirror/array.cc \ mirror/class.cc \ mirror/dex_cache.cc \ @@ -317,6 +317,7 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \ gc/space/region_space.h \ gc/space/space.h \ gc/heap.h \ + image.h \ instrumentation.h \ indirect_reference_table.h \ invoke_type.h \ diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc index 5733ab6361..40e2cd340f 100644 --- a/runtime/arch/arch_test.cc +++ b/runtime/arch/arch_test.cc @@ -16,8 +16,8 @@ #include <stdint.h> +#include "art_method-inl.h" #include "common_runtime_test.h" -#include "mirror/art_method-inl.h" #include "quick/quick_method_frame_info.h" namespace art { @@ -38,7 +38,7 @@ class ArchTest : public CommonRuntimeTest { t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods. 
r->SetInstructionSet(isa); - mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(); + ArtMethod* save_method = r->CreateCalleeSaveMethod(); r->SetCalleeSaveMethod(save_method, type); QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo(); EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for " diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc index c0e658ca47..403d348752 100644 --- a/runtime/arch/arm/context_arm.cc +++ b/runtime/arch/arm/context_arm.cc @@ -16,8 +16,8 @@ #include "context_arm.h" +#include "art_method-inl.h" #include "base/bit_utils.h" -#include "mirror/art_method-inl.h" #include "quick/quick_method_frame_info.h" namespace art { @@ -36,7 +36,7 @@ void ArmContext::Reset() { } void ArmContext::FillCalleeSaves(const StackVisitor& fr) { - mirror::ArtMethod* method = fr.GetMethod(); + ArtMethod* method = fr.GetMethod(); const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo(); int spill_pos = 0; diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc index d84cb5369b..90b0d5374c 100644 --- a/runtime/arch/arm/fault_handler_arm.cc +++ b/runtime/arch/arm/fault_handler_arm.cc @@ -18,13 +18,13 @@ #include "fault_handler.h" #include <sys/ucontext.h> + +#include "art_method-inl.h" #include "base/macros.h" #include "base/hex_dump.h" #include "globals.h" #include "base/logging.h" #include "base/hex_dump.h" -#include "mirror/art_method.h" -#include "mirror/art_method-inl.h" #include "thread.h" #include "thread-inl.h" @@ -65,7 +65,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info } void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context, - mirror::ArtMethod** out_method, + ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp) { struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); struct sigcontext *sc = reinterpret_cast<struct 
sigcontext*>(&uc->uc_mcontext); @@ -81,10 +81,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>( reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm)); if (overflow_addr == fault_addr) { - *out_method = reinterpret_cast<mirror::ArtMethod*>(sc->arm_r0); + *out_method = reinterpret_cast<ArtMethod*>(sc->arm_r0); } else { // The method is at the top of the stack. - *out_method = reinterpret_cast<mirror::ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]); + *out_method = reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]); } // Work out the return PC. This will be the address of the instruction diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index 748857870d..5ae291a44b 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -423,7 +423,7 @@ ENTRY art_quick_invoke_stub_internal mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval #endif - ldr ip, [r0, #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32] @ get pointer to the code + ldr ip, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32] @ get pointer to the code blx ip @ call the method mov sp, r11 @ restore the stack pointer @@ -895,7 +895,7 @@ END art_quick_proxy_invoke_handler */ ENTRY art_quick_imt_conflict_trampoline ldr r0, [sp, #0] @ load caller Method* - ldr r0, [r0, #MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET] @ load dex_cache_resolved_methods + ldr r0, [r0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET] @ load dex_cache_resolved_methods add r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET @ get starting address of data ldr r0, [r0, r12, lsl 2] @ load the target method b art_quick_invoke_interface_trampoline diff --git a/runtime/arch/arm/quick_entrypoints_cc_arm.cc b/runtime/arch/arm/quick_entrypoints_cc_arm.cc index a3acd7e10a..ce531f0709 100644 --- a/runtime/arch/arm/quick_entrypoints_cc_arm.cc +++ 
b/runtime/arch/arm/quick_entrypoints_cc_arm.cc @@ -14,23 +14,23 @@ * limitations under the License. */ -#include "mirror/art_method.h" +#include "art_method.h" #include "utils.h" // For RoundUp(). namespace art { // Assembly stub that does the final part of the up-call into Java. -extern "C" void art_quick_invoke_stub_internal(mirror::ArtMethod*, uint32_t*, uint32_t, +extern "C" void art_quick_invoke_stub_internal(ArtMethod*, uint32_t*, uint32_t, Thread* self, JValue* result, uint32_t, uint32_t*, uint32_t*); template <bool kIsStatic> -static void quick_invoke_reg_setup(mirror::ArtMethod* method, uint32_t* args, uint32_t args_size, +static void quick_invoke_reg_setup(ArtMethod* method, uint32_t* args, uint32_t args_size, Thread* self, JValue* result, const char* shorty) { // Note: We do not follow aapcs ABI in quick code for both softfp and hardfp. uint32_t core_reg_args[4]; // r0 ~ r3 uint32_t fp_reg_args[16]; // s0 ~ s15 (d0 ~ d7) - uint32_t gpr_index = 1; // Index into core registers. Reserve r0 for mirror::ArtMethod*. + uint32_t gpr_index = 1; // Index into core registers. Reserve r0 for ArtMethod*. uint32_t fpr_index = 0; // Index into float registers. uint32_t fpr_double_index = 0; // Index into float registers for doubles. uint32_t arg_index = 0; // Index into argument array. @@ -99,16 +99,16 @@ static void quick_invoke_reg_setup(mirror::ArtMethod* method, uint32_t* args, ui core_reg_args, fp_reg_args); } -// Called by art::mirror::ArtMethod::Invoke to do entry into a non-static method. +// Called by art::ArtMethod::Invoke to do entry into a non-static method. // TODO: migrate into an assembly implementation as with ARM64. 
-extern "C" void art_quick_invoke_stub(mirror::ArtMethod* method, uint32_t* args, uint32_t args_size, +extern "C" void art_quick_invoke_stub(ArtMethod* method, uint32_t* args, uint32_t args_size, Thread* self, JValue* result, const char* shorty) { quick_invoke_reg_setup<false>(method, args, args_size, self, result, shorty); } -// Called by art::mirror::ArtMethod::Invoke to do entry into a static method. +// Called by art::ArtMethod::Invoke to do entry into a static method. // TODO: migrate into an assembly implementation as with ARM64. -extern "C" void art_quick_invoke_static_stub(mirror::ArtMethod* method, uint32_t* args, +extern "C" void art_quick_invoke_static_stub(ArtMethod* method, uint32_t* args, uint32_t args_size, Thread* self, JValue* result, const char* shorty) { quick_invoke_reg_setup<true>(method, args, args_size, self, result, shorty); diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc index cb5b9e1fa9..91c0fc9551 100644 --- a/runtime/arch/arm64/context_arm64.cc +++ b/runtime/arch/arm64/context_arm64.cc @@ -18,8 +18,8 @@ #include "context_arm64.h" +#include "art_method-inl.h" #include "base/bit_utils.h" -#include "mirror/art_method-inl.h" #include "quick/quick_method_frame_info.h" namespace art { @@ -38,7 +38,7 @@ void Arm64Context::Reset() { } void Arm64Context::FillCalleeSaves(const StackVisitor& fr) { - mirror::ArtMethod* method = fr.GetMethod(); + ArtMethod* method = fr.GetMethod(); const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo(); int spill_pos = 0; diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc index 0448c760ee..3e9ad0da62 100644 --- a/runtime/arch/arm64/fault_handler_arm64.cc +++ b/runtime/arch/arm64/fault_handler_arm64.cc @@ -16,14 +16,15 @@ #include "fault_handler.h" + #include <sys/ucontext.h> + +#include "art_method-inl.h" #include "base/macros.h" #include "globals.h" #include "base/logging.h" #include "base/hex_dump.h" #include 
"registers_arm64.h" -#include "mirror/art_method.h" -#include "mirror/art_method-inl.h" #include "thread.h" #include "thread-inl.h" @@ -53,7 +54,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info } void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context, - mirror::ArtMethod** out_method, + ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp) { struct ucontext *uc = reinterpret_cast<struct ucontext *>(context); struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); @@ -69,10 +70,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>( reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm64)); if (overflow_addr == fault_addr) { - *out_method = reinterpret_cast<mirror::ArtMethod*>(sc->regs[0]); + *out_method = reinterpret_cast<ArtMethod*>(sc->regs[0]); } else { // The method is at the top of the stack. - *out_method = (reinterpret_cast<StackReference<mirror::ArtMethod>* >(*out_sp)[0]).AsMirrorPtr(); + *out_method = *reinterpret_cast<ArtMethod**>(*out_sp); } // Work out the return PC. This will be the address of the instruction diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index f8b0734428..f90a6b0367 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -32,7 +32,8 @@ // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] . THIS_LOAD_REQUIRES_READ_BARRIER - ldr wIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ] + // Loads appropriate callee-save-method. + ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ] sub sp, sp, #176 .cfi_adjust_cfa_offset 176 @@ -97,7 +98,8 @@ // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] . 
THIS_LOAD_REQUIRES_READ_BARRIER - ldr wIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ] + // Loads appropriate callee-save-method. + ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ] sub sp, sp, #112 .cfi_adjust_cfa_offset 112 @@ -271,7 +273,7 @@ // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] . THIS_LOAD_REQUIRES_READ_BARRIER - ldr wIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ] + ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ] SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL @@ -483,7 +485,7 @@ ENTRY \c_name // Helper signature is always // (method_idx, *this_object, *caller_method, *self, sp) - ldr w2, [sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE] // pass caller Method* + ldr x2, [sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE] // pass caller Method* mov x3, xSELF // pass Thread::Current mov x4, sp bl \cxx_name // (method_idx, this, caller, Thread*, SP) @@ -508,14 +510,14 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo .macro INVOKE_STUB_CREATE_FRAME SAVE_SIZE=15*8 // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved. -SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE +SAVE_SIZE_AND_METHOD=SAVE_SIZE+8 mov x9, sp // Save stack pointer. .cfi_register sp,x9 add x10, x2, # SAVE_SIZE_AND_METHOD // calculate size of frame. - sub x10, sp, x10 // Calculate SP position - saves + ArtMethod* + args + sub x10, sp, x10 // Calculate SP position - saves + ArtMethod* + args and x10, x10, # ~0xf // Enforce 16 byte stack alignment. mov sp, x10 // Set new SP. @@ -565,7 +567,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE // W2 - args length // X9 - destination address. // W10 - temporary - add x9, sp, #4 // Destination address is bottom of stack + null. + add x9, sp, #8 // Destination address is bottom of stack + null. // Use \@ to differentiate between macro invocations. 
.LcopyParams\@: @@ -579,18 +581,14 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE .LendCopyParams\@: - // Store null into StackReference<Method>* at bottom of frame. - str wzr, [sp] - -#if (STACK_REFERENCE_SIZE != 4) -#error "STACK_REFERENCE_SIZE(ARM64) size not as expected." -#endif + // Store null into ArtMethod* at bottom of frame. + str xzr, [sp] .endm .macro INVOKE_STUB_CALL_AND_RETURN // load method-> METHOD_QUICK_CODE_OFFSET - ldr x9, [x0 , #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64] + ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64] // Branch to method. blr x9 @@ -681,7 +679,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE * | uint32_t out[n-1] | * | : : | Outs * | uint32_t out[0] | - * | StackRef<ArtMethod> | <- SP value=null + * | ArtMethod* | <- SP value=null * +----------------------+ * * Outgoing registers: @@ -1314,7 +1312,7 @@ END \name .extern \entrypoint ENTRY \name SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC - ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer + ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer mov x2, xSELF // pass Thread::Current bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @@ -1326,7 +1324,7 @@ END \name .extern \entrypoint ENTRY \name SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC - ldr w2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer + ldr x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer mov x3, xSELF // pass Thread::Current bl \entrypoint RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @@ -1338,7 +1336,7 @@ END \name .extern \entrypoint ENTRY \name SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC - ldr w3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer + ldr x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer mov x4, xSELF // pass Thread::Current bl \entrypoint RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @@ -1401,7 +1399,7 @@ 
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE ENTRY art_quick_set64_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC mov x3, x1 // Store value - ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer + ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer mov x2, x3 // Put value param mov x3, xSELF // pass Thread::Current bl artSet64StaticFromCode @@ -1468,10 +1466,10 @@ END art_quick_proxy_invoke_handler * dex method index. */ ENTRY art_quick_imt_conflict_trampoline - ldr w0, [sp, #0] // load caller Method* - ldr w0, [x0, #MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods - add x0, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET // get starting address of data - ldr w0, [x0, xIP1, lsl 2] // load the target method + ldr x0, [sp, #0] // load caller Method* + ldr w0, [x0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods + add x0, x0, #MIRROR_LONG_ARRAY_DATA_OFFSET // get starting address of data + ldr x0, [x0, xIP1, lsl 3] // load the target method b art_quick_invoke_interface_trampoline END art_quick_imt_conflict_trampoline @@ -1482,7 +1480,7 @@ ENTRY art_quick_resolution_trampoline bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP) cbz x0, 1f mov xIP0, x0 // Remember returned code pointer in xIP0. - ldr w0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP. + ldr x0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP. 
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME br xIP0 1: diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc index 24892e95bb..53f2b65eec 100644 --- a/runtime/arch/mips/context_mips.cc +++ b/runtime/arch/mips/context_mips.cc @@ -16,8 +16,8 @@ #include "context_mips.h" +#include "art_method-inl.h" #include "base/bit_utils.h" -#include "mirror/art_method-inl.h" #include "quick/quick_method_frame_info.h" namespace art { @@ -36,7 +36,7 @@ void MipsContext::Reset() { } void MipsContext::FillCalleeSaves(const StackVisitor& fr) { - mirror::ArtMethod* method = fr.GetMethod(); + ArtMethod* method = fr.GetMethod(); const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo(); int spill_pos = 0; diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc index c9949d4295..abe495b46d 100644 --- a/runtime/arch/mips/fault_handler_mips.cc +++ b/runtime/arch/mips/fault_handler_mips.cc @@ -35,7 +35,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context ATTRIBUTE_UNUSED, - mirror::ArtMethod** out_method ATTRIBUTE_UNUSED, + ArtMethod** out_method ATTRIBUTE_UNUSED, uintptr_t* out_return_pc ATTRIBUTE_UNUSED, uintptr_t* out_sp ATTRIBUTE_UNUSED) { } diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index f3d2274552..f1e6edb0ff 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -526,7 +526,7 @@ ENTRY art_quick_invoke_stub lw $a1, 4($sp) # copy arg value for a1 lw $a2, 8($sp) # copy arg value for a2 lw $a3, 12($sp) # copy arg value for a3 - lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code + lw $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code jalr $t9 # call the method sw $zero, 0($sp) # store null for method* at bottom of frame move $sp, $fp # 
restore the stack @@ -1103,7 +1103,7 @@ END art_quick_proxy_invoke_handler */ ENTRY art_quick_imt_conflict_trampoline lw $a0, 0($sp) # load caller Method* - lw $a0, MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods + lw $a0, ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods sll $t0, 2 # convert target method offset to bytes add $a0, $t0 # get address of target method lw $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc index 8ce6cf0993..6637c371d2 100644 --- a/runtime/arch/mips64/context_mips64.cc +++ b/runtime/arch/mips64/context_mips64.cc @@ -16,8 +16,8 @@ #include "context_mips64.h" +#include "art_method-inl.h" #include "base/bit_utils.h" -#include "mirror/art_method-inl.h" #include "quick/quick_method_frame_info.h" namespace art { @@ -36,7 +36,7 @@ void Mips64Context::Reset() { } void Mips64Context::FillCalleeSaves(const StackVisitor& fr) { - mirror::ArtMethod* method = fr.GetMethod(); + ArtMethod* method = fr.GetMethod(); const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo(); int spill_pos = 0; diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc index 7b5cd4913c..277c2b2394 100644 --- a/runtime/arch/mips64/fault_handler_mips64.cc +++ b/runtime/arch/mips64/fault_handler_mips64.cc @@ -35,7 +35,7 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context ATTRIBUTE_UNUSED, - mirror::ArtMethod** out_method ATTRIBUTE_UNUSED, + ArtMethod** out_method ATTRIBUTE_UNUSED, uintptr_t* out_return_pc ATTRIBUTE_UNUSED, uintptr_t* out_sp ATTRIBUTE_UNUSED) { } diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index ff79b5d77c..227fe7e7f2 100644 --- 
a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -90,8 +90,8 @@ ld $v0, %got(_ZN3art7Runtime9instance_E)($gp) ld $v0, 0($v0) THIS_LOAD_REQUIRES_READ_BARRIER - lwu $v0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($v0) - sw $v0, 0($sp) # Place Method* at bottom of stack. + ld $v0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($v0) + sd $v0, 0($sp) # Place ArtMethod* at bottom of stack. sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. .endm @@ -133,8 +133,8 @@ ld $v0, %got(_ZN3art7Runtime9instance_E)($gp) ld $v0, 0($v0) THIS_LOAD_REQUIRES_READ_BARRIER - lwu $v0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($v0) - sw $v0, 0($sp) # Place Method* at bottom of stack. + ld $v0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($v0) + sd $v0, 0($sp) # Place Method* at bottom of stack. sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. .endm @@ -256,14 +256,14 @@ ld $v0, %got(_ZN3art7Runtime9instance_E)($gp) ld $v0, 0($v0) THIS_LOAD_REQUIRES_READ_BARRIER - lwu $v0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($v0) - sw $v0, 0($sp) # Place Method* at bottom of stack. + ld $v0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($v0) + sd $v0, 0($sp) # Place Method* at bottom of stack. sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. .endm .macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0 SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL - sw $a0, 0($sp) # Place Method* at bottom of stack. + sd $a0, 0($sp) # Place Method* at bottom of stack. sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. 
.endm @@ -642,7 +642,7 @@ ENTRY_NO_GP art_quick_invoke_stub move $s1, $a3 # move managed thread pointer into s1 (rSELF) move $s8, $sp # save sp in s8 (fp) - daddiu $t3, $a2, 20 # add 4 for method* and 16 for stack alignment + daddiu $t3, $a2, 24 # add 8 for ArtMethod* and 16 for stack alignment dsrl $t3, $t3, 4 # shift the frame size right 4 dsll $t3, $t3, 4 # shift the frame size left 4 to align to 16 bytes dsubu $sp, $sp, $t3 # reserve stack space for argument array @@ -650,7 +650,7 @@ ENTRY_NO_GP art_quick_invoke_stub daddiu $t0, $a5, 1 # t0 = shorty[1] (skip 1 for return type) daddiu $t1, $a1, 4 # t1 = ptr to arg_array[4] (skip this ptr) daddiu $t2, $a2, -4 # t2 = number of argument bytes remain (skip this ptr) - daddiu $v0, $sp, 8 # v0 points to where to copy arg_array + daddiu $v0, $sp, 12 # v0 points to where to copy arg_array LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_fn LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_fn LOOP_OVER_SHORTY_LOADING_REG a4, f16, call_fn @@ -671,9 +671,9 @@ ENTRY_NO_GP art_quick_invoke_stub call_fn: # call method (a0 and a1 have been untouched) lwu $a1, 0($a1) # make a1 = this ptr - sw $a1, 4($sp) # copy this ptr (skip 4 bytes for method*) - sw $zero, 0($sp) # store null for method* at bottom of frame - ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code + sw $a1, 8($sp) # copy this ptr (skip 8 bytes for ArtMethod*) + sd $zero, 0($sp) # store null for ArtMethod* at bottom of frame + ld $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code jalr $t9 # call the method nop move $sp, $s8 # restore sp @@ -745,7 +745,7 @@ ENTRY_NO_GP art_quick_invoke_static_stub move $s1, $a3 # move managed thread pointer into s1 (rSELF) move $s8, $sp # save sp in s8 (fp) - daddiu $t3, $a2, 20 # add 4 for method* and 16 for stack alignment + daddiu $t3, $a2, 24 # add 8 for ArtMethod* and 16 for stack alignment dsrl $t3, $t3, 4 # shift the frame size right 4 dsll $t3, $t3, 4 # shift the frame size left 4 to align to 16 
bytes dsubu $sp, $sp, $t3 # reserve stack space for argument array @@ -753,7 +753,7 @@ ENTRY_NO_GP art_quick_invoke_static_stub daddiu $t0, $a5, 1 # t0 = shorty[1] (skip 1 for return type) move $t1, $a1 # t1 = arg_array move $t2, $a2 # t2 = number of argument bytes remain - daddiu $v0, $sp, 4 # v0 points to where to copy arg_array + daddiu $v0, $sp, 8 # v0 points to where to copy arg_array LOOP_OVER_SHORTY_LOADING_REG a1, f13, call_sfn LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_sfn LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_sfn @@ -774,8 +774,8 @@ ENTRY_NO_GP art_quick_invoke_static_stub call_sfn: # call method (a0 has been untouched) - sw $zero, 0($sp) # store null for method* at bottom of frame - ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code + sd $zero, 0($sp) # store null for ArtMethod* at bottom of frame + ld $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code jalr $t9 # call the method nop move $sp, $s8 # restore sp @@ -822,7 +822,7 @@ END art_quick_invoke_static_stub .extern artHandleFillArrayDataFromCode ENTRY art_quick_handle_fill_data SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artHandleFillArrayDataFromCode # (payload offset, Array*, method, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -970,7 +970,7 @@ END art_quick_aput_obj .extern artGetBooleanStaticFromCode ENTRY art_quick_get_boolean_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*) move $a2, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -982,7 +982,7 @@ END 
art_quick_get_boolean_static .extern artGetByteStaticFromCode ENTRY art_quick_get_byte_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*) move $a2, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -994,7 +994,7 @@ END art_quick_get_byte_static .extern artGetCharStaticFromCode ENTRY art_quick_get_char_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*) move $a2, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1006,7 +1006,7 @@ END art_quick_get_char_static .extern artGetShortStaticFromCode ENTRY art_quick_get_short_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*) move $a2, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1018,7 +1018,7 @@ END art_quick_get_short_static .extern artGet32StaticFromCode ENTRY art_quick_get32_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*) move $a2, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1030,7 +1030,7 @@ END art_quick_get32_static .extern artGet64StaticFromCode ENTRY 
art_quick_get64_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*) move $a2, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1042,7 +1042,7 @@ END art_quick_get64_static .extern artGetObjStaticFromCode ENTRY art_quick_get_obj_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*) move $a2, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1054,7 +1054,7 @@ END art_quick_get_obj_static .extern artGetBooleanInstanceFromCode ENTRY art_quick_get_boolean_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1066,7 +1066,7 @@ END art_quick_get_boolean_instance .extern artGetByteInstanceFromCode ENTRY art_quick_get_byte_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1078,7 +1078,7 @@ END art_quick_get_byte_instance .extern artGetCharInstanceFromCode ENTRY art_quick_get_char_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in 
case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1090,7 +1090,7 @@ END art_quick_get_char_instance .extern artGetShortInstanceFromCode ENTRY art_quick_get_short_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1102,7 +1102,7 @@ END art_quick_get_short_instance .extern artGet32InstanceFromCode ENTRY art_quick_get32_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1114,7 +1114,7 @@ END art_quick_get32_instance .extern artGet64InstanceFromCode ENTRY art_quick_get64_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1126,7 +1126,7 @@ END art_quick_get64_instance .extern artGetObjInstanceFromCode ENTRY art_quick_get_obj_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, 
FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_NO_EXCEPTION @@ -1138,7 +1138,7 @@ END art_quick_get_obj_instance .extern artSet8StaticFromCode ENTRY art_quick_set8_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -1150,7 +1150,7 @@ END art_quick_set8_static .extern artSet16StaticFromCode ENTRY art_quick_set16_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -1162,7 +1162,7 @@ END art_quick_set16_static .extern artSet32StaticFromCode ENTRY art_quick_set32_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -1175,7 +1175,7 @@ END art_quick_set32_static ENTRY art_quick_set64_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a2, $a1 # pass new_val - lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_ZERO @@ 
-1187,7 +1187,7 @@ END art_quick_set64_static .extern artSetObjStaticFromCode ENTRY art_quick_set_obj_static SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*) move $a3, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -1199,7 +1199,7 @@ END art_quick_set_obj_static .extern artSet8InstanceFromCode ENTRY art_quick_set8_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*) move $a4, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -1211,7 +1211,7 @@ END art_quick_set8_instance .extern artSet16InstanceFromCode ENTRY art_quick_set16_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*) move $a4, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -1223,7 +1223,7 @@ END art_quick_set16_instance .extern artSet32InstanceFromCode ENTRY art_quick_set32_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*) move $a4, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -1235,7 +1235,7 @@ END art_quick_set32_instance .extern artSet64InstanceFromCode ENTRY art_quick_set64_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save 
callee saves in case of GC - lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*) move $a4, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -1247,7 +1247,7 @@ END art_quick_set64_instance .extern artSetObjInstanceFromCode ENTRY art_quick_set_obj_instance SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC - lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* + ld $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method* jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*) move $a4, rSELF # pass Thread::Current RETURN_IF_ZERO @@ -1366,14 +1366,14 @@ END art_quick_proxy_invoke_handler * dex method index. */ ENTRY art_quick_imt_conflict_trampoline - lwu $a0, 0($sp) # load caller Method* - lwu $a0, MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods - dsll $t0, 2 # convert target method offset to bytes + ld $a0, 0($sp) # load caller Method* + lwu $a0, ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods + dsll $t0, 3 # convert target method offset to bytes daddu $a0, $t0 # get address of target method dla $t9, art_quick_invoke_interface_trampoline .cpreturn jalr $zero, $t9 - lwu $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method + lwu $a0, MIRROR_LONG_ARRAY_DATA_OFFSET($a0) # load the target method END art_quick_imt_conflict_trampoline .extern artQuickResolutionTrampoline @@ -1383,7 +1383,7 @@ ENTRY art_quick_resolution_trampoline jal artQuickResolutionTrampoline # (Method* called, receiver, Thread*, SP) move $a3, $sp # pass $sp beq $v0, $zero, 1f - lwu $a0, 0($sp) # load resolved method in $a0 + ld $a0, 0($sp) # load resolved method in $a0 # artQuickResolutionTrampoline puts resolved method in *SP RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME move $t9, $v0 # code 
pointer must be in $t9 to generate the global pointer diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index a7d24b8e93..13acaa7512 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -17,10 +17,10 @@ #include <cstdio> #include "art_field-inl.h" +#include "art_method-inl.h" #include "class_linker-inl.h" #include "common_runtime_test.h" #include "entrypoints/quick/quick_entrypoints_enum.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/string-inl.h" #include "scoped_thread_state_change.h" @@ -70,7 +70,7 @@ class StubTest : public CommonRuntimeTest { // TODO: Set up a frame according to referrer's specs. size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self, - mirror::ArtMethod* referrer) { + ArtMethod* referrer) { // Push a transition back into managed code onto the linked list in thread. ManagedStack fragment; self->PushManagedStackFragment(&fragment); @@ -420,7 +420,7 @@ class StubTest : public CommonRuntimeTest { // TODO: Set up a frame according to referrer's specs. size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, - Thread* self, mirror::ArtMethod* referrer, size_t hidden) { + Thread* self, ArtMethod* referrer, size_t hidden) { // Push a transition back into managed code onto the linked list in thread. ManagedStack fragment; self->PushManagedStackFragment(&fragment); @@ -776,7 +776,7 @@ class StubTest : public CommonRuntimeTest { // Method with 32b arg0, 64b arg1 size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self, - mirror::ArtMethod* referrer) { + ArtMethod* referrer) { #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \ defined(__aarch64__) // Just pass through. 
@@ -1282,7 +1282,8 @@ TEST_F(StubTest, AllocObject) { { // Use an arbitrary method from c to use as referrer size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx - reinterpret_cast<size_t>(c->GetVirtualMethod(0)), // arbitrary + // arbitrary + reinterpret_cast<size_t>(c->GetVirtualMethod(0, sizeof(void*))), 0U, StubTest::GetEntrypoint(self, kQuickAllocObject), self); @@ -1297,7 +1298,7 @@ TEST_F(StubTest, AllocObject) { { // We can use null in the second argument as we do not need a method here (not used in // resolved/initialized cases) - size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U, + size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U, StubTest::GetEntrypoint(self, kQuickAllocObjectResolved), self); @@ -1311,7 +1312,7 @@ TEST_F(StubTest, AllocObject) { { // We can use null in the second argument as we do not need a method here (not used in // resolved/initialized cases) - size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U, + size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U, StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized), self); @@ -1367,7 +1368,7 @@ TEST_F(StubTest, AllocObject) { } self->ClearException(); - size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U, + size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U, StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized), self); EXPECT_TRUE(self->IsExceptionPending()); @@ -1417,7 +1418,8 @@ TEST_F(StubTest, AllocObjectArray) { // Use an arbitrary method from c to use as referrer size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx 10U, - reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)), // arbitrary + // arbitrary + reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0, sizeof(void*))), StubTest::GetEntrypoint(self, kQuickAllocArray), self); @@ -1554,7 +1556,7 
@@ TEST_F(StubTest, StringCompareTo) { static void GetSetBooleanStatic(ArtField* f, Thread* self, - mirror::ArtMethod* referrer, StubTest* test) + ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) @@ -1584,7 +1586,7 @@ static void GetSetBooleanStatic(ArtField* f, Thread* self, std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl; #endif } -static void GetSetByteStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer, +static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ @@ -1616,7 +1618,7 @@ static void GetSetByteStatic(ArtField* f, Thread* self, mirror::ArtMethod* refer static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, - mirror::ArtMethod* referrer, StubTest* test) + ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) @@ -1651,7 +1653,7 @@ static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thre #endif } static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f, - Thread* self, mirror::ArtMethod* referrer, StubTest* test) + Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) @@ -1685,7 +1687,7 @@ static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f, #endif } -static void GetSetCharStatic(ArtField* f, Thread* self, 
mirror::ArtMethod* referrer, +static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ @@ -1716,7 +1718,7 @@ static void GetSetCharStatic(ArtField* f, Thread* self, mirror::ArtMethod* refer #endif } static void GetSetShortStatic(ArtField* f, Thread* self, - mirror::ArtMethod* referrer, StubTest* test) + ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) @@ -1747,7 +1749,7 @@ static void GetSetShortStatic(ArtField* f, Thread* self, } static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f, - Thread* self, mirror::ArtMethod* referrer, StubTest* test) + Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) @@ -1781,7 +1783,7 @@ static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f, #endif } static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f, - Thread* self, mirror::ArtMethod* referrer, StubTest* test) + Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) @@ -1815,7 +1817,7 @@ static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f, #endif } -static void GetSet32Static(ArtField* f, Thread* self, mirror::ArtMethod* referrer, +static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || 
defined(__aarch64__) || defined(__mips__) || \ @@ -1852,7 +1854,7 @@ static void GetSet32Static(ArtField* f, Thread* self, mirror::ArtMethod* referre static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f, - Thread* self, mirror::ArtMethod* referrer, StubTest* test) + Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) @@ -1893,7 +1895,7 @@ static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f, (defined(__x86_64__) && !defined(__APPLE__)) static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self, - mirror::ArtMethod* referrer, StubTest* test) + ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { test->Invoke3WithReferrer(static_cast<size_t>(f_idx), reinterpret_cast<size_t>(val), @@ -1912,7 +1914,7 @@ static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* se } #endif -static void GetSetObjStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer, +static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ @@ -1936,7 +1938,7 @@ static void GetSetObjStatic(ArtField* f, Thread* self, mirror::ArtMethod* referr #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) static void set_and_check_instance(ArtField* f, mirror::Object* trg, - mirror::Object* val, Thread* self, mirror::ArtMethod* referrer, + mirror::Object* val, Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()), @@ -1960,7 +1962,7 @@ static void 
set_and_check_instance(ArtField* f, mirror::Object* trg, #endif static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f, - Thread* self, mirror::ArtMethod* referrer, StubTest* test) + Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) @@ -1982,7 +1984,7 @@ static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f, // TODO: Complete these tests for 32b architectures. -static void GetSet64Static(ArtField* f, Thread* self, mirror::ArtMethod* referrer, +static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \ @@ -2014,7 +2016,7 @@ static void GetSet64Static(ArtField* f, Thread* self, mirror::ArtMethod* referre static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f, - Thread* self, mirror::ArtMethod* referrer, StubTest* test) + Thread* self, ArtMethod* referrer, StubTest* test) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \ defined(__aarch64__) @@ -2060,11 +2062,11 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) CHECK(o != nullptr); ScopedObjectAccess soa(self); - StackHandleScope<4> hs(self); + StackHandleScope<3> hs(self); Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o))); Handle<mirror::Class> c(hs.NewHandle(obj->GetClass())); // Need a method as a referrer - Handle<mirror::ArtMethod> m(hs.NewHandle(c->GetDirectMethod(0))); + ArtMethod* m = c->GetDirectMethod(0, sizeof(void*)); // Play with it... 
@@ -2079,27 +2081,27 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) } switch (type) { case Primitive::Type::kPrimBoolean: - GetSetBooleanStatic(f, self, m.Get(), test); + GetSetBooleanStatic(f, self, m, test); break; case Primitive::Type::kPrimByte: - GetSetByteStatic(f, self, m.Get(), test); + GetSetByteStatic(f, self, m, test); break; case Primitive::Type::kPrimChar: - GetSetCharStatic(f, self, m.Get(), test); + GetSetCharStatic(f, self, m, test); break; case Primitive::Type::kPrimShort: - GetSetShortStatic(f, self, m.Get(), test); + GetSetShortStatic(f, self, m, test); break; case Primitive::Type::kPrimInt: - GetSet32Static(f, self, m.Get(), test); + GetSet32Static(f, self, m, test); break; case Primitive::Type::kPrimLong: - GetSet64Static(f, self, m.Get(), test); + GetSet64Static(f, self, m, test); break; case Primitive::Type::kPrimNot: // Don't try array. if (f->GetTypeDescriptor()[0] != '[') { - GetSetObjStatic(f, self, m.Get(), test); + GetSetObjStatic(f, self, m, test); } break; default: @@ -2118,27 +2120,27 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) } switch (type) { case Primitive::Type::kPrimBoolean: - GetSetBooleanInstance(&obj, f, self, m.Get(), test); + GetSetBooleanInstance(&obj, f, self, m, test); break; case Primitive::Type::kPrimByte: - GetSetByteInstance(&obj, f, self, m.Get(), test); + GetSetByteInstance(&obj, f, self, m, test); break; case Primitive::Type::kPrimChar: - GetSetCharInstance(&obj, f, self, m.Get(), test); + GetSetCharInstance(&obj, f, self, m, test); break; case Primitive::Type::kPrimShort: - GetSetShortInstance(&obj, f, self, m.Get(), test); + GetSetShortInstance(&obj, f, self, m, test); break; case Primitive::Type::kPrimInt: - GetSet32Instance(&obj, f, self, m.Get(), test); + GetSet32Instance(&obj, f, self, m, test); break; case Primitive::Type::kPrimLong: - GetSet64Instance(&obj, f, self, m.Get(), test); + GetSet64Instance(&obj, f, self, m, test); break; 
case Primitive::Type::kPrimNot: // Don't try array. if (f->GetTypeDescriptor()[0] != '[') { - GetSetObjInstance(&obj, f, self, m.Get(), test); + GetSetObjInstance(&obj, f, self, m, test); } break; default: @@ -2235,17 +2237,18 @@ TEST_F(StubTest, IMT) { ASSERT_NE(nullptr, arraylist_jclass); jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V"); ASSERT_NE(nullptr, arraylist_constructor); - jmethodID contains_jmethod = env->GetMethodID(arraylist_jclass, "contains", "(Ljava/lang/Object;)Z"); + jmethodID contains_jmethod = env->GetMethodID( + arraylist_jclass, "contains", "(Ljava/lang/Object;)Z"); ASSERT_NE(nullptr, contains_jmethod); jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z"); ASSERT_NE(nullptr, add_jmethod); - // Get mirror representation. - Handle<mirror::ArtMethod> contains_amethod(hs.NewHandle(soa.DecodeMethod(contains_jmethod))); + // Get representation. + ArtMethod* contains_amethod = soa.DecodeMethod(contains_jmethod); // Patch up ArrayList.contains. - if (contains_amethod.Get()->GetEntryPointFromQuickCompiledCode() == nullptr) { - contains_amethod.Get()->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>( + if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) { + contains_amethod->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>( StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge))); } @@ -2254,11 +2257,12 @@ TEST_F(StubTest, IMT) { // Load List and used methods (JNI). jclass list_jclass = env->FindClass("java/util/List"); ASSERT_NE(nullptr, list_jclass); - jmethodID inf_contains_jmethod = env->GetMethodID(list_jclass, "contains", "(Ljava/lang/Object;)Z"); + jmethodID inf_contains_jmethod = env->GetMethodID( + list_jclass, "contains", "(Ljava/lang/Object;)Z"); ASSERT_NE(nullptr, inf_contains_jmethod); // Get mirror representation. 
- Handle<mirror::ArtMethod> inf_contains(hs.NewHandle(soa.DecodeMethod(inf_contains_jmethod))); + ArtMethod* inf_contains = soa.DecodeMethod(inf_contains_jmethod); // Object @@ -2287,8 +2291,8 @@ TEST_F(StubTest, IMT) { Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(obj.Get()), StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline), - self, contains_amethod.Get(), - static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex())); + self, contains_amethod, + static_cast<size_t>(inf_contains->GetDexMethodIndex())); ASSERT_FALSE(self->IsExceptionPending()); EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result); @@ -2301,33 +2305,31 @@ TEST_F(StubTest, IMT) { // Contains. - result = Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()), - reinterpret_cast<size_t>(obj.Get()), - StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline), - self, contains_amethod.Get(), - static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex())); + result = Invoke3WithReferrerAndHidden( + 0U, reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(obj.Get()), + StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline), self, contains_amethod, + static_cast<size_t>(inf_contains->GetDexMethodIndex())); ASSERT_FALSE(self->IsExceptionPending()); EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result); // 2. 
regular interface trampoline - result = Invoke3WithReferrer(static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()), + result = Invoke3WithReferrer(static_cast<size_t>(inf_contains->GetDexMethodIndex()), reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(obj.Get()), StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), - self, contains_amethod.Get()); + self, contains_amethod); ASSERT_FALSE(self->IsExceptionPending()); EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result); - result = Invoke3WithReferrer(static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()), - reinterpret_cast<size_t>(array_list.Get()), - reinterpret_cast<size_t>(array_list.Get()), - StubTest::GetEntrypoint(self, - kQuickInvokeInterfaceTrampolineWithAccessCheck), - self, contains_amethod.Get()); + result = Invoke3WithReferrer( + static_cast<size_t>(inf_contains->GetDexMethodIndex()), + reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(array_list.Get()), + StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), self, + contains_amethod); ASSERT_FALSE(self->IsExceptionPending()); EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result); diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc index 06bae75585..7096c82aad 100644 --- a/runtime/arch/x86/context_x86.cc +++ b/runtime/arch/x86/context_x86.cc @@ -16,8 +16,8 @@ #include "context_x86.h" +#include "art_method-inl.h" #include "base/bit_utils.h" -#include "mirror/art_method-inl.h" #include "quick/quick_method_frame_info.h" namespace art { @@ -35,7 +35,7 @@ void X86Context::Reset() { } void X86Context::FillCalleeSaves(const StackVisitor& fr) { - mirror::ArtMethod* method = fr.GetMethod(); + ArtMethod* method = fr.GetMethod(); const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo(); int spill_pos = 0; diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc index 2de69aa679..d7c4cb182a 100644 --- 
a/runtime/arch/x86/fault_handler_x86.cc +++ b/runtime/arch/x86/fault_handler_x86.cc @@ -16,13 +16,14 @@ #include "fault_handler.h" + #include <sys/ucontext.h> + +#include "art_method-inl.h" #include "base/macros.h" #include "globals.h" #include "base/logging.h" #include "base/hex_dump.h" -#include "mirror/art_method.h" -#include "mirror/art_method-inl.h" #include "thread.h" #include "thread-inl.h" @@ -248,7 +249,7 @@ void FaultManager::HandleNestedSignal(int, siginfo_t*, void* context) { } void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, - mirror::ArtMethod** out_method, + ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp) { struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); *out_sp = static_cast<uintptr_t>(uc->CTX_ESP); @@ -267,10 +268,10 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86)); #endif if (overflow_addr == fault_addr) { - *out_method = reinterpret_cast<mirror::ArtMethod*>(uc->CTX_METHOD); + *out_method = reinterpret_cast<ArtMethod*>(uc->CTX_METHOD); } else { // The method is at the top of the stack. - *out_method = (reinterpret_cast<StackReference<mirror::ArtMethod>* >(*out_sp)[0]).AsMirrorPtr(); + *out_method = *reinterpret_cast<ArtMethod**>(*out_sp); } uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP); diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 6ebeba3aaf..9cebb4ed55 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -477,7 +477,7 @@ DEFINE_FUNCTION art_quick_invoke_stub // Nothing left to load. 
.Lgpr_setup_finished: mov 20(%ebp), %eax // move method pointer into eax - call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method + call *ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method mov %ebp, %esp // restore stack pointer CFI_DEF_CFA_REGISTER(esp) POP edi // pop edi @@ -594,7 +594,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub // Nothing left to load. .Lgpr_setup_finished2: mov 20(%ebp), %eax // move method pointer into eax - call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method + call *ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method mov %ebp, %esp // restore stack pointer CFI_DEF_CFA_REGISTER(esp) POP edi // pop edi @@ -1396,7 +1396,7 @@ END_FUNCTION art_quick_proxy_invoke_handler DEFINE_FUNCTION art_quick_imt_conflict_trampoline PUSH ecx movl 8(%esp), %eax // load caller Method* - movl MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET(%eax), %eax // load dex_cache_resolved_methods + movl ART_METHOD_DEX_CACHE_METHODS_OFFSET(%eax), %eax // load dex_cache_resolved_methods movd %xmm7, %ecx // get target method index stored in xmm0 movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4), %eax // load the target method POP ecx diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc index 2c4532c42b..1fe2ef8fd8 100644 --- a/runtime/arch/x86_64/context_x86_64.cc +++ b/runtime/arch/x86_64/context_x86_64.cc @@ -16,8 +16,8 @@ #include "context_x86_64.h" +#include "art_method-inl.h" #include "base/bit_utils.h" -#include "mirror/art_method-inl.h" #include "quick/quick_method_frame_info.h" namespace art { @@ -35,7 +35,7 @@ void X86_64Context::Reset() { } void X86_64Context::FillCalleeSaves(const StackVisitor& fr) { - mirror::ArtMethod* method = fr.GetMethod(); + ArtMethod* method = fr.GetMethod(); const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo(); int spill_pos = 0; diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 
da4d92b889..bd199dbb82 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -57,7 +57,7 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME) PUSH r12 // Callee save. PUSH rbp // Callee save. PUSH rbx // Callee save. - // Create space for FPR args, plus space for StackReference<ArtMethod>. + // Create space for FPR args, plus space for ArtMethod*. subq MACRO_LITERAL(4 * 8 + 8), %rsp CFI_ADJUST_CFA_OFFSET(4 * 8 + 8) // Save FPRs. @@ -67,7 +67,7 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME) movq %xmm15, 32(%rsp) // R10 := ArtMethod* for save all callee save frame method. THIS_LOAD_REQUIRES_READ_BARRIER - movl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10d + movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10 // Store ArtMethod* to bottom of stack. movq %r10, 0(%rsp) // Store rsp as the top quick frame. @@ -100,7 +100,7 @@ MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME) PUSH r12 // Callee save. PUSH rbp // Callee save. PUSH rbx // Callee save. - // Create space for FPR args, plus space for StackReference<ArtMethod>. + // Create space for FPR args, plus space for ArtMethod*. subq LITERAL(8 + 4 * 8), %rsp CFI_ADJUST_CFA_OFFSET(8 + 4 * 8) // Save FPRs. @@ -110,7 +110,7 @@ MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME) movq %xmm15, 32(%rsp) // R10 := ArtMethod* for refs only callee save frame method. THIS_LOAD_REQUIRES_READ_BARRIER - movl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10d + movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10 // Store ArtMethod* to bottom of stack. movq %r10, 0(%rsp) // Store rsp as the stop quick frame. @@ -164,13 +164,12 @@ MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME) PUSH rbx // Callee save. PUSH rdx // Quick arg 2. PUSH rcx // Quick arg 3. - // Create space for FPR args and create 2 slots, 1 of padding and 1 for the - // StackReference<ArtMethod>. + // Create space for FPR args and create 2 slots for ArtMethod*. 
subq MACRO_LITERAL(80 + 4 * 8), %rsp CFI_ADJUST_CFA_OFFSET(80 + 4 * 8) // R10 := ArtMethod* for ref and args callee save frame method. THIS_LOAD_REQUIRES_READ_BARRIER - movl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10d + movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10 // Save FPRs. movq %xmm0, 16(%rsp) movq %xmm1, 24(%rsp) @@ -210,8 +209,7 @@ MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI) PUSH rbx // Callee save. PUSH rdx // Quick arg 2. PUSH rcx // Quick arg 3. - // Create space for FPR args and create 2 slots, 1 of padding and 1 for the - // StackReference<ArtMethod>. + // Create space for FPR args and create 2 slots for ArtMethod*. subq LITERAL(80 + 4 * 8), %rsp CFI_ADJUST_CFA_OFFSET(80 + 4 * 8) // Save FPRs. @@ -362,7 +360,7 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) // Helper signature is always // (method_idx, *this_object, *caller_method, *self, sp) - movl FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE(%rsp), %edx // pass caller Method* + movq FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE(%rsp), %rdx // pass caller Method* movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread movq %rsp, %r8 // pass SP @@ -506,13 +504,13 @@ DEFINE_FUNCTION art_quick_invoke_stub #if (STACK_REFERENCE_SIZE != 4) #error "STACK_REFERENCE_SIZE(X86_64) size not as expected." #endif - movl LITERAL(0), (%rsp) // Store null for method* + movq LITERAL(0), (%rsp) // Store null for method* movl %r10d, %ecx // Place size of args in rcx. movq %rdi, %rax // rax := method to be called movq %rsi, %r11 // r11 := arg_array - leaq 4(%rsp), %rdi // rdi is pointing just above the StackReference<method> in the - // stack arguments. + leaq 8(%rsp), %rdi // rdi is pointing just above the ArtMethod* in the stack + // arguments. // Copy arg array into stack. 
rep movsb // while (rcx--) { *rdi++ = *rsi++ } leaq 1(%r9), %r10 // r10 := shorty + 1 ; ie skip return arg character @@ -524,7 +522,7 @@ DEFINE_FUNCTION art_quick_invoke_stub LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished .Lgpr_setup_finished: - call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method. + call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method. movq %rbp, %rsp // Restore stack pointer. POP r15 // Pop r15 POP r14 // Pop r14 @@ -600,12 +598,12 @@ DEFINE_FUNCTION art_quick_invoke_static_stub #if (STACK_REFERENCE_SIZE != 4) #error "STACK_REFERENCE_SIZE(X86_64) size not as expected." #endif - movl LITERAL(0), (%rsp) // Store null for method* + movq LITERAL(0), (%rsp) // Store null for method* movl %r10d, %ecx // Place size of args in rcx. movq %rdi, %rax // rax := method to be called movq %rsi, %r11 // r11 := arg_array - leaq 4(%rsp), %rdi // rdi is pointing just above the StackReference<method> in the + leaq 8(%rsp), %rdi // rdi is pointing just above the ArtMethod* in the // stack arguments. // Copy arg array into stack. rep movsb // while (rcx--) { *rdi++ = *rsi++ } @@ -617,7 +615,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished2 LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2 .Lgpr_setup_finished2: - call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method. + call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method. movq %rbp, %rsp // Restore stack pointer. 
POP r15 // Pop r15 POP r14 // Pop r14 @@ -751,7 +749,7 @@ END_MACRO MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) DEFINE_FUNCTION VAR(c_name, 0) - movl 8(%rsp), %esi // pass referrer + movq 8(%rsp), %rsi // pass referrer SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // arg0 is in rdi movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() @@ -763,7 +761,7 @@ END_MACRO MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) DEFINE_FUNCTION VAR(c_name, 0) - movl 8(%rsp), %edx // pass referrer + movq 8(%rsp), %rdx // pass referrer SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // arg0 and arg1 are in rdi/rsi movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current() @@ -775,7 +773,7 @@ END_MACRO MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) DEFINE_FUNCTION VAR(c_name, 0) - movl 8(%rsp), %ecx // pass referrer + movq 8(%rsp), %rcx // pass referrer SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // arg0, arg1, and arg2 are in rdi/rsi/rdx movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current() @@ -922,7 +920,7 @@ DEFINE_FUNCTION art_quick_alloc_object_tlab // Fast path tlab allocation. // RDI: uint32_t type_idx, RSI: ArtMethod* // RDX, RCX, R8, R9: free. RAX: return val. - movl MIRROR_ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array + movl ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array // Load the class movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdx, %rdi, MIRROR_OBJECT_ARRAY_COMPONENT_SIZE), %edx testl %edx, %edx // Check null class @@ -1309,7 +1307,7 @@ ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_O // This is singled out as the argument order is different. 
DEFINE_FUNCTION art_quick_set64_static movq %rsi, %rdx // pass new_val - movl 8(%rsp), %esi // pass referrer + movq 8(%rsp), %rsi // pass referrer SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // field_idx is in rdi movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current() @@ -1340,9 +1338,9 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline int3 int3 #else - movl 8(%rsp), %edi // load caller Method* - movl MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi // load dex_cache_resolved_methods - movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rax, 4), %edi // load the target method + movq 8(%rsp), %rdi // load caller Method* + movl ART_METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi // load dex_cache_resolved_methods + movq MIRROR_LONG_ARRAY_DATA_OFFSET(%rdi, %rax, 8), %rdi // load the target method jmp art_quick_invoke_interface_trampoline #endif // __APPLE__ END_FUNCTION art_quick_imt_conflict_trampoline @@ -1395,7 +1393,6 @@ END_FUNCTION art_quick_resolution_trampoline * | XMM2 | float arg 3 * | XMM1 | float arg 2 * | XMM0 | float arg 1 - * | Padding | * | RDI/Method* | <- sp * #-------------------# * | Scratch Alloca | 5K scratch space diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h index 4991ad7513..ee51ec9f1c 100644 --- a/runtime/art_field-inl.h +++ b/runtime/art_field-inl.h @@ -328,6 +328,11 @@ inline mirror::String* ArtField::GetStringName(Thread* self, bool resolve) { return name; } +template<typename RootVisitorType> +inline void ArtField::VisitRoots(RootVisitorType& visitor) { + visitor.VisitRoot(declaring_class_.AddressWithoutBarrier()); +} + } // namespace art #endif // ART_RUNTIME_ART_FIELD_INL_H_ diff --git a/runtime/art_field.cc b/runtime/art_field.cc index 47d5a76dc7..e4a583404a 100644 --- a/runtime/art_field.cc +++ b/runtime/art_field.cc @@ -20,6 +20,7 @@ #include "class_linker-inl.h" #include "gc/accounting/card_table-inl.h" #include "handle_scope.h" +#include "mirror/class-inl.h" #include "mirror/object-inl.h" #include 
"mirror/object_array-inl.h" #include "runtime.h" @@ -46,10 +47,6 @@ void ArtField::SetOffset(MemberOffset num_bytes) { offset_ = num_bytes.Uint32Value(); } -void ArtField::VisitRoots(RootVisitor* visitor) { - declaring_class_.VisitRoot(visitor, RootInfo(kRootStickyClass)); -} - ArtField* ArtField::FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset) { DCHECK(klass != nullptr); auto* instance_fields = klass->GetIFields(); diff --git a/runtime/art_field.h b/runtime/art_field.h index 9d3dbd9e31..7a03723d00 100644 --- a/runtime/art_field.h +++ b/runtime/art_field.h @@ -38,7 +38,7 @@ class Object; class String; } // namespace mirror -class ArtField { +class ArtField FINAL { public: ArtField(); @@ -151,8 +151,8 @@ class ArtField { void SetObj(mirror::Object* object, mirror::Object* new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<typename RootVisitorType> + void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return (GetAccessFlags() & kAccVolatile) != 0; diff --git a/runtime/mirror/art_method-inl.h b/runtime/art_method-inl.h index 7c8067adcd..5cfce41cc0 100644 --- a/runtime/mirror/art_method-inl.h +++ b/runtime/art_method-inl.h @@ -14,19 +14,19 @@ * limitations under the License. 
*/ -#ifndef ART_RUNTIME_MIRROR_ART_METHOD_INL_H_ -#define ART_RUNTIME_MIRROR_ART_METHOD_INL_H_ +#ifndef ART_RUNTIME_ART_METHOD_INL_H_ +#define ART_RUNTIME_ART_METHOD_INL_H_ #include "art_method.h" #include "art_field.h" -#include "class.h" -#include "class_linker-inl.h" -#include "dex_cache.h" #include "dex_file.h" #include "dex_file-inl.h" -#include "object-inl.h" -#include "object_array.h" +#include "gc_root-inl.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache.h" +#include "mirror/object-inl.h" +#include "mirror/object_array.h" #include "oat.h" #include "quick/quick_method_frame_info.h" #include "read_barrier-inl.h" @@ -34,73 +34,82 @@ #include "utils.h" namespace art { -namespace mirror { -inline uint32_t ArtMethod::ClassSize() { - uint32_t vtable_entries = Object::kVTableLength; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0); +inline mirror::Class* ArtMethod::GetDeclaringClassUnchecked() { + return declaring_class_.Read(); } -template<ReadBarrierOption kReadBarrierOption> -inline Class* ArtMethod::GetJavaLangReflectArtMethod() { - DCHECK(!java_lang_reflect_ArtMethod_.IsNull()); - return java_lang_reflect_ArtMethod_.Read<kReadBarrierOption>(); +inline mirror::Class* ArtMethod::GetDeclaringClassNoBarrier() { + return declaring_class_.Read<kWithoutReadBarrier>(); } -inline Class* ArtMethod::GetDeclaringClass() { - Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_)); - DCHECK(result != nullptr) << this; - DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this; +inline mirror::Class* ArtMethod::GetDeclaringClass() { + mirror::Class* result = GetDeclaringClassUnchecked(); + if (kIsDebugBuild) { + if (!IsRuntimeMethod()) { + CHECK(result != nullptr) << this; + CHECK(result->IsIdxLoaded() || result->IsErroneous()) + << result->GetStatus() << " " << PrettyClass(result); + } else { + CHECK(result == nullptr) << this; + } + } return result; } -inline void ArtMethod::SetDeclaringClass(Class 
*new_declaring_class) { - SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_), - new_declaring_class); +inline void ArtMethod::SetDeclaringClass(mirror::Class* new_declaring_class) { + declaring_class_ = GcRoot<mirror::Class>(new_declaring_class); } inline uint32_t ArtMethod::GetAccessFlags() { - DCHECK(GetDeclaringClass()->IsIdxLoaded() || GetDeclaringClass()->IsErroneous()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_)); + DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() || + GetDeclaringClass()->IsErroneous()); + return access_flags_; } inline uint16_t ArtMethod::GetMethodIndex() { - DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_)); + DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsResolved() || + GetDeclaringClass()->IsErroneous()); + return method_index_; } inline uint16_t ArtMethod::GetMethodIndexDuringLinking() { - return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_)); + return method_index_; } inline uint32_t ArtMethod::GetDexMethodIndex() { - DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous()); - return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_)); + DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() || + GetDeclaringClass()->IsErroneous()); + return dex_method_index_; } -inline ObjectArray<ArtMethod>* ArtMethod::GetDexCacheResolvedMethods() { - return GetFieldObject<ObjectArray<ArtMethod>>( - OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_)); +inline mirror::PointerArray* ArtMethod::GetDexCacheResolvedMethods() { + return dex_cache_resolved_methods_.Read(); } -inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index) { - ArtMethod* method = GetDexCacheResolvedMethods()->Get(method_index); - if (method != nullptr && !method->GetDeclaringClass()->IsErroneous()) { - return method; - } 
else { - return nullptr; +inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index, size_t ptr_size) { + auto* method = GetDexCacheResolvedMethods()->GetElementPtrSize<ArtMethod*>( + method_index, ptr_size); + if (LIKELY(method != nullptr)) { + auto* declaring_class = method->GetDeclaringClass(); + if (LIKELY(declaring_class == nullptr || !declaring_class->IsErroneous())) { + return method; + } } + return nullptr; } -inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method) { - GetDexCacheResolvedMethods()->Set<false>(method_idx, new_method); +inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method, + size_t ptr_size) { + DCHECK(new_method == nullptr || new_method->GetDeclaringClass() != nullptr); + GetDexCacheResolvedMethods()->SetElementPtrSize(method_idx, new_method, ptr_size); } inline bool ArtMethod::HasDexCacheResolvedMethods() { return GetDexCacheResolvedMethods() != nullptr; } -inline bool ArtMethod::HasSameDexCacheResolvedMethods(ObjectArray<ArtMethod>* other_cache) { +inline bool ArtMethod::HasSameDexCacheResolvedMethods(mirror::PointerArray* other_cache) { return GetDexCacheResolvedMethods() == other_cache; } @@ -108,20 +117,15 @@ inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other) { return GetDexCacheResolvedMethods() == other->GetDexCacheResolvedMethods(); } - -inline ObjectArray<Class>* ArtMethod::GetDexCacheResolvedTypes() { - return GetFieldObject<ObjectArray<Class>>( - OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_)); +inline mirror::ObjectArray<mirror::Class>* ArtMethod::GetDexCacheResolvedTypes() { + return dex_cache_resolved_types_.Read(); } template <bool kWithCheck> -inline Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index) { - Class* klass; - if (kWithCheck) { - klass = GetDexCacheResolvedTypes()->Get(type_index); - } else { - klass = GetDexCacheResolvedTypes()->GetWithoutChecks(type_index); - } +inline 
mirror::Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index) { + mirror::Class* klass = kWithCheck ? + GetDexCacheResolvedTypes()->Get(type_index) : + GetDexCacheResolvedTypes()->GetWithoutChecks(type_index); return (klass != nullptr && !klass->IsErroneous()) ? klass : nullptr; } @@ -129,7 +133,8 @@ inline bool ArtMethod::HasDexCacheResolvedTypes() { return GetDexCacheResolvedTypes() != nullptr; } -inline bool ArtMethod::HasSameDexCacheResolvedTypes(ObjectArray<Class>* other_cache) { +inline bool ArtMethod::HasSameDexCacheResolvedTypes( + mirror::ObjectArray<mirror::Class>* other_cache) { return GetDexCacheResolvedTypes() == other_cache; } @@ -165,7 +170,7 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) { case kDirect: return !IsDirect() || IsStatic(); case kVirtual: { - Class* methods_class = GetDeclaringClass(); + mirror::Class* methods_class = GetDeclaringClass(); return IsDirect() || (methods_class->IsInterface() && !IsMiranda()); } case kSuper: @@ -173,7 +178,7 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) { // Interface methods cannot be invoked with invoke-super. 
return IsConstructor() || IsStatic() || GetDeclaringClass()->IsInterface(); case kInterface: { - Class* methods_class = GetDeclaringClass(); + mirror::Class* methods_class = GetDeclaringClass(); return IsDirect() || !(methods_class->IsInterface() || methods_class->IsObjectClass()); } default: @@ -237,7 +242,8 @@ inline CodeInfo ArtMethod::GetOptimizedCodeInfo() { DCHECK(code_pointer != nullptr); uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_; - const void* data = reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset); + const void* data = + reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset); return CodeInfo(data); } @@ -261,7 +267,7 @@ inline const uint8_t* ArtMethod::GetNativeGcMap(const void* code_pointer, size_t } inline bool ArtMethod::IsRuntimeMethod() { - return GetDexMethodIndex() == DexFile::kDexNoIndex; + return dex_method_index_ == DexFile::kDexNoIndex; } inline bool ArtMethod::IsCalleeSaveMethod() { @@ -317,48 +323,48 @@ inline const DexFile* ArtMethod::GetDexFile() { } inline const char* ArtMethod::GetDeclaringClassDescriptor() { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - uint32_t dex_method_idx = method->GetDexMethodIndex(); + uint32_t dex_method_idx = GetDexMethodIndex(); if (UNLIKELY(dex_method_idx == DexFile::kDexNoIndex)) { return "<runtime method>"; } - const DexFile* dex_file = method->GetDexFile(); + DCHECK(!IsProxyMethod()); + const DexFile* dex_file = GetDexFile(); return dex_file->GetMethodDeclaringClassDescriptor(dex_file->GetMethodId(dex_method_idx)); } inline const char* ArtMethod::GetShorty(uint32_t* out_length) { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - const DexFile* dex_file = method->GetDexFile(); - return dex_file->GetMethodShorty(dex_file->GetMethodId(method->GetDexMethodIndex()), out_length); + DCHECK(!IsProxyMethod()); + const DexFile* dex_file = GetDexFile(); + return 
dex_file->GetMethodShorty(dex_file->GetMethodId(GetDexMethodIndex()), out_length); } inline const Signature ArtMethod::GetSignature() { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - uint32_t dex_method_idx = method->GetDexMethodIndex(); + uint32_t dex_method_idx = GetDexMethodIndex(); if (dex_method_idx != DexFile::kDexNoIndex) { - const DexFile* dex_file = method->GetDexFile(); + DCHECK(!IsProxyMethod()); + const DexFile* dex_file = GetDexFile(); return dex_file->GetMethodSignature(dex_file->GetMethodId(dex_method_idx)); } return Signature::NoSignature(); } inline const char* ArtMethod::GetName() { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - uint32_t dex_method_idx = method->GetDexMethodIndex(); + uint32_t dex_method_idx = GetDexMethodIndex(); if (LIKELY(dex_method_idx != DexFile::kDexNoIndex)) { - const DexFile* dex_file = method->GetDexFile(); + DCHECK(!IsProxyMethod()); + const DexFile* dex_file = GetDexFile(); return dex_file->GetMethodName(dex_file->GetMethodId(dex_method_idx)); } - Runtime* runtime = Runtime::Current(); - if (method == runtime->GetResolutionMethod()) { + Runtime* const runtime = Runtime::Current(); + if (this == runtime->GetResolutionMethod()) { return "<runtime internal resolution method>"; - } else if (method == runtime->GetImtConflictMethod()) { + } else if (this == runtime->GetImtConflictMethod()) { return "<runtime internal imt conflict method>"; - } else if (method == runtime->GetCalleeSaveMethod(Runtime::kSaveAll)) { + } else if (this == runtime->GetCalleeSaveMethod(Runtime::kSaveAll)) { return "<runtime internal callee-save all registers method>"; - } else if (method == runtime->GetCalleeSaveMethod(Runtime::kRefsOnly)) { + } else if (this == runtime->GetCalleeSaveMethod(Runtime::kRefsOnly)) { return "<runtime internal callee-save reference registers method>"; - } else if (method == runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs)) { + } else if (this == 
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs)) { return "<runtime internal callee-save reference and argument registers method>"; } else { return "<unknown runtime internal method>"; @@ -370,92 +376,96 @@ inline const DexFile::CodeItem* ArtMethod::GetCodeItem() { } inline bool ArtMethod::IsResolvedTypeIdx(uint16_t type_idx) { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - return method->GetDexCacheResolvedType(type_idx) != nullptr; + DCHECK(!IsProxyMethod()); + return GetDexCacheResolvedType(type_idx) != nullptr; } inline int32_t ArtMethod::GetLineNumFromDexPC(uint32_t dex_pc) { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); + DCHECK(!IsProxyMethod()); if (dex_pc == DexFile::kDexNoIndex) { - return method->IsNative() ? -2 : -1; + return IsNative() ? -2 : -1; } - return method->GetDexFile()->GetLineNumFromPC(method, dex_pc); + return GetDexFile()->GetLineNumFromPC(this, dex_pc); } inline const DexFile::ProtoId& ArtMethod::GetPrototype() { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - const DexFile* dex_file = method->GetDexFile(); - return dex_file->GetMethodPrototype(dex_file->GetMethodId(method->GetDexMethodIndex())); + DCHECK(!IsProxyMethod()); + const DexFile* dex_file = GetDexFile(); + return dex_file->GetMethodPrototype(dex_file->GetMethodId(GetDexMethodIndex())); } inline const DexFile::TypeList* ArtMethod::GetParameterTypeList() { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - const DexFile* dex_file = method->GetDexFile(); + DCHECK(!IsProxyMethod()); + const DexFile* dex_file = GetDexFile(); const DexFile::ProtoId& proto = dex_file->GetMethodPrototype( - dex_file->GetMethodId(method->GetDexMethodIndex())); + dex_file->GetMethodId(GetDexMethodIndex())); return dex_file->GetProtoParameters(proto); } inline const char* ArtMethod::GetDeclaringClassSourceFile() { - return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetSourceFile(); + DCHECK(!IsProxyMethod()); + return 
GetDeclaringClass()->GetSourceFile(); } inline uint16_t ArtMethod::GetClassDefIndex() { - return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetDexClassDefIndex(); + DCHECK(!IsProxyMethod()); + return GetDeclaringClass()->GetDexClassDefIndex(); } inline const DexFile::ClassDef& ArtMethod::GetClassDef() { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - return method->GetDexFile()->GetClassDef(GetClassDefIndex()); + DCHECK(!IsProxyMethod()); + return GetDexFile()->GetClassDef(GetClassDefIndex()); } inline const char* ArtMethod::GetReturnTypeDescriptor() { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - const DexFile* dex_file = method->GetDexFile(); - const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex()); + DCHECK(!IsProxyMethod()); + const DexFile* dex_file = GetDexFile(); + const DexFile::MethodId& method_id = dex_file->GetMethodId(GetDexMethodIndex()); const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id); uint16_t return_type_idx = proto_id.return_type_idx_; return dex_file->GetTypeDescriptor(dex_file->GetTypeId(return_type_idx)); } inline const char* ArtMethod::GetTypeDescriptorFromTypeIdx(uint16_t type_idx) { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - const DexFile* dex_file = method->GetDexFile(); + DCHECK(!IsProxyMethod()); + const DexFile* dex_file = GetDexFile(); return dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_idx)); } inline mirror::ClassLoader* ArtMethod::GetClassLoader() { - return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetClassLoader(); + DCHECK(!IsProxyMethod()); + return GetDeclaringClass()->GetClassLoader(); } inline mirror::DexCache* ArtMethod::GetDexCache() { - return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetDexCache(); + DCHECK(!IsProxyMethod()); + return GetDeclaringClass()->GetDexCache(); } inline bool ArtMethod::IsProxyMethod() { return GetDeclaringClass()->IsProxyClass(); } -inline ArtMethod* 
ArtMethod::GetInterfaceMethodIfProxy() { +inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(size_t pointer_size) { if (LIKELY(!IsProxyMethod())) { return this; } mirror::Class* klass = GetDeclaringClass(); - mirror::ArtMethod* interface_method = GetDexCacheResolvedMethods()->Get(GetDexMethodIndex()); + auto interface_method = GetDexCacheResolvedMethods()->GetElementPtrSize<ArtMethod*>( + GetDexMethodIndex(), pointer_size); DCHECK(interface_method != nullptr); DCHECK_EQ(interface_method, Runtime::Current()->GetClassLinker()->FindMethodForProxy(klass, this)); return interface_method; } -inline void ArtMethod::SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods) { - SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_), - new_dex_cache_methods); +inline void ArtMethod::SetDexCacheResolvedMethods(mirror::PointerArray* new_dex_cache_methods) { + dex_cache_resolved_methods_ = GcRoot<mirror::PointerArray>(new_dex_cache_methods); } -inline void ArtMethod::SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_classes) { - SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_), - new_dex_cache_classes); +inline void ArtMethod::SetDexCacheResolvedTypes( + mirror::ObjectArray<mirror::Class>* new_dex_cache_types) { + dex_cache_resolved_types_ = GcRoot<mirror::ObjectArray<mirror::Class>>(new_dex_cache_types); } inline mirror::Class* ArtMethod::GetReturnType(bool resolve) { @@ -472,17 +482,23 @@ inline mirror::Class* ArtMethod::GetReturnType(bool resolve) { return type; } -inline void ArtMethod::CheckObjectSizeEqualsMirrorSize() { - // Using the default, check the class object size to make sure it matches the size of the - // object. 
- size_t this_size = sizeof(*this); -#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT - this_size += sizeof(void*) - sizeof(uint32_t); -#endif - DCHECK_EQ(GetClass()->GetObjectSize(), this_size); +template<typename RootVisitorType> +void ArtMethod::VisitRoots(RootVisitorType& visitor) { + visitor.VisitRootIfNonNull(declaring_class_.AddressWithoutBarrier()); + visitor.VisitRootIfNonNull(dex_cache_resolved_methods_.AddressWithoutBarrier()); + visitor.VisitRootIfNonNull(dex_cache_resolved_types_.AddressWithoutBarrier()); +} + +inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) { + memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src), + ObjectSize(image_pointer_size)); + declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass()); + dex_cache_resolved_methods_ = GcRoot<mirror::PointerArray>( + const_cast<ArtMethod*>(src)->GetDexCacheResolvedMethods()); + dex_cache_resolved_types_ = GcRoot<mirror::ObjectArray<mirror::Class>>( + const_cast<ArtMethod*>(src)->GetDexCacheResolvedTypes()); } -} // namespace mirror } // namespace art -#endif // ART_RUNTIME_MIRROR_ART_METHOD_INL_H_ +#endif // ART_RUNTIME_ART_METHOD_INL_H_ diff --git a/runtime/mirror/art_method.cc b/runtime/art_method.cc index 9518c9d797..fbaf0ae217 100644 --- a/runtime/mirror/art_method.cc +++ b/runtime/art_method.cc @@ -16,12 +16,10 @@ #include "art_method.h" -#include "abstract_method.h" #include "arch/context.h" #include "art_field-inl.h" #include "art_method-inl.h" #include "base/stringpiece.h" -#include "class-inl.h" #include "dex_file-inl.h" #include "dex_instruction.h" #include "entrypoints/entrypoint_utils.h" @@ -32,15 +30,15 @@ #include "jit/jit_code_cache.h" #include "jni_internal.h" #include "mapping_table.h" -#include "object_array-inl.h" -#include "object_array.h" -#include "object-inl.h" +#include "mirror/abstract_method.h" +#include "mirror/class-inl.h" +#include "mirror/object_array-inl.h" +#include 
"mirror/object-inl.h" +#include "mirror/string.h" #include "scoped_thread_state_change.h" -#include "string.h" #include "well_known_classes.h" namespace art { -namespace mirror { extern "C" void art_quick_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*, const char*); @@ -49,9 +47,6 @@ extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Th const char*); #endif -// TODO: get global references for these -GcRoot<Class> ArtMethod::java_lang_reflect_ArtMethod_; - ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject jlr_method) { auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(jlr_method); @@ -59,17 +54,13 @@ ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnabl return abstract_method->GetArtMethod(); } -void ArtMethod::VisitRoots(RootVisitor* visitor) { - java_lang_reflect_ArtMethod_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass)); -} - mirror::String* ArtMethod::GetNameAsString(Thread* self) { - mirror::ArtMethod* method = GetInterfaceMethodIfProxy(); - const DexFile* dex_file = method->GetDexFile(); - uint32_t dex_method_idx = method->GetDexMethodIndex(); - const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx); + CHECK(!IsProxyMethod()); StackHandleScope<1> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache())); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache())); + auto* dex_file = dex_cache->GetDexFile(); + uint32_t dex_method_idx = GetDexMethodIndex(); + const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx); return Runtime::Current()->GetClassLinker()->ResolveString(*dex_file, method_id.name_idx_, dex_cache); } @@ -87,17 +78,6 @@ InvokeType ArtMethod::GetInvokeType() { } } -void ArtMethod::SetClass(Class* java_lang_reflect_ArtMethod) { - CHECK(java_lang_reflect_ArtMethod_.IsNull()); - CHECK(java_lang_reflect_ArtMethod != nullptr); - 
java_lang_reflect_ArtMethod_ = GcRoot<Class>(java_lang_reflect_ArtMethod); -} - -void ArtMethod::ResetClass() { - CHECK(!java_lang_reflect_ArtMethod_.IsNull()); - java_lang_reflect_ArtMethod_ = GcRoot<Class>(nullptr); -} - size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) { CHECK_LE(1U, shorty.length()); uint32_t num_registers = 0; @@ -129,31 +109,33 @@ static bool HasSameNameAndSignature(ArtMethod* method1, ArtMethod* method2) return dex_file->GetMethodSignature(mid) == dex_file2->GetMethodSignature(mid2); } -ArtMethod* ArtMethod::FindOverriddenMethod() { +ArtMethod* ArtMethod::FindOverriddenMethod(size_t pointer_size) { if (IsStatic()) { return nullptr; } - Class* declaring_class = GetDeclaringClass(); - Class* super_class = declaring_class->GetSuperClass(); + mirror::Class* declaring_class = GetDeclaringClass(); + mirror::Class* super_class = declaring_class->GetSuperClass(); uint16_t method_index = GetMethodIndex(); ArtMethod* result = nullptr; // Did this method override a super class method? 
If so load the result from the super class' // vtable if (super_class->HasVTable() && method_index < super_class->GetVTableLength()) { - result = super_class->GetVTableEntry(method_index); + result = super_class->GetVTableEntry(method_index, pointer_size); } else { // Method didn't override superclass method so search interfaces if (IsProxyMethod()) { - result = GetDexCacheResolvedMethods()->Get(GetDexMethodIndex()); + result = GetDexCacheResolvedMethods()->GetElementPtrSize<ArtMethod*>( + GetDexMethodIndex(), pointer_size); CHECK_EQ(result, Runtime::Current()->GetClassLinker()->FindMethodForProxy(GetDeclaringClass(), this)); } else { - IfTable* iftable = GetDeclaringClass()->GetIfTable(); + mirror::IfTable* iftable = GetDeclaringClass()->GetIfTable(); for (size_t i = 0; i < iftable->Count() && result == nullptr; i++) { - Class* interface = iftable->GetInterface(i); + mirror::Class* interface = iftable->GetInterface(i); for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) { - mirror::ArtMethod* interface_method = interface->GetVirtualMethod(j); - if (HasSameNameAndSignature(this, interface_method)) { + ArtMethod* interface_method = interface->GetVirtualMethod(j, pointer_size); + if (HasSameNameAndSignature( + this, interface_method->GetInterfaceMethodIfProxy(sizeof(void*)))) { result = interface_method; break; } @@ -161,9 +143,8 @@ ArtMethod* ArtMethod::FindOverriddenMethod() { } } } - if (kIsDebugBuild) { - DCHECK(result == nullptr || HasSameNameAndSignature(this, result)); - } + DCHECK(result == nullptr || HasSameNameAndSignature( + GetInterfaceMethodIfProxy(sizeof(void*)), result->GetInterfaceMethodIfProxy(sizeof(void*)))); return result; } @@ -264,9 +245,9 @@ uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failur return UINTPTR_MAX; } -uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> exception_type, +uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc, bool* 
has_no_move_exception) { - const DexFile::CodeItem* code_item = h_this->GetCodeItem(); + const DexFile::CodeItem* code_item = GetCodeItem(); // Set aside the exception while we resolve its type. Thread* self = Thread::Current(); StackHandleScope<1> hs(self); @@ -283,7 +264,7 @@ uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> excep break; } // Does this catch exception type apply? - Class* iter_exception_type = h_this->GetClassFromTypeIndex(iter_type_idx, true); + mirror::Class* iter_exception_type = GetClassFromTypeIndex(iter_type_idx, true); if (UNLIKELY(iter_exception_type == nullptr)) { // Now have a NoClassDefFoundError as exception. Ignore in case the exception class was // removed by a pro-guard like tool. @@ -293,7 +274,7 @@ uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> excep // release its in use context at the end. delete self->GetLongJumpContext(); LOG(WARNING) << "Unresolved exception class when finding catch block: " - << DescriptorToDot(h_this->GetTypeDescriptorFromTypeIdx(iter_type_idx)); + << DescriptorToDot(GetTypeDescriptorFromTypeIdx(iter_type_idx)); } else if (iter_exception_type->IsAssignableFrom(exception_type.Get())) { found_dex_pc = it.GetHandlerAddress(); break; @@ -375,7 +356,8 @@ const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) { #ifndef NDEBUG uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) { CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge()); - CHECK_EQ(quick_entry_point, Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*))); + CHECK_EQ(quick_entry_point, + Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*))); return pc - reinterpret_cast<uintptr_t>(quick_entry_point); } #endif @@ -390,7 +372,7 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* if (kIsDebugBuild) { self->AssertThreadSuspensionIsAllowable(); CHECK_EQ(kRunnable, 
self->GetState()); - CHECK_STREQ(GetShorty(), shorty); + CHECK_STREQ(GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(), shorty); } // Push a transition back into managed code onto the linked list in thread. @@ -405,16 +387,20 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* if (IsStatic()) { art::interpreter::EnterInterpreterFromInvoke(self, this, nullptr, args, result); } else { - Object* receiver = reinterpret_cast<StackReference<Object>*>(&args[0])->AsMirrorPtr(); + mirror::Object* receiver = + reinterpret_cast<StackReference<mirror::Object>*>(&args[0])->AsMirrorPtr(); art::interpreter::EnterInterpreterFromInvoke(self, this, receiver, args + 1, result); } } else { - const bool kLogInvocationStartAndReturn = false; + DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); + + constexpr bool kLogInvocationStartAndReturn = false; bool have_quick_code = GetEntryPointFromQuickCompiledCode() != nullptr; if (LIKELY(have_quick_code)) { if (kLogInvocationStartAndReturn) { - LOG(INFO) << StringPrintf("Invoking '%s' quick code=%p", PrettyMethod(this).c_str(), - GetEntryPointFromQuickCompiledCode()); + LOG(INFO) << StringPrintf( + "Invoking '%s' quick code=%p static=%d", PrettyMethod(this).c_str(), + GetEntryPointFromQuickCompiledCode(), static_cast<int>(IsStatic() ? 1 : 0)); } // Ensure that we won't be accidentally calling quick compiled code when -Xint. @@ -481,6 +467,11 @@ QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() { return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); } + // This goes before IsProxyMethod since runtime methods have a null declaring class. + if (UNLIKELY(IsRuntimeMethod())) { + return runtime->GetRuntimeMethodFrameInfo(this); + } + // For Proxy method we add special handling for the direct method case (there is only one // direct method - constructor). 
Direct method is cloned from original // java.lang.reflect.Proxy class together with code and as a result it is executed as usual @@ -497,10 +488,6 @@ QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() { } } - if (UNLIKELY(IsRuntimeMethod())) { - return runtime->GetRuntimeMethodFrameInfo(this); - } - const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*)); ClassLinker* class_linker = runtime->GetClassLinker(); // On failure, instead of null we get the quick-generic-jni-trampoline for native method @@ -516,11 +503,9 @@ QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() { QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); // Callee saves + handle scope + method ref + alignment - size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size - - sizeof(void*) // callee-save frame stores a whole method pointer - + sizeof(StackReference<mirror::ArtMethod>), - kStackAlignment); - + // Note: -sizeof(void*) since callee-save frame stores a whole method pointer. + size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) + + sizeof(ArtMethod*) + scope_size, kStackAlignment); return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask()); } @@ -570,5 +555,4 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param return true; } -} // namespace mirror } // namespace art diff --git a/runtime/mirror/art_method.h b/runtime/art_method.h index 0da5925b6c..4a1e2c4532 100644 --- a/runtime/mirror/art_method.h +++ b/runtime/art_method.h @@ -14,50 +14,61 @@ * limitations under the License. 
*/ -#ifndef ART_RUNTIME_MIRROR_ART_METHOD_H_ -#define ART_RUNTIME_MIRROR_ART_METHOD_H_ +#ifndef ART_RUNTIME_ART_METHOD_H_ +#define ART_RUNTIME_ART_METHOD_H_ #include "dex_file.h" #include "gc_root.h" #include "invoke_type.h" #include "method_reference.h" #include "modifiers.h" -#include "object.h" +#include "mirror/object.h" #include "object_callbacks.h" #include "quick/quick_method_frame_info.h" #include "read_barrier_option.h" #include "stack.h" #include "stack_map.h" +#include "utils.h" namespace art { -struct ArtMethodOffsets; -struct ConstructorMethodOffsets; union JValue; class ScopedObjectAccessAlreadyRunnable; class StringPiece; class ShadowFrame; namespace mirror { +class Array; +class Class; +class PointerArray; +} // namespace mirror typedef void (EntryPointFromInterpreter)(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result); -#define ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT - -// C++ mirror of java.lang.reflect.ArtMethod. -class MANAGED ArtMethod FINAL : public Object { +class ArtMethod FINAL { public: - // Size of java.lang.reflect.ArtMethod.class. 
- static uint32_t ClassSize(); + ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0), + method_index_(0) { } + + ArtMethod(const ArtMethod& src, size_t image_pointer_size) { + CopyFrom(&src, image_pointer_size); + } static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject jlr_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* GetDeclaringClass() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE mirror::Class* GetDeclaringClassNoBarrier() + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked() + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetDeclaringClass(mirror::Class *new_declaring_class) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static MemberOffset DeclaringClassOffset() { return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_)); @@ -65,9 +76,9 @@ class MANAGED ArtMethod FINAL : public Object { ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetAccessFlags(uint32_t new_access_flags) { // Not called within a transaction. - SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_), new_access_flags); + access_flags_ = new_access_flags; } // Approximate what kind of method call would be used for this method. @@ -180,7 +191,7 @@ class MANAGED ArtMethod FINAL : public Object { void SetMethodIndex(uint16_t new_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Not called within a transaction. 
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_), new_method_index); + method_index_ = new_method_index; } static MemberOffset DexMethodIndexOffset() { @@ -191,13 +202,13 @@ class MANAGED ArtMethod FINAL : public Object { return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_); } - uint32_t GetCodeItemOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_)); + uint32_t GetCodeItemOffset() { + return dex_code_item_offset_; } - void SetCodeItemOffset(uint32_t new_code_off) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetCodeItemOffset(uint32_t new_code_off) { // Not called within a transaction. - SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_), new_code_off); + dex_code_item_offset_ = new_code_off; } // Number of 32bit registers that would be required to hold all the arguments @@ -205,9 +216,9 @@ class MANAGED ArtMethod FINAL : public Object { ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetDexMethodIndex(uint32_t new_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetDexMethodIndex(uint32_t new_idx) { // Not called within a transaction. 
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_), new_idx); + dex_method_index_ = new_idx; } static MemberOffset DexCacheResolvedMethodsOffset() { @@ -218,26 +229,29 @@ class MANAGED ArtMethod FINAL : public Object { return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_); } - ALWAYS_INLINE ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() + ALWAYS_INLINE mirror::PointerArray* GetDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx) + ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx, size_t ptr_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method) + ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method, + size_t ptr_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods) + ALWAYS_INLINE void SetDexCacheResolvedMethods(mirror::PointerArray* new_dex_cache_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool HasSameDexCacheResolvedMethods(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool HasSameDexCacheResolvedMethods(ObjectArray<ArtMethod>* other_cache) + bool HasSameDexCacheResolvedMethods(ArtMethod* other) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool HasSameDexCacheResolvedMethods(mirror::PointerArray* other_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <bool kWithCheck = true> - Class* GetDexCacheResolvedType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_types) + mirror::Class* GetDexCacheResolvedType(uint32_t type_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void 
SetDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* new_dex_cache_types) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool HasSameDexCacheResolvedTypes(ObjectArray<Class>* other_cache) + bool HasSameDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* other_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the Class* from the type index into this method's dex cache. @@ -245,7 +259,7 @@ class MANAGED ArtMethod FINAL : public Object { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Find the method that this method overrides. - ArtMethod* FindOverriddenMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Find the method index for this method within other_dexfile. If this method isn't present then // return DexFile::kDexNoIndex. 
The name_and_signature_idx MUST refer to a MethodId with the same @@ -258,59 +272,39 @@ class MANAGED ArtMethod FINAL : public Object { void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - EntryPointFromInterpreter* GetEntryPointFromInterpreter() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckObjectSizeEqualsMirrorSize(); + EntryPointFromInterpreter* GetEntryPointFromInterpreter() { return GetEntryPointFromInterpreterPtrSize(sizeof(void*)); } - template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldPtrWithSize<EntryPointFromInterpreter*, kVerifyFlags>( + EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size) { + return GetEntryPoint<EntryPointFromInterpreter*>( EntryPointFromInterpreterOffset(pointer_size), pointer_size); } - template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckObjectSizeEqualsMirrorSize(); + void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter) { SetEntryPointFromInterpreterPtrSize(entry_point_from_interpreter, sizeof(void*)); } - template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> void SetEntryPointFromInterpreterPtrSize(EntryPointFromInterpreter* entry_point_from_interpreter, - size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldPtrWithSize<false, true, kVerifyFlags>( - EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter, pointer_size); + size_t pointer_size) { + SetEntryPoint(EntryPointFromInterpreterOffset(pointer_size), 
entry_point_from_interpreter, + pointer_size); } - template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - const void* GetEntryPointFromQuickCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckObjectSizeEqualsMirrorSize(); + const void* GetEntryPointFromQuickCompiledCode() { return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*)); } - template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldPtrWithSize<const void*, kVerifyFlags>( + ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size) { + return GetEntryPoint<const void*>( EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size); } - template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckObjectSizeEqualsMirrorSize(); + void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) { SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code, sizeof(void*)); } - template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize( - const void* entry_point_from_quick_compiled_code, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldPtrWithSize<false, true, kVerifyFlags>( - EntryPointFromQuickCompiledCodeOffset(pointer_size), entry_point_from_quick_compiled_code, - pointer_size); + const void* entry_point_from_quick_compiled_code, size_t pointer_size) { + SetEntryPoint(EntryPointFromQuickCompiledCodeOffset(pointer_size), + entry_point_from_quick_compiled_code, pointer_size); } uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -318,7 +312,7 @@ class MANAGED ArtMethod FINAL : 
public Object { // Check whether the given PC is within the quick compiled code associated with this method's // quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for // debug purposes. - bool PcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool PcIsWithinQuickCode(uintptr_t pc) { return PcIsWithinQuickCode( reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc); } @@ -330,8 +324,8 @@ class MANAGED ArtMethod FINAL : public Object { // interpretered on invocation. bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - uint32_t GetQuickOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetQuickOatCodeOffset(); + void SetQuickOatCodeOffset(uint32_t code_offset); ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) { uintptr_t code = reinterpret_cast<uintptr_t>(entry_point); @@ -394,7 +388,7 @@ class MANAGED ArtMethod FINAL : public Object { } FrameOffset GetHandleScopeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - constexpr size_t handle_scope_offset = sizeof(StackReference<mirror::ArtMethod>); + constexpr size_t handle_scope_offset = sizeof(ArtMethod*); DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes()); return FrameOffset(handle_scope_offset); } @@ -419,30 +413,23 @@ class MANAGED ArtMethod FINAL : public Object { PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size); } - void* GetEntryPointFromJni() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckObjectSizeEqualsMirrorSize(); + void* GetEntryPointFromJni() { return GetEntryPointFromJniPtrSize(sizeof(void*)); } - ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldPtrWithSize<void*>(EntryPointFromJniOffset(pointer_size), pointer_size); + 
ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size) { + return GetEntryPoint<void*>(EntryPointFromJniOffset(pointer_size), pointer_size); } - template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckObjectSizeEqualsMirrorSize(); - SetEntryPointFromJniPtrSize<kVerifyFlags>(entrypoint, sizeof(void*)); + SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*)); } - template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldPtrWithSize<false, true, kVerifyFlags>( - EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size); + ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) { + SetEntryPoint(EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size); } // Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal // conventions for a method of managed code. Returns false for Proxy methods. - bool IsRuntimeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE bool IsRuntimeMethod(); // Is this a hand crafted method used for something like describing callee saves? bool IsCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -479,19 +466,12 @@ class MANAGED ArtMethod FINAL : public Object { // Find the catch block for the given exception type and dex_pc. When a catch block is found, // indicates whether the found catch block is responsible for clearing the exception or whether // a move-exception instruction is present. 
- static uint32_t FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> exception_type, - uint32_t dex_pc, bool* has_no_move_exception) + uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc, + bool* has_no_move_exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void SetClass(Class* java_lang_reflect_ArtMethod); - - template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - static Class* GetJavaLangReflectArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - static void ResetClass(); - - static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<typename RootVisitorType> + void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -539,37 +519,35 @@ class MANAGED ArtMethod FINAL : public Object { mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // May cause thread suspension due to class resolution. bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static size_t SizeWithoutPointerFields(size_t pointer_size) { - size_t total = sizeof(ArtMethod) - sizeof(PtrSizedFields); -#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT - // Add 4 bytes if 64 bit, otherwise 0. - total += pointer_size - sizeof(uint32_t); -#endif - return total; - } - - // Size of an instance of java.lang.reflect.ArtMethod not including its value array. - static size_t InstanceSize(size_t pointer_size) { - return SizeWithoutPointerFields(pointer_size) + + // Size of an instance of this object. 
+ static size_t ObjectSize(size_t pointer_size) { + return RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size) + (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size; } + void CopyFrom(const ArtMethod* src, size_t image_pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ALWAYS_INLINE mirror::ObjectArray<mirror::Class>* GetDexCacheResolvedTypes() + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + protected: // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". // The class we are a part of. - HeapReference<Class> declaring_class_; + GcRoot<mirror::Class> declaring_class_; // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access. - HeapReference<ObjectArray<ArtMethod>> dex_cache_resolved_methods_; + GcRoot<mirror::PointerArray> dex_cache_resolved_methods_; // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access. - HeapReference<ObjectArray<Class>> dex_cache_resolved_types_; + GcRoot<mirror::ObjectArray<mirror::Class>> dex_cache_resolved_types_; // Access flags; low 16 bits are defined by spec. uint32_t access_flags_; @@ -592,6 +570,8 @@ class MANAGED ArtMethod FINAL : public Object { // Fake padding field gets inserted here. // Must be the last fields in the method. + // PACKED(4) is necessary for the correctness of + // RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size). struct PACKED(4) PtrSizedFields { // Method dispatch from the interpreter invokes this pointer which may cause a bridge into // compiled code. 
@@ -605,21 +585,36 @@ class MANAGED ArtMethod FINAL : public Object { void* entry_point_from_quick_compiled_code_; } ptr_sized_fields_; - static GcRoot<Class> java_lang_reflect_ArtMethod_; - private: - ALWAYS_INLINE void CheckObjectSizeEqualsMirrorSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ALWAYS_INLINE ObjectArray<Class>* GetDexCacheResolvedTypes() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static size_t PtrSizedFieldsOffset(size_t pointer_size) { - size_t offset = OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_); -#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT - // Add 4 bytes if 64 bit, otherwise 0. - offset += pointer_size - sizeof(uint32_t); -#endif - return offset; + // Round up to pointer size for padding field. + return RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size); + } + + template<typename T> + ALWAYS_INLINE T GetEntryPoint(MemberOffset offset, size_t pointer_size) const { + DCHECK(ValidPointerSize(pointer_size)) << pointer_size; + const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value(); + if (pointer_size == sizeof(uint32_t)) { + return reinterpret_cast<T>(*reinterpret_cast<const uint32_t*>(addr)); + } else { + auto v = *reinterpret_cast<const uint64_t*>(addr); + DCHECK_EQ(reinterpret_cast<uint64_t>(reinterpret_cast<T>(v)), v) << "Conversion lost bits"; + return reinterpret_cast<T>(v); + } + } + + template<typename T> + ALWAYS_INLINE void SetEntryPoint(MemberOffset offset, T new_value, size_t pointer_size) { + DCHECK(ValidPointerSize(pointer_size)) << pointer_size; + const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value(); + if (pointer_size == sizeof(uint32_t)) { + uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value); + DCHECK_EQ(static_cast<uint32_t>(ptr), ptr) << "Conversion lost bits"; + *reinterpret_cast<uint32_t*>(addr) = static_cast<uint32_t>(ptr); + } else { + *reinterpret_cast<uint64_t*>(addr) = reinterpret_cast<uintptr_t>(new_value); + } } // Code points to the start 
of the quick code. @@ -640,11 +635,9 @@ class MANAGED ArtMethod FINAL : public Object { EntryPointToCodePointer(reinterpret_cast<const void*>(code))); } - friend struct art::ArtMethodOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod); + DISALLOW_COPY_AND_ASSIGN(ArtMethod); // Need to use CopyFrom to deal with 32 vs 64 bits. }; -} // namespace mirror } // namespace art -#endif // ART_RUNTIME_MIRROR_ART_METHOD_H_ +#endif // ART_RUNTIME_ART_METHOD_H_ diff --git a/runtime/asm_support.h b/runtime/asm_support.h index 3e677a4dbe..d7efe1c21e 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -18,8 +18,8 @@ #define ART_RUNTIME_ASM_SUPPORT_H_ #if defined(__cplusplus) +#include "art_method.h" #include "lock_word.h" -#include "mirror/art_method.h" #include "mirror/class.h" #include "mirror/string.h" #include "runtime.h" @@ -69,12 +69,12 @@ ADD_TEST_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET), art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kSaveAll)) // Offset of field Runtime::callee_save_methods_[kRefsOnly] -#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET COMPRESSED_REFERENCE_SIZE +#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 8 ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET), art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsOnly)) // Offset of field Runtime::callee_save_methods_[kRefsAndArgs] -#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET (2 * COMPRESSED_REFERENCE_SIZE) +#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET (2 * 8) ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET), art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsAndArgs)) @@ -135,13 +135,13 @@ ADD_TEST_EQ(size_t(MIRROR_OBJECT_HEADER_SIZE), sizeof(art::mirror::Object)) #define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE) ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET, 
art::mirror::Class::ComponentTypeOffset().Int32Value()) -#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (44 + MIRROR_OBJECT_HEADER_SIZE) +#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (36 + MIRROR_OBJECT_HEADER_SIZE) ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET, art::mirror::Class::AccessFlagsOffset().Int32Value()) -#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE) +#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE) ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET, art::mirror::Class::ObjectSizeOffset().Int32Value()) -#define MIRROR_CLASS_STATUS_OFFSET (108 + MIRROR_OBJECT_HEADER_SIZE) +#define MIRROR_CLASS_STATUS_OFFSET (124 + MIRROR_OBJECT_HEADER_SIZE) ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET, art::mirror::Class::StatusOffset().Int32Value()) @@ -169,6 +169,10 @@ ADD_TEST_EQ(MIRROR_OBJECT_ARRAY_DATA_OFFSET, ADD_TEST_EQ(static_cast<size_t>(MIRROR_OBJECT_ARRAY_COMPONENT_SIZE), sizeof(art::mirror::HeapReference<art::mirror::Object>)) +#define MIRROR_LONG_ARRAY_DATA_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE) +ADD_TEST_EQ(MIRROR_LONG_ARRAY_DATA_OFFSET, + art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value()) + // Offsets within java.lang.String. #define MIRROR_STRING_COUNT_OFFSET MIRROR_OBJECT_HEADER_SIZE ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32Value()) @@ -177,21 +181,21 @@ ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32 ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value()) // Offsets within java.lang.reflect.ArtMethod. 
-#define MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE) -ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET, - art::mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()) +#define ART_METHOD_DEX_CACHE_METHODS_OFFSET 4 +ADD_TEST_EQ(ART_METHOD_DEX_CACHE_METHODS_OFFSET, + art::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()) -#define MIRROR_ART_METHOD_DEX_CACHE_TYPES_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE) -ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_TYPES_OFFSET, - art::mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()) +#define ART_METHOD_DEX_CACHE_TYPES_OFFSET 8 +ADD_TEST_EQ(ART_METHOD_DEX_CACHE_TYPES_OFFSET, + art::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()) -#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32 (36 + MIRROR_OBJECT_HEADER_SIZE) -ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32, - art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()) +#define ART_METHOD_QUICK_CODE_OFFSET_32 36 +ADD_TEST_EQ(ART_METHOD_QUICK_CODE_OFFSET_32, + art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()) -#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64 (48 + MIRROR_OBJECT_HEADER_SIZE) -ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64, - art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value()) +#define ART_METHOD_QUICK_CODE_OFFSET_64 48 +ADD_TEST_EQ(ART_METHOD_QUICK_CODE_OFFSET_64, + art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value()) #define LOCK_WORD_STATE_SHIFT 30 ADD_TEST_EQ(LOCK_WORD_STATE_SHIFT, static_cast<int32_t>(art::LockWord::kStateShift)) diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc index b53fa84f5c..8f2d94b564 100644 --- a/runtime/base/arena_allocator.cc +++ b/runtime/base/arena_allocator.cc @@ -302,6 +302,18 @@ void ArenaAllocator::ObtainNewArenaForAllocation(size_t allocation_size) { end_ = new_arena->End(); } +bool ArenaAllocator::Contains(const void* ptr) const { + if (ptr >= begin_ 
&& ptr < end_) { + return true; + } + for (const Arena* cur_arena = arena_head_; cur_arena != nullptr; cur_arena = cur_arena->next_) { + if (cur_arena->Contains(ptr)) { + return true; + } + } + return false; +} + MemStats::MemStats(const char* name, const ArenaAllocatorStats* stats, const Arena* first_arena, ssize_t lost_bytes_adjustment) : name_(name), diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h index 2e617b500a..d9723b57de 100644 --- a/runtime/base/arena_allocator.h +++ b/runtime/base/arena_allocator.h @@ -142,6 +142,11 @@ class Arena { return bytes_allocated_; } + // Return true if ptr is contained in the arena. + bool Contains(const void* ptr) const { + return memory_ <= ptr && ptr < memory_ + bytes_allocated_; + } + protected: size_t bytes_allocated_; uint8_t* memory_; @@ -219,19 +224,52 @@ class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats return ret; } + // Realloc never frees the input pointer, it is the caller's job to do this if necessary. + void* Realloc(void* ptr, size_t ptr_size, size_t new_size, + ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE { + DCHECK_GE(new_size, ptr_size); + DCHECK_EQ(ptr == nullptr, ptr_size == 0u); + auto* end = reinterpret_cast<uint8_t*>(ptr) + ptr_size; + // If we haven't allocated anything else, we can safely extend. + if (end == ptr_) { + const size_t size_delta = new_size - ptr_size; + // Check remain space. + const size_t remain = end_ - ptr_; + if (remain >= size_delta) { + ptr_ += size_delta; + ArenaAllocatorStats::RecordAlloc(size_delta, kind); + return ptr; + } + } + auto* new_ptr = Alloc(new_size, kind); + memcpy(new_ptr, ptr, ptr_size); + // TODO: Call free on ptr if linear alloc supports free. 
+ return new_ptr; + } + template <typename T> T* AllocArray(size_t length, ArenaAllocKind kind = kArenaAllocMisc) { return static_cast<T*>(Alloc(length * sizeof(T), kind)); } void* AllocValgrind(size_t bytes, ArenaAllocKind kind); + void ObtainNewArenaForAllocation(size_t allocation_size); + size_t BytesAllocated() const; + MemStats GetMemStats() const; + // The BytesUsed method sums up bytes allocated from arenas in arena_head_ and nodes. // TODO: Change BytesAllocated to this behavior? size_t BytesUsed() const; + ArenaPool* GetArenaPool() const { + return pool_; + } + + bool Contains(const void* ptr) const; + private: static constexpr size_t kAlignment = 8; diff --git a/runtime/base/iteration_range.h b/runtime/base/iteration_range.h index 5a46376237..6a0ef1f585 100644 --- a/runtime/base/iteration_range.h +++ b/runtime/base/iteration_range.h @@ -17,6 +17,8 @@ #ifndef ART_RUNTIME_BASE_ITERATION_RANGE_H_ #define ART_RUNTIME_BASE_ITERATION_RANGE_H_ +#include <iterator> + namespace art { // Helper class that acts as a container for range-based loops, given an iteration @@ -38,10 +40,15 @@ class IterationRange { iterator cend() const { return last_; } private: - iterator first_; - iterator last_; + const iterator first_; + const iterator last_; }; +template <typename Iter> +static inline IterationRange<Iter> MakeIterationRange(const Iter& begin_it, const Iter& end_it) { + return IterationRange<Iter>(begin_it, end_it); +} + } // namespace art #endif // ART_RUNTIME_BASE_ITERATION_RANGE_H_ diff --git a/runtime/base/macros.h b/runtime/base/macros.h index c00ae78be8..5c596471c2 100644 --- a/runtime/base/macros.h +++ b/runtime/base/macros.h @@ -50,7 +50,6 @@ friend class test_set_name##_##individual_test##_Test #define ART_FRIEND_TYPED_TEST(test_set_name, individual_test)\ template<typename T> ART_FRIEND_TEST(test_set_name, individual_test) - // DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private: // declarations in a class. 
#if !defined(DISALLOW_COPY_AND_ASSIGN) diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h index df79085626..82db60e4e4 100644 --- a/runtime/base/scoped_arena_containers.h +++ b/runtime/base/scoped_arena_containers.h @@ -20,6 +20,7 @@ #include <deque> #include <queue> #include <set> +#include <unordered_map> #include <vector> #include "arena_containers.h" // For ArenaAllocatorAdapterKind. @@ -55,6 +56,11 @@ template <typename K, typename V, typename Comparator = std::less<K>> using ScopedArenaSafeMap = SafeMap<K, V, Comparator, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>; +template <typename K, typename V, class Hash = std::hash<K>, class KeyEqual = std::equal_to<K>> +using ScopedArenaUnorderedMap = + std::unordered_map<K, V, Hash, KeyEqual, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>; + + // Implementation details below. template <> diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc index 30084d2b51..549eac2016 100644 --- a/runtime/check_jni.cc +++ b/runtime/check_jni.cc @@ -20,6 +20,7 @@ #include <zlib.h> #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/logging.h" #include "base/to_str.h" #include "class_linker.h" @@ -28,7 +29,6 @@ #include "gc/space/space.h" #include "java_vm_ext.h" #include "jni_internal.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" @@ -200,7 +200,7 @@ class ScopedCheck { bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc, jmethodID mid, Primitive::Type type, InvokeType invoke) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = CheckMethodID(soa, mid); + ArtMethod* m = CheckMethodID(soa, mid); if (m == nullptr) { return false; } @@ -270,7 +270,7 @@ class ScopedCheck { */ bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = 
CheckMethodID(soa, mid); + ArtMethod* m = CheckMethodID(soa, mid); if (m == nullptr) { return false; } @@ -291,7 +291,7 @@ class ScopedCheck { */ bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = CheckMethodID(soa, mid); + ArtMethod* m = CheckMethodID(soa, mid); if (m == nullptr) { return false; } @@ -344,7 +344,7 @@ class ScopedCheck { */ bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* traceMethod = nullptr; + ArtMethod* traceMethod = nullptr; if (has_method_ && soa.Vm()->IsTracingEnabled()) { // We need to guard some of the invocation interface's calls: a bad caller might // use DetachCurrentThread or GetEnv on a thread that's not yet attached. @@ -399,7 +399,7 @@ class ScopedCheck { Thread* self = Thread::Current(); if ((flags_ & kFlag_Invocation) == 0 || self != nullptr) { ScopedObjectAccess soa(self); - mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr); + ArtMethod* traceMethod = self->GetCurrentMethod(nullptr); should_trace = (traceMethod != nullptr && vm->ShouldTrace(traceMethod)); } } @@ -418,7 +418,7 @@ class ScopedCheck { if (has_method_) { Thread* self = Thread::Current(); ScopedObjectAccess soa(self); - mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr); + ArtMethod* traceMethod = self->GetCurrentMethod(nullptr); std::string methodName(PrettyMethod(traceMethod, false)); LOG(INFO) << "JNI: " << methodName << " -> " << function_name_ << "(" << msg << ")"; indent_ = methodName.size() + 1; @@ -462,13 +462,13 @@ class ScopedCheck { bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = soa.DecodeMethod(mid); + ArtMethod* method = soa.DecodeMethod(mid); if (method == nullptr) { AbortF("expected non-null constructor"); return false; } if 
(!method->IsConstructor() || method->IsStatic()) { - AbortF("expected a constructor but %s: %p", PrettyTypeOf(method).c_str(), mid); + AbortF("expected a constructor but %s: %p", PrettyMethod(method).c_str(), mid); return false; } return true; @@ -825,7 +825,7 @@ class ScopedCheck { } case 'm': { // jmethodID jmethodID mid = arg.m; - mirror::ArtMethod* m = soa.DecodeMethod(mid); + ArtMethod* m = soa.DecodeMethod(mid); *msg += PrettyMethod(m); if (!entry) { StringAppendF(msg, " (%p)", mid); @@ -998,14 +998,15 @@ class ScopedCheck { return f; } - mirror::ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid) + ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (mid == nullptr) { AbortF("jmethodID was NULL"); return nullptr; } - mirror::ArtMethod* m = soa.DecodeMethod(mid); - if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) { + ArtMethod* m = soa.DecodeMethod(mid); + // TODO: Better check here. 
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m->GetDeclaringClass())) { Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); AbortF("invalid jmethodID: %p", mid); return nullptr; diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h index d87a563d73..d323379e4c 100644 --- a/runtime/check_reference_map_visitor.h +++ b/runtime/check_reference_map_visitor.h @@ -17,8 +17,8 @@ #ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_ #define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_ +#include "art_method-inl.h" #include "gc_map.h" -#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" #include "stack_map.h" @@ -32,7 +32,7 @@ class CheckReferenceMapVisitor : public StackVisitor { : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m->IsCalleeSaveMethod() || m->IsNative()) { CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex); } @@ -63,7 +63,7 @@ class CheckReferenceMapVisitor : public StackVisitor { private: void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); CodeInfo code_info = m->GetOptimizedCodeInfo(); StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset); uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_; @@ -104,7 +104,7 @@ class CheckReferenceMapVisitor : public StackVisitor { void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); NativePcOffsetToReferenceMap map(m->GetNativeGcMap(sizeof(void*))); const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset); CHECK(ref_bitmap); diff --git 
a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h index 1428749aca..df6703cc7b 100644 --- a/runtime/class_linker-inl.h +++ b/runtime/class_linker-inl.h @@ -57,7 +57,7 @@ inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class** } inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx, - mirror::ArtMethod* referrer) { + ArtMethod* referrer) { mirror::Class* declaring_class = referrer->GetDeclaringClass(); mirror::String* resolved_string = declaring_class->GetDexCacheStrings()->Get(string_idx); if (UNLIKELY(resolved_string == nullptr)) { @@ -73,7 +73,7 @@ inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx, } inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, - mirror::ArtMethod* referrer) { + ArtMethod* referrer) { mirror::Class* resolved_type = referrer->GetDexCacheResolvedType(type_idx); if (UNLIKELY(resolved_type == nullptr)) { mirror::Class* declaring_class = referrer->GetDeclaringClass(); @@ -104,30 +104,27 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, ArtField* refe return resolved_type; } -inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx, - mirror::ArtMethod* referrer) { - mirror::ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx); +inline ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer) { + ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod( + method_idx, image_pointer_size_); if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) { return nullptr; } return resolved_method; } -inline mirror::ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx, - mirror::ArtMethod** referrer, - InvokeType type) { - mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer); - if (LIKELY(resolved_method != nullptr)) { - return resolved_method; +inline ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx, 
+ ArtMethod* referrer, InvokeType type) { + ArtMethod* resolved_method = GetResolvedMethod(method_idx, referrer); + if (UNLIKELY(resolved_method == nullptr)) { + mirror::Class* declaring_class = referrer->GetDeclaringClass(); + StackHandleScope<2> hs(self); + Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache())); + Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader())); + const DexFile* dex_file = h_dex_cache->GetDexFile(); + resolved_method = ResolveMethod(*dex_file, method_idx, h_dex_cache, h_class_loader, referrer, + type); } - mirror::Class* declaring_class = (*referrer)->GetDeclaringClass(); - StackHandleScope<3> hs(self); - Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache())); - Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader())); - HandleWrapper<mirror::ArtMethod> h_referrer(hs.NewHandleWrapper(referrer)); - const DexFile* dex_file = h_dex_cache->GetDexFile(); - resolved_method = ResolveMethod(*dex_file, method_idx, h_dex_cache, h_class_loader, h_referrer, - type); // Note: We cannot check here to see whether we added the method to the cache. It // might be an erroneous class, which results in it being hidden from us. 
return resolved_method; @@ -142,8 +139,8 @@ inline ArtField* ClassLinker::GetResolvedField( return GetResolvedField(field_idx, field_declaring_class->GetDexCache()); } -inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer, - bool is_static) { +inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, ArtMethod* referrer, + bool is_static) { mirror::Class* declaring_class = referrer->GetDeclaringClass(); ArtField* resolved_field = GetResolvedField(field_idx, declaring_class); if (UNLIKELY(resolved_field == nullptr)) { @@ -179,12 +176,6 @@ inline mirror::ObjectArray<mirror::String>* ClassLinker::AllocStringArray(Thread length); } -inline mirror::ObjectArray<mirror::ArtMethod>* ClassLinker::AllocArtMethodArray(Thread* self, - size_t length) { - return mirror::ObjectArray<mirror::ArtMethod>::Alloc(self, - GetClassRoot(kJavaLangReflectArtMethodArrayClass), length); -} - inline mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount) { return down_cast<mirror::IfTable*>( mirror::IfTable::Alloc(self, GetClassRoot(kObjectArrayClass), diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index a028942c7a..fb2debd63f 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -26,8 +26,11 @@ #include <vector> #include "art_field-inl.h" +#include "art_method-inl.h" +#include "base/arena_allocator.h" #include "base/casts.h" #include "base/logging.h" +#include "base/scoped_arena_containers.h" #include "base/scoped_flock.h" #include "base/stl_util.h" #include "base/time_utils.h" @@ -54,7 +57,6 @@ #include "oat_file.h" #include "oat_file_assistant.h" #include "object_lock.h" -#include "mirror/art_method-inl.h" #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" @@ -94,9 +96,9 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) 
{ va_end(args); } -static bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = self->GetCurrentMethod(nullptr); +bool ClassLinker::HasInitWithString( + Thread* self, ClassLinker* class_linker, const char* descriptor) { + ArtMethod* method = self->GetCurrentMethod(nullptr); StackHandleScope<1> hs(self); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ? method->GetDeclaringClass()->GetClassLoader() @@ -110,8 +112,8 @@ static bool HasInitWithString(Thread* self, ClassLinker* class_linker, const cha return false; } - mirror::ArtMethod* exception_init_method = - exception_class->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V"); + ArtMethod* exception_init_method = exception_class->FindDeclaredDirectMethod( + "<init>", "(Ljava/lang/String;)V", image_pointer_size_); return exception_init_method != nullptr; } @@ -275,46 +277,51 @@ ClassLinker::ClassLinker(InternTable* intern_table) quick_to_interpreter_bridge_trampoline_(nullptr), image_pointer_size_(sizeof(void*)) { CHECK(intern_table_ != nullptr); - for (size_t i = 0; i < kFindArrayCacheSize; ++i) { - find_array_class_cache_[i] = GcRoot<mirror::Class>(nullptr); + for (auto& root : find_array_class_cache_) { + root = GcRoot<mirror::Class>(nullptr); } } void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path) { VLOG(startup) << "ClassLinker::Init"; - CHECK(!Runtime::Current()->GetHeap()->HasImageSpace()) << "Runtime has image. We should use it."; + Thread* const self = Thread::Current(); + Runtime* const runtime = Runtime::Current(); + gc::Heap* const heap = runtime->GetHeap(); + + CHECK(!heap->HasImageSpace()) << "Runtime has image. We should use it."; CHECK(!init_done_); + // Use the pointer size from the runtime since we are probably creating the image. 
+ image_pointer_size_ = InstructionSetPointerSize(runtime->GetInstructionSet()); + // java_lang_Class comes first, it's needed for AllocClass - Thread* const self = Thread::Current(); - gc::Heap* const heap = Runtime::Current()->GetHeap(); // The GC can't handle an object with a null class since we can't get the size of this object. heap->IncrementDisableMovingGC(self); StackHandleScope<64> hs(self); // 64 is picked arbitrarily. + auto class_class_size = mirror::Class::ClassClassSize(image_pointer_size_); Handle<mirror::Class> java_lang_Class(hs.NewHandle(down_cast<mirror::Class*>( - heap->AllocNonMovableObject<true>(self, nullptr, - mirror::Class::ClassClassSize(), - VoidFunctor())))); + heap->AllocNonMovableObject<true>(self, nullptr, class_class_size, VoidFunctor())))); CHECK(java_lang_Class.Get() != nullptr); mirror::Class::SetClassClass(java_lang_Class.Get()); java_lang_Class->SetClass(java_lang_Class.Get()); if (kUseBakerOrBrooksReadBarrier) { java_lang_Class->AssertReadBarrierPointer(); } - java_lang_Class->SetClassSize(mirror::Class::ClassClassSize()); + java_lang_Class->SetClassSize(class_class_size); java_lang_Class->SetPrimitiveType(Primitive::kPrimNot); heap->DecrementDisableMovingGC(self); // AllocClass(mirror::Class*) can now be used // Class[] is used for reflection support. + auto class_array_class_size = mirror::ObjectArray<mirror::Class>::ClassSize(image_pointer_size_); Handle<mirror::Class> class_array_class(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::ObjectArray<mirror::Class>::ClassSize()))); + AllocClass(self, java_lang_Class.Get(), class_array_class_size))); class_array_class->SetComponentType(java_lang_Class.Get()); // java_lang_Object comes next so that object_array_class can be created. 
Handle<mirror::Class> java_lang_Object(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::Object::ClassSize()))); + AllocClass(self, java_lang_Class.Get(), mirror::Object::ClassSize(image_pointer_size_)))); CHECK(java_lang_Object.Get() != nullptr); // backfill Object as the super class of Class. java_lang_Class->SetSuperClass(java_lang_Object.Get()); @@ -322,12 +329,14 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // Object[] next to hold class roots. Handle<mirror::Class> object_array_class(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::ObjectArray<mirror::Object>::ClassSize()))); + AllocClass(self, java_lang_Class.Get(), + mirror::ObjectArray<mirror::Object>::ClassSize(image_pointer_size_)))); object_array_class->SetComponentType(java_lang_Object.Get()); // Setup the char (primitive) class to be used for char[]. Handle<mirror::Class> char_class(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::Class::PrimitiveClassSize()))); + AllocClass(self, java_lang_Class.Get(), + mirror::Class::PrimitiveClassSize(image_pointer_size_)))); // The primitive char class won't be initialized by // InitializePrimitiveClass until line 459, but strings (and // internal char arrays) will be allocated before that and the @@ -337,21 +346,20 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // Setup the char[] class to be used for String. Handle<mirror::Class> char_array_class(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), - mirror::Array::ClassSize()))); + AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_)))); char_array_class->SetComponentType(char_class.Get()); mirror::CharArray::SetArrayClass(char_array_class.Get()); // Setup String. 
Handle<mirror::Class> java_lang_String(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize()))); + AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_)))); mirror::String::SetClass(java_lang_String.Get()); mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusResolved, self); java_lang_String->SetStringClass(); // Setup java.lang.ref.Reference. Handle<mirror::Class> java_lang_ref_Reference(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::Reference::ClassSize()))); + AllocClass(self, java_lang_Class.Get(), mirror::Reference::ClassSize(image_pointer_size_)))); mirror::Reference::SetClass(java_lang_ref_Reference.Get()); java_lang_ref_Reference->SetObjectSize(mirror::Reference::InstanceSize()); mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusResolved, self); @@ -384,14 +392,14 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // Create int array type for AllocDexCache (done in AppendToBootClassPath). Handle<mirror::Class> int_array_class(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize()))); + AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_)))); int_array_class->SetComponentType(GetClassRoot(kPrimitiveInt)); mirror::IntArray::SetArrayClass(int_array_class.Get()); SetClassRoot(kIntArrayClass, int_array_class.Get()); // Create long array type for AllocDexCache (done in AppendToBootClassPath). 
Handle<mirror::Class> long_array_class(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize()))); + AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_)))); long_array_class->SetComponentType(GetClassRoot(kPrimitiveLong)); mirror::LongArray::SetArrayClass(long_array_class.Get()); SetClassRoot(kLongArrayClass, long_array_class.Get()); @@ -400,35 +408,22 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // Set up DexCache. This cannot be done later since AppendToBootClassPath calls AllocDexCache. Handle<mirror::Class> java_lang_DexCache(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::DexCache::ClassSize()))); + AllocClass(self, java_lang_Class.Get(), mirror::DexCache::ClassSize(image_pointer_size_)))); SetClassRoot(kJavaLangDexCache, java_lang_DexCache.Get()); java_lang_DexCache->SetObjectSize(mirror::DexCache::InstanceSize()); mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusResolved, self); - // Constructor, Method, and AbstractMethod are necessary so - // that FindClass can link members. 
- - Handle<mirror::Class> java_lang_reflect_ArtMethod(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::ArtMethod::ClassSize()))); - CHECK(java_lang_reflect_ArtMethod.Get() != nullptr); - size_t pointer_size = GetInstructionSetPointerSize(Runtime::Current()->GetInstructionSet()); - java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize(pointer_size)); - SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get()); - mirror::Class::SetStatus(java_lang_reflect_ArtMethod, mirror::Class::kStatusResolved, self); - mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get()); - // Set up array classes for string, field, method Handle<mirror::Class> object_array_string(hs.NewHandle( AllocClass(self, java_lang_Class.Get(), - mirror::ObjectArray<mirror::String>::ClassSize()))); + mirror::ObjectArray<mirror::String>::ClassSize(image_pointer_size_)))); object_array_string->SetComponentType(java_lang_String.Get()); SetClassRoot(kJavaLangStringArrayClass, object_array_string.Get()); - Handle<mirror::Class> object_array_art_method(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), - mirror::ObjectArray<mirror::ArtMethod>::ClassSize()))); - object_array_art_method->SetComponentType(java_lang_reflect_ArtMethod.Get()); - SetClassRoot(kJavaLangReflectArtMethodArrayClass, object_array_art_method.Get()); + // Create runtime resolution and imt conflict methods. + runtime->SetResolutionMethod(runtime->CreateResolutionMethod()); + runtime->SetImtConflictMethod(runtime->CreateImtConflictMethod()); + runtime->SetImtUnimplementedMethod(runtime->CreateImtConflictMethod()); // Setup boot_class_path_ and register class_path now that we can use AllocObjectArray to create // DexCache instances. 
Needs to be after String, Field, Method arrays since AllocDexCache uses @@ -446,13 +441,6 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b InitializePrimitiveClass(char_class.Get(), Primitive::kPrimChar); SetClassRoot(kPrimitiveChar, char_class.Get()); // needs descriptor - // Create runtime resolution and imt conflict methods. Also setup the default imt. - Runtime* runtime = Runtime::Current(); - runtime->SetResolutionMethod(runtime->CreateResolutionMethod()); - runtime->SetImtConflictMethod(runtime->CreateImtConflictMethod()); - runtime->SetImtUnimplementedMethod(runtime->CreateImtConflictMethod()); - runtime->SetDefaultImt(runtime->CreateDefaultImt(this)); - // Set up GenericJNI entrypoint. That is mainly a hack for common_compiler_test.h so that // we do not need friend classes or a publicly exposed setter. quick_generic_jni_trampoline_ = GetQuickGenericJniStub(); @@ -529,13 +517,8 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // dex_cache_ fields and register them in class_table_. CHECK_EQ(java_lang_Class.Get(), FindSystemClass(self, "Ljava/lang/Class;")); - mirror::Class::SetStatus(java_lang_reflect_ArtMethod, mirror::Class::kStatusNotReady, self); - CHECK_EQ(java_lang_reflect_ArtMethod.Get(), - FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;")); CHECK_EQ(object_array_string.Get(), FindSystemClass(self, GetClassRootDescriptor(kJavaLangStringArrayClass))); - CHECK_EQ(object_array_art_method.Get(), - FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass))); // End of special init trickery, subsequent classes may be loaded via FindSystemClass. 
@@ -579,7 +562,8 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusNotReady, self); CHECK_EQ(java_lang_ref_Reference.Get(), FindSystemClass(self, "Ljava/lang/ref/Reference;")); CHECK_EQ(java_lang_ref_Reference->GetObjectSize(), mirror::Reference::InstanceSize()); - CHECK_EQ(java_lang_ref_Reference->GetClassSize(), mirror::Reference::ClassSize()); + CHECK_EQ(java_lang_ref_Reference->GetClassSize(), + mirror::Reference::ClassSize(image_pointer_size_)); class_root = FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;"); class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference | kAccClassIsFinalizerReference); @@ -1027,24 +1011,41 @@ const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& return nullptr; } -void ClassLinker::InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg) { - ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg); - DCHECK(obj != nullptr); - DCHECK(class_linker != nullptr); - if (obj->IsArtMethod()) { - mirror::ArtMethod* method = obj->AsArtMethod(); - if (!method->IsNative()) { - const size_t pointer_size = class_linker->image_pointer_size_; - method->SetEntryPointFromInterpreterPtrSize(artInterpreterToInterpreterBridge, pointer_size); - if (!method->IsRuntimeMethod() && method != Runtime::Current()->GetResolutionMethod()) { - method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), - pointer_size); - } +static void SanityCheckArtMethod(ArtMethod* m, mirror::Class* expected_class, + gc::space::ImageSpace* space) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (m->IsRuntimeMethod()) { + CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m); + } else if (m->IsMiranda()) { + CHECK(m->GetDeclaringClass() != nullptr) << PrettyMethod(m); + } else if (expected_class != nullptr) { + CHECK_EQ(m->GetDeclaringClassUnchecked(), expected_class) << 
PrettyMethod(m); + } + if (space != nullptr) { + auto& header = space->GetImageHeader(); + auto& methods = header.GetMethodsSection(); + auto offset = reinterpret_cast<uint8_t*>(m) - space->Begin(); + CHECK(methods.Contains(offset)) << m << " not in " << methods; + } +} + +static void SanityCheckArtMethodPointerArray( + mirror::PointerArray* arr, mirror::Class* expected_class, size_t pointer_size, + gc::space::ImageSpace* space) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + CHECK(arr != nullptr); + for (int32_t j = 0; j < arr->GetLength(); ++j) { + auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size); + // expected_class == null means we are a dex cache. + if (expected_class != nullptr) { + CHECK(method != nullptr); + } + if (method != nullptr) { + SanityCheckArtMethod(method, expected_class, space); } } } -void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) +static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(obj != nullptr); CHECK(obj->GetClass() != nullptr) << "Null class " << obj; @@ -1058,6 +1059,36 @@ void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) CHECK_EQ(fields[i][j].GetDeclaringClass(), klass); } } + auto* runtime = Runtime::Current(); + auto* image_space = runtime->GetHeap()->GetImageSpace(); + auto pointer_size = runtime->GetClassLinker()->GetImagePointerSize(); + for (auto& m : klass->GetDirectMethods(pointer_size)) { + SanityCheckArtMethod(&m, klass, image_space); + } + for (auto& m : klass->GetVirtualMethods(pointer_size)) { + SanityCheckArtMethod(&m, klass, image_space); + } + auto* vtable = klass->GetVTable(); + if (vtable != nullptr) { + SanityCheckArtMethodPointerArray(vtable, nullptr, pointer_size, image_space); + } + if (klass->ShouldHaveEmbeddedImtAndVTable()) { + for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { + SanityCheckArtMethod(klass->GetEmbeddedImTableEntry(i, 
pointer_size), nullptr, image_space); + } + for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) { + SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_space); + } + } + auto* iftable = klass->GetIfTable(); + if (iftable != nullptr) { + for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) { + if (iftable->GetMethodArrayCount(i) > 0) { + SanityCheckArtMethodPointerArray(iftable->GetMethodArray(i), nullptr, pointer_size, + image_space); + } + } + } } } @@ -1069,8 +1100,9 @@ void ClassLinker::InitFromImage() { Thread* const self = Thread::Current(); gc::Heap* const heap = runtime->GetHeap(); gc::space::ImageSpace* const space = heap->GetImageSpace(); - dex_cache_image_class_lookup_required_ = true; CHECK(space != nullptr); + image_pointer_size_ = space->GetImageHeader().GetPointerSize(); + dex_cache_image_class_lookup_required_ = true; OatFile& oat_file = GetImageOatFile(space); CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatChecksum(), 0U); CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatDataBegin(), 0U); @@ -1113,34 +1145,28 @@ void ClassLinker::InitFromImage() { UNREACHABLE(); } + if (kSanityCheckObjects) { + SanityCheckArtMethodPointerArray(dex_cache->GetResolvedMethods(), nullptr, + image_pointer_size_, space); + } + CHECK_EQ(dex_file->GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum()); AppendToBootClassPath(*dex_file.get(), dex_cache); opened_dex_files_.push_back(std::move(dex_file)); } + CHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_; + // Set classes on AbstractMethod early so that IsMethod tests can be performed during the live // bitmap walk. - mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod)); - size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize(); if (!runtime->IsAotCompiler()) { - // Aot compiler supports having an image with a different pointer size than the runtime. 
This - // happens on the host for compile 32 bit tests since we use a 64 bit libart compiler. We may - // also use 32 bit dex2oat on a system with 64 bit apps. - CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*))) - << sizeof(void*); - } - if (art_method_object_size == mirror::ArtMethod::InstanceSize(4)) { - image_pointer_size_ = 4; - } else { - CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(8)); - image_pointer_size_ = 8; + // Only the Aot compiler supports having an image with a different pointer size than the + // runtime. This happens on the host for compile 32 bit tests since we use a 64 bit libart + // compiler. We may also use 32 bit dex2oat on a system with 64 bit apps. + CHECK_EQ(image_pointer_size_, sizeof(void*)); } - // Set entry point to interpreter if in InterpretOnly mode. - if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) { - heap->VisitObjects(InitFromImageInterpretOnlyCallback, this); - } if (kSanityCheckObjects) { for (int32_t i = 0; i < dex_caches->GetLength(); i++) { auto* dex_cache = dex_caches->Get(i); @@ -1154,6 +1180,27 @@ void ClassLinker::InitFromImage() { heap->VisitObjects(SanityCheckObjectsCallback, nullptr); } + // Set entry point to interpreter if in InterpretOnly mode. 
+ if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) { + const auto& header = space->GetImageHeader(); + const auto& methods = header.GetMethodsSection(); + const auto art_method_size = ArtMethod::ObjectSize(image_pointer_size_); + for (uintptr_t pos = 0; pos < methods.Size(); pos += art_method_size) { + auto* method = reinterpret_cast<ArtMethod*>(space->Begin() + pos + methods.Offset()); + if (kIsDebugBuild && !method->IsRuntimeMethod()) { + CHECK(method->GetDeclaringClass() != nullptr); + } + if (!method->IsNative()) { + method->SetEntryPointFromInterpreterPtrSize( + artInterpreterToInterpreterBridge, image_pointer_size_); + if (!method->IsRuntimeMethod() && method != runtime->GetResolutionMethod()) { + method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), + image_pointer_size_); + } + } + } + } + // reinit class_roots_ mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass)); class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(class_roots.Get()); @@ -1185,24 +1232,55 @@ void ClassLinker::InitFromImage() { VLOG(startup) << "ClassLinker::InitFromImage exiting"; } +bool ClassLinker::ClassInClassTable(mirror::Class* klass) { + ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); + auto it = class_table_.Find(GcRoot<mirror::Class>(klass)); + if (it == class_table_.end()) { + return false; + } + return it->Read() == klass; +} + void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor( visitor, RootInfo(kRootStickyClass)); if ((flags & kVisitRootFlagAllRoots) != 0) { + // Argument for how root visiting deals with ArtField and ArtMethod roots. 
+ // There is 3 GC cases to handle: + // Non moving concurrent: + // This case is easy to handle since the reference members of ArtMethod and ArtFields are held + // live by the class and class roots. In this case we probably don't even need to call + // VisitNativeRoots. + // + // Moving non-concurrent: + // This case needs to call visit VisitNativeRoots in case the classes or dex cache arrays move. + // To prevent missing roots, this case needs to ensure that there is no + // suspend points between the point which we allocate ArtMethod arrays and place them in a + // class which is in the class table. + // + // Moving concurrent: + // Need to make sure to not copy ArtMethods without doing read barriers since the roots are + // marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy. for (GcRoot<mirror::Class>& root : class_table_) { buffered_visitor.VisitRoot(root); - root.Read()->VisitFieldRoots(buffered_visitor); + if ((flags & kVisitRootFlagNonMoving) == 0) { + // Don't bother visiting ArtField and ArtMethod if kVisitRootFlagNonMoving is set since + // these roots are all reachable from the class or dex cache. + root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_); + } } // PreZygote classes can't move so we won't need to update fields' declaring classes. 
for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) { buffered_visitor.VisitRoot(root); - root.Read()->VisitFieldRoots(buffered_visitor); + if ((flags & kVisitRootFlagNonMoving) == 0) { + root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_); + } } } else if ((flags & kVisitRootFlagNewRoots) != 0) { for (auto& root : new_class_roots_) { mirror::Class* old_ref = root.Read<kWithoutReadBarrier>(); - old_ref->VisitFieldRoots(buffered_visitor); + old_ref->VisitNativeRoots(buffered_visitor, image_pointer_size_); root.VisitRoot(visitor, RootInfo(kRootStickyClass)); mirror::Class* new_ref = root.Read<kWithoutReadBarrier>(); if (UNLIKELY(new_ref != old_ref)) { @@ -1353,7 +1431,6 @@ void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* ar } ClassLinker::~ClassLinker() { - mirror::ArtMethod::ResetClass(); mirror::Class::ResetClass(); mirror::Constructor::ResetClass(); mirror::Field::ResetClass(); @@ -1376,48 +1453,47 @@ ClassLinker::~ClassLinker() { STLDeleteElements(&oat_files_); } +mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) { + return down_cast<mirror::PointerArray*>(image_pointer_size_ == 8u ? 
+ static_cast<mirror::Array*>(mirror::LongArray::Alloc(self, length)) : + static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length))); +} + mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) { - gc::Heap* const heap = Runtime::Current()->GetHeap(); - StackHandleScope<16> hs(self); - Handle<mirror::Class> dex_cache_class(hs.NewHandle(GetClassRoot(kJavaLangDexCache))); - Handle<mirror::DexCache> dex_cache( - hs.NewHandle(down_cast<mirror::DexCache*>( - heap->AllocObject<true>(self, dex_cache_class.Get(), dex_cache_class->GetObjectSize(), - VoidFunctor())))); + StackHandleScope<6> hs(self); + auto dex_cache(hs.NewHandle(down_cast<mirror::DexCache*>( + GetClassRoot(kJavaLangDexCache)->AllocObject(self)))); if (dex_cache.Get() == nullptr) { + self->AssertPendingOOMException(); return nullptr; } - Handle<mirror::String> - location(hs.NewHandle(intern_table_->InternStrong(dex_file.GetLocation().c_str()))); + auto location(hs.NewHandle(intern_table_->InternStrong(dex_file.GetLocation().c_str()))); if (location.Get() == nullptr) { + self->AssertPendingOOMException(); return nullptr; } - Handle<mirror::ObjectArray<mirror::String>> - strings(hs.NewHandle(AllocStringArray(self, dex_file.NumStringIds()))); + auto strings(hs.NewHandle(AllocStringArray(self, dex_file.NumStringIds()))); if (strings.Get() == nullptr) { + self->AssertPendingOOMException(); return nullptr; } - Handle<mirror::ObjectArray<mirror::Class>> - types(hs.NewHandle(AllocClassArray(self, dex_file.NumTypeIds()))); + auto types(hs.NewHandle(AllocClassArray(self, dex_file.NumTypeIds()))); if (types.Get() == nullptr) { + self->AssertPendingOOMException(); return nullptr; } - Handle<mirror::ObjectArray<mirror::ArtMethod>> - methods(hs.NewHandle(AllocArtMethodArray(self, dex_file.NumMethodIds()))); + auto methods(hs.NewHandle(AllocPointerArray(self, dex_file.NumMethodIds()))); if (methods.Get() == nullptr) { + self->AssertPendingOOMException(); return nullptr; } - 
Handle<mirror::Array> fields; - if (image_pointer_size_ == 8) { - fields = hs.NewHandle<mirror::Array>(mirror::LongArray::Alloc(self, dex_file.NumFieldIds())); - } else { - fields = hs.NewHandle<mirror::Array>(mirror::IntArray::Alloc(self, dex_file.NumFieldIds())); - } + auto fields(hs.NewHandle(AllocPointerArray(self, dex_file.NumFieldIds()))); if (fields.Get() == nullptr) { + self->AssertPendingOOMException(); return nullptr; } dex_cache->Init(&dex_file, location.Get(), strings.Get(), types.Get(), methods.Get(), - fields.Get()); + fields.Get(), image_pointer_size_); return dex_cache.Get(); } @@ -1430,7 +1506,7 @@ mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Cl heap->AllocObject<true>(self, java_lang_Class, class_size, visitor) : heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor); if (UNLIKELY(k == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. + self->AssertPendingOOMException(); return nullptr; } return k->AsClass(); @@ -1440,11 +1516,6 @@ mirror::Class* ClassLinker::AllocClass(Thread* self, uint32_t class_size) { return AllocClass(self, GetClassRoot(kJavaLangClass), class_size); } -mirror::ArtMethod* ClassLinker::AllocArtMethod(Thread* self) { - return down_cast<mirror::ArtMethod*>( - GetClassRoot(kJavaLangReflectArtMethod)->AllocNonMovableObject(self)); -} - mirror::ObjectArray<mirror::StackTraceElement>* ClassLinker::AllocStackTraceElementArray( Thread* self, size_t length) { return mirror::ObjectArray<mirror::StackTraceElement>::Alloc( @@ -1749,8 +1820,6 @@ mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, si klass.Assign(GetClassRoot(kJavaLangRefReference)); } else if (strcmp(descriptor, "Ljava/lang/DexCache;") == 0) { klass.Assign(GetClassRoot(kJavaLangDexCache)); - } else if (strcmp(descriptor, "Ljava/lang/reflect/ArtMethod;") == 0) { - klass.Assign(GetClassRoot(kJavaLangReflectArtMethod)); } } @@ -1896,7 +1965,8 @@ uint32_t 
ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file, } } } - return mirror::Class::ComputeClassSize(false, 0, num_8, num_16, num_32, num_64, num_ref); + return mirror::Class::ComputeClassSize(false, 0, num_8, num_16, num_32, num_64, num_ref, + image_pointer_size_); } OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, @@ -1945,7 +2015,7 @@ static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16 UNREACHABLE(); } -const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, bool* found) { +const OatFile::OatMethod ClassLinker::FindOatMethodFor(ArtMethod* method, bool* found) { // Although we overwrite the trampoline of non-static methods, we may get here via the resolution // method for direct methods (or virtual methods made direct). mirror::Class* declaring_class = method->GetDeclaringClass(); @@ -1962,7 +2032,7 @@ const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method for (size_t i = 0; i < end; i++) { // Check method index instead of identity in case of duplicate method definitions. if (method->GetDexMethodIndex() == - declaring_class->GetVirtualMethod(i)->GetDexMethodIndex()) { + declaring_class->GetVirtualMethod(i, image_pointer_size_)->GetDexMethodIndex()) { found_virtual = true; break; } @@ -1985,7 +2055,7 @@ const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method } // Special case to get oat code without overwriting a trampoline. 
-const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) { +const void* ClassLinker::GetQuickOatCodeFor(ArtMethod* method) { CHECK(!method->IsAbstract()) << PrettyMethod(method); if (method->IsProxyMethod()) { return GetQuickProxyInvokeHandler(); @@ -2012,7 +2082,7 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) { return GetQuickToInterpreterBridge(); } -const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) { +const void* ClassLinker::GetOatMethodQuickCodeFor(ArtMethod* method) { if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) { return nullptr; } @@ -2043,7 +2113,7 @@ const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t cl } // Returns true if the method must run with interpreter, false otherwise. -static bool NeedsInterpreter(mirror::ArtMethod* method, const void* quick_code) +static bool NeedsInterpreter(ArtMethod* method, const void* quick_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (quick_code == nullptr) { // No code: need interpreter. @@ -2088,7 +2158,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) { &has_oat_class); // Link the code of methods skipped by LinkCode. for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) { - mirror::ArtMethod* method = klass->GetDirectMethod(method_index); + ArtMethod* method = klass->GetDirectMethod(method_index, image_pointer_size_); if (!method->IsStatic()) { // Only update static methods. continue; @@ -2113,10 +2183,9 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) { // Ignore virtual methods on the iterator. 
} -void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method, - const OatFile::OatClass* oat_class, +void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class, uint32_t class_def_method_index) { - Runtime* runtime = Runtime::Current(); + Runtime* const runtime = Runtime::Current(); if (runtime->IsAotCompiler()) { // The following code only applies to a non-compiler runtime. return; @@ -2127,12 +2196,11 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method, // Every kind of method should at least get an invoke stub from the oat_method. // non-abstract methods also get their code pointers. const OatFile::OatMethod oat_method = oat_class->GetOatMethod(class_def_method_index); - oat_method.LinkMethod(method.Get()); + oat_method.LinkMethod(method); } // Install entry point from interpreter. - bool enter_interpreter = NeedsInterpreter(method.Get(), - method->GetEntryPointFromQuickCompiledCode()); + bool enter_interpreter = NeedsInterpreter(method, method->GetEntryPointFromQuickCompiledCode()); if (enter_interpreter && !method->IsNative()) { method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge); } else { @@ -2221,93 +2289,83 @@ ArtField* ClassLinker::AllocArtFieldArray(Thread* self, size_t length) { return ptr; } +ArtMethod* ClassLinker::AllocArtMethodArray(Thread* self, size_t length) { + const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_); + uintptr_t ptr = reinterpret_cast<uintptr_t>( + Runtime::Current()->GetLinearAlloc()->Alloc(self, method_size * length)); + CHECK_NE(ptr, 0u); + for (size_t i = 0; i < length; ++i) { + new(reinterpret_cast<void*>(ptr + i * method_size)) ArtMethod; + } + return reinterpret_cast<ArtMethod*>(ptr); +} + void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data, Handle<mirror::Class> klass, const OatFile::OatClass* oat_class) { - // Load static fields. 
- ClassDataItemIterator it(dex_file, class_data); - const size_t num_sfields = it.NumStaticFields(); - ArtField* sfields = num_sfields != 0 ? AllocArtFieldArray(self, num_sfields) : nullptr; - for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) { - CHECK_LT(i, num_sfields); - LoadField(it, klass, &sfields[i]); - } - klass->SetSFields(sfields); - klass->SetNumStaticFields(num_sfields); - DCHECK_EQ(klass->NumStaticFields(), num_sfields); - // Load instance fields. - const size_t num_ifields = it.NumInstanceFields(); - ArtField* ifields = num_ifields != 0 ? AllocArtFieldArray(self, num_ifields) : nullptr; - for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) { - CHECK_LT(i, num_ifields); - LoadField(it, klass, &ifields[i]); - } - klass->SetIFields(ifields); - klass->SetNumInstanceFields(num_ifields); - DCHECK_EQ(klass->NumInstanceFields(), num_ifields); - // Note: We cannot have thread suspension until the field arrays are setup or else - // Class::VisitFieldRoots may miss some fields. - self->AllowThreadSuspension(); - // Load methods. - if (it.NumDirectMethods() != 0) { - // TODO: append direct methods to class object - mirror::ObjectArray<mirror::ArtMethod>* directs = - AllocArtMethodArray(self, it.NumDirectMethods()); - if (UNLIKELY(directs == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. - return; - } - klass->SetDirectMethods(directs); - } - if (it.NumVirtualMethods() != 0) { - // TODO: append direct methods to class object - mirror::ObjectArray<mirror::ArtMethod>* virtuals = - AllocArtMethodArray(self, it.NumVirtualMethods()); - if (UNLIKELY(virtuals == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. 
- return; - } - klass->SetVirtualMethods(virtuals); - } - size_t class_def_method_index = 0; - uint32_t last_dex_method_index = DexFile::kDexNoIndex; - size_t last_class_def_method_index = 0; - for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) { - self->AllowThreadSuspension(); - StackHandleScope<1> hs(self); - Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass))); - if (UNLIKELY(method.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. - return; + { + // Note: We cannot have thread suspension until the field and method arrays are setup or else + // Class::VisitFieldRoots may miss some fields or methods. + ScopedAssertNoThreadSuspension nts(self, __FUNCTION__); + // Load static fields. + ClassDataItemIterator it(dex_file, class_data); + const size_t num_sfields = it.NumStaticFields(); + ArtField* sfields = num_sfields != 0 ? AllocArtFieldArray(self, num_sfields) : nullptr; + for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) { + CHECK_LT(i, num_sfields); + LoadField(it, klass, &sfields[i]); + } + klass->SetSFields(sfields); + klass->SetNumStaticFields(num_sfields); + DCHECK_EQ(klass->NumStaticFields(), num_sfields); + // Load instance fields. + const size_t num_ifields = it.NumInstanceFields(); + ArtField* ifields = num_ifields != 0 ? AllocArtFieldArray(self, num_ifields) : nullptr; + for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) { + CHECK_LT(i, num_ifields); + LoadField(it, klass, &ifields[i]); + } + klass->SetIFields(ifields); + klass->SetNumInstanceFields(num_ifields); + DCHECK_EQ(klass->NumInstanceFields(), num_ifields); + // Load methods. 
+ if (it.NumDirectMethods() != 0) { + klass->SetDirectMethodsPtr(AllocArtMethodArray(self, it.NumDirectMethods())); + } + klass->SetNumDirectMethods(it.NumDirectMethods()); + if (it.NumVirtualMethods() != 0) { + klass->SetVirtualMethodsPtr(AllocArtMethodArray(self, it.NumVirtualMethods())); + } + klass->SetNumVirtualMethods(it.NumVirtualMethods()); + size_t class_def_method_index = 0; + uint32_t last_dex_method_index = DexFile::kDexNoIndex; + size_t last_class_def_method_index = 0; + for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) { + ArtMethod* method = klass->GetDirectMethodUnchecked(i, image_pointer_size_); + LoadMethod(self, dex_file, it, klass, method); + LinkCode(method, oat_class, class_def_method_index); + uint32_t it_method_index = it.GetMemberIndex(); + if (last_dex_method_index == it_method_index) { + // duplicate case + method->SetMethodIndex(last_class_def_method_index); + } else { + method->SetMethodIndex(class_def_method_index); + last_dex_method_index = it_method_index; + last_class_def_method_index = class_def_method_index; + } + class_def_method_index++; } - klass->SetDirectMethod(i, method.Get()); - LinkCode(method, oat_class, class_def_method_index); - uint32_t it_method_index = it.GetMemberIndex(); - if (last_dex_method_index == it_method_index) { - // duplicate case - method->SetMethodIndex(last_class_def_method_index); - } else { - method->SetMethodIndex(class_def_method_index); - last_dex_method_index = it_method_index; - last_class_def_method_index = class_def_method_index; + for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) { + ArtMethod* method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_); + LoadMethod(self, dex_file, it, klass, method); + DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i); + LinkCode(method, oat_class, class_def_method_index); + class_def_method_index++; } - class_def_method_index++; + DCHECK(!it.HasNext()); } - for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) 
{ - self->AllowThreadSuspension(); - StackHandleScope<1> hs(self); - Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass))); - if (UNLIKELY(method.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. - return; - } - klass->SetVirtualMethod(i, method.Get()); - DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i); - LinkCode(method, oat_class, class_def_method_index); - class_def_method_index++; - } - DCHECK(!it.HasNext()); + self->AllowThreadSuspension(); } void ClassLinker::LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass, @@ -2318,20 +2376,12 @@ void ClassLinker::LoadField(const ClassDataItemIterator& it, Handle<mirror::Clas dst->SetAccessFlags(it.GetFieldAccessFlags()); } -mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file, - const ClassDataItemIterator& it, - Handle<mirror::Class> klass) { +void ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it, + Handle<mirror::Class> klass, ArtMethod* dst) { uint32_t dex_method_idx = it.GetMemberIndex(); const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx); const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_); - mirror::ArtMethod* dst = AllocArtMethod(self); - if (UNLIKELY(dst == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. 
- return nullptr; - } - DCHECK(dst->IsArtMethod()) << PrettyDescriptor(dst->GetClass()); - ScopedAssertNoThreadSuspension ants(self, "LoadMethod"); dst->SetDexMethodIndex(dex_method_idx); dst->SetDeclaringClass(klass.Get()); @@ -2377,8 +2427,6 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file } } dst->SetAccessFlags(access_flags); - - return dst; } void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) { @@ -2482,17 +2530,17 @@ mirror::DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) { UNREACHABLE(); } -void ClassLinker::FixupDexCaches(mirror::ArtMethod* resolution_method) { +void ClassLinker::FixupDexCaches(ArtMethod* resolution_method) { ReaderMutexLock mu(Thread::Current(), dex_lock_); - for (size_t i = 0; i != dex_caches_.size(); ++i) { - mirror::DexCache* dex_cache = GetDexCache(i); - dex_cache->Fixup(resolution_method); + for (auto& dex_cache : dex_caches_) { + dex_cache.Read()->Fixup(resolution_method, image_pointer_size_); } } mirror::Class* ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) { - mirror::Class* klass = AllocClass(self, mirror::Class::PrimitiveClassSize()); + mirror::Class* klass = AllocClass(self, mirror::Class::PrimitiveClassSize(image_pointer_size_)); if (UNLIKELY(klass == nullptr)) { + self->AssertPendingOOMException(); return nullptr; } return InitializePrimitiveClass(klass, type); @@ -2593,9 +2641,6 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto new_class.Assign(GetClassRoot(kObjectArrayClass)); } else if (strcmp(descriptor, GetClassRootDescriptor(kJavaLangStringArrayClass)) == 0) { new_class.Assign(GetClassRoot(kJavaLangStringArrayClass)); - } else if (strcmp(descriptor, - GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass)) == 0) { - new_class.Assign(GetClassRoot(kJavaLangReflectArtMethodArrayClass)); } else if (strcmp(descriptor, "[C") == 0) { new_class.Assign(GetClassRoot(kCharArrayClass)); } else if 
(strcmp(descriptor, "[I") == 0) { @@ -2605,8 +2650,9 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto } } if (new_class.Get() == nullptr) { - new_class.Assign(AllocClass(self, mirror::Array::ClassSize())); + new_class.Assign(AllocClass(self, mirror::Array::ClassSize(image_pointer_size_))); if (new_class.Get() == nullptr) { + self->AssertPendingOOMException(); return nullptr; } new_class->SetComponentType(component_type.Get()); @@ -2620,9 +2666,9 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto new_class->SetClassLoader(component_type->GetClassLoader()); mirror::Class::SetStatus(new_class, mirror::Class::kStatusLoaded, self); { - StackHandleScope<mirror::Class::kImtSize> hs2(self, - Runtime::Current()->GetImtUnimplementedMethod()); - new_class->PopulateEmbeddedImtAndVTable(&hs2); + ArtMethod* imt[mirror::Class::kImtSize]; + std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod()); + new_class->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_); } mirror::Class::SetStatus(new_class, mirror::Class::kStatusInitialized, self); // don't need to set new_class->SetObjectSize(..) @@ -2732,6 +2778,18 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k return nullptr; } +void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods, + size_t new_num_methods) { + // classlinker_classes_lock_ is used to guard against races between root marking and changing the + // direct and virtual method pointers. 
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); + klass->SetNumVirtualMethods(new_num_methods); + klass->SetVirtualMethodsPtr(new_methods); + if (log_new_class_table_roots_) { + new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); + } +} + mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) { WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); @@ -3073,7 +3131,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) { void ClassLinker::EnsurePreverifiedMethods(Handle<mirror::Class> klass) { if (!klass->IsPreverified()) { - klass->SetPreverifiedFlagOnAllMethods(); + klass->SetPreverifiedFlagOnAllMethods(image_pointer_size_); klass->SetPreverified(); } } @@ -3164,15 +3222,15 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Handle<mirror::Class> klass) { for (size_t i = 0; i < klass->NumDirectMethods(); i++) { - ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i)); + ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i, image_pointer_size_)); } for (size_t i = 0; i < klass->NumVirtualMethods(); i++) { - ResolveMethodExceptionHandlerTypes(dex_file, klass->GetVirtualMethod(i)); + ResolveMethodExceptionHandlerTypes(dex_file, klass->GetVirtualMethod(i, image_pointer_size_)); } } void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, - mirror::ArtMethod* method) { + ArtMethod* method) { // similar to DexVerifier::ScanTryCatchBlocks and dex2oat's ResolveExceptionsForMethod. 
const DexFile::CodeItem* code_item = dex_file.GetCodeItem(method->GetCodeItemOffset()); if (code_item == nullptr) { @@ -3201,10 +3259,6 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, } } -static void CheckProxyConstructor(mirror::ArtMethod* constructor); -static void CheckProxyMethod(Handle<mirror::ArtMethod> method, - Handle<mirror::ArtMethod> prototype); - mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name, jobjectArray interfaces, jobject loader, jobjectArray methods, jobjectArray throws) { @@ -3255,48 +3309,37 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& throws_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal); // Proxies have 1 direct method, the constructor - { - StackHandleScope<2> hs2(self); - Handle<mirror::ObjectArray<mirror::ArtMethod>> directs = - hs2.NewHandle(AllocArtMethodArray(self, 1)); - if (UNLIKELY(directs.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. - return nullptr; - } - klass->SetDirectMethods(directs.Get()); - Handle<mirror::ArtMethod> constructor = - hs2.NewHandle(CreateProxyConstructor(self, klass)); - if (UNLIKELY(constructor.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. - return nullptr; - } - klass->SetDirectMethod(0, constructor.Get()); + auto* directs = AllocArtMethodArray(self, 1); + // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we + // want to throw OOM in the future. + if (UNLIKELY(directs == nullptr)) { + self->AssertPendingOOMException(); + return nullptr; } + klass->SetDirectMethodsPtr(directs); + klass->SetNumDirectMethods(1u); + CreateProxyConstructor(klass, klass->GetDirectMethodUnchecked(0, image_pointer_size_)); // Create virtual method using specified prototypes. 
auto h_methods = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>*>(methods)); DCHECK_EQ(h_methods->GetClass(), mirror::Method::ArrayClass()) << PrettyClass(h_methods->GetClass()); const size_t num_virtual_methods = h_methods->GetLength(); - { - StackHandleScope<1> hs2(self); - Handle<mirror::ObjectArray<mirror::ArtMethod>> virtuals = - hs2.NewHandle(AllocArtMethodArray(self, num_virtual_methods)); - if (UNLIKELY(virtuals.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. - return nullptr; - } - klass->SetVirtualMethods(virtuals.Get()); + auto* virtuals = AllocArtMethodArray(self, num_virtual_methods); + // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we + // want to throw OOM in the future. + if (UNLIKELY(virtuals == nullptr)) { + self->AssertPendingOOMException(); + return nullptr; } + klass->SetVirtualMethodsPtr(virtuals); + klass->SetNumVirtualMethods(num_virtual_methods); for (size_t i = 0; i < num_virtual_methods; ++i) { - StackHandleScope<2> hs2(self); - Handle<mirror::ArtMethod> prototype(hs2.NewHandle(h_methods->Get(i)->GetArtMethod())); - Handle<mirror::ArtMethod> clone(hs2.NewHandle(CreateProxyMethod(self, klass, prototype))); - if (UNLIKELY(clone.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. - return nullptr; - } - klass->SetVirtualMethod(i, clone.Get()); + auto* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_); + auto* prototype = h_methods->Get(i)->GetArtMethod(); + CreateProxyMethod(klass, prototype, virtual_method); + DCHECK(virtual_method->GetDeclaringClass() != nullptr); + DCHECK(prototype->GetDeclaringClass() != nullptr); } // The super class is java.lang.reflect.Proxy @@ -3311,7 +3354,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& ObjectLock<mirror::Class> resolution_lock(self, klass); // Link the fields and virtual methods, creating vtable and iftables. 
// The new class will replace the old one in the class table. - Handle<mirror::ObjectArray<mirror::Class> > h_interfaces( + Handle<mirror::ObjectArray<mirror::Class>> h_interfaces( hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces))); if (!LinkClass(self, descriptor.c_str(), klass, h_interfaces, &new_class)) { mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self); @@ -3338,11 +3381,11 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& // sanity checks if (kIsDebugBuild) { CHECK(klass->GetIFields() == nullptr); - CheckProxyConstructor(klass->GetDirectMethod(0)); + CheckProxyConstructor(klass->GetDirectMethod(0, image_pointer_size_)); + for (size_t i = 0; i < num_virtual_methods; ++i) { - StackHandleScope<2> hs2(self); - Handle<mirror::ArtMethod> prototype(hs2.NewHandle(h_methods->Get(i)->GetArtMethod())); - Handle<mirror::ArtMethod> virtual_method(hs2.NewHandle(klass->GetVirtualMethod(i))); + auto* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_); + auto* prototype = h_methods->Get(i)->GetArtMethod(); CheckProxyMethod(virtual_method, prototype); } @@ -3371,8 +3414,8 @@ std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) { return DotToDescriptor(name->ToModifiedUtf8().c_str()); } -mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, - mirror::ArtMethod* proxy_method) { +ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, + ArtMethod* proxy_method) { DCHECK(proxy_class->IsProxyClass()); DCHECK(proxy_method->IsProxyMethod()); { @@ -3381,8 +3424,8 @@ mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, for (const GcRoot<mirror::DexCache>& root : dex_caches_) { auto* dex_cache = root.Read(); if (proxy_method->HasSameDexCacheResolvedTypes(dex_cache->GetResolvedTypes())) { - mirror::ArtMethod* resolved_method = dex_cache->GetResolvedMethod( - proxy_method->GetDexMethodIndex()); +
ArtMethod* resolved_method = dex_cache->GetResolvedMethod( + proxy_method->GetDexMethodIndex(), image_pointer_size_); CHECK(resolved_method != nullptr); return resolved_method; } @@ -3393,74 +3436,60 @@ mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, UNREACHABLE(); } - -mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self, - Handle<mirror::Class> klass) { - // Create constructor for Proxy that must initialize h - mirror::ObjectArray<mirror::ArtMethod>* proxy_direct_methods = - GetClassRoot(kJavaLangReflectProxy)->GetDirectMethods(); - CHECK_EQ(proxy_direct_methods->GetLength(), 16); - mirror::ArtMethod* proxy_constructor = proxy_direct_methods->Get(2); +void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) { + // Create constructor for Proxy that must initialize the method. + CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 16u); + ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->GetDirectMethodUnchecked( + 2, image_pointer_size_); // Ensure constructor is in dex cache so that we can use the dex cache to look up the overridden // constructor method. GetClassRoot(kJavaLangReflectProxy)->GetDexCache()->SetResolvedMethod( - proxy_constructor->GetDexMethodIndex(), proxy_constructor); + proxy_constructor->GetDexMethodIndex(), proxy_constructor, image_pointer_size_); // Clone the existing constructor of Proxy (our constructor would just invoke it so steal its // code_ too) - mirror::ArtMethod* constructor = down_cast<mirror::ArtMethod*>(proxy_constructor->Clone(self)); - if (constructor == nullptr) { - CHECK(self->IsExceptionPending()); // OOME. 
- return nullptr; - } + DCHECK(out != nullptr); + out->CopyFrom(proxy_constructor, image_pointer_size_); // Make this constructor public and fix the class to be our Proxy version - constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic); - constructor->SetDeclaringClass(klass.Get()); - return constructor; + out->SetAccessFlags((out->GetAccessFlags() & ~kAccProtected) | kAccPublic); + out->SetDeclaringClass(klass.Get()); } -static void CheckProxyConstructor(mirror::ArtMethod* constructor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void ClassLinker::CheckProxyConstructor(ArtMethod* constructor) const { CHECK(constructor->IsConstructor()); - CHECK_STREQ(constructor->GetName(), "<init>"); - CHECK_STREQ(constructor->GetSignature().ToString().c_str(), - "(Ljava/lang/reflect/InvocationHandler;)V"); + auto* np = constructor->GetInterfaceMethodIfProxy(image_pointer_size_); + CHECK_STREQ(np->GetName(), "<init>"); + CHECK_STREQ(np->GetSignature().ToString().c_str(), "(Ljava/lang/reflect/InvocationHandler;)V"); DCHECK(constructor->IsPublic()); } -mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self, - Handle<mirror::Class> klass, - Handle<mirror::ArtMethod> prototype) { +void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, + ArtMethod* out) { // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden // prototype method auto* dex_cache = prototype->GetDeclaringClass()->GetDexCache(); // Avoid dirtying the dex cache unless we need to. 
- if (dex_cache->GetResolvedMethod(prototype->GetDexMethodIndex()) != prototype.Get()) { - dex_cache->SetResolvedMethod(prototype->GetDexMethodIndex(), prototype.Get()); + if (dex_cache->GetResolvedMethod(prototype->GetDexMethodIndex(), image_pointer_size_) != + prototype) { + dex_cache->SetResolvedMethod( + prototype->GetDexMethodIndex(), prototype, image_pointer_size_); } // We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize // as necessary - mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(prototype->Clone(self)); - if (UNLIKELY(method == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. - return nullptr; - } + DCHECK(out != nullptr); + out->CopyFrom(prototype, image_pointer_size_); // Set class to be the concrete proxy class and clear the abstract flag, modify exceptions to // the intersection of throw exceptions as defined in Proxy - method->SetDeclaringClass(klass.Get()); - method->SetAccessFlags((method->GetAccessFlags() & ~kAccAbstract) | kAccFinal); + out->SetDeclaringClass(klass.Get()); + out->SetAccessFlags((out->GetAccessFlags() & ~kAccAbstract) | kAccFinal); // At runtime the method looks like a reference and argument saving method, clone the code // related parameters from this method. 
- method->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler()); - method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge); - - return method; + out->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler()); + out->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge); } -static void CheckProxyMethod(Handle<mirror::ArtMethod> method, - Handle<mirror::ArtMethod> prototype) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void ClassLinker::CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const { // Basic sanity CHECK(!prototype->IsFinal()); CHECK(method->IsFinal()); @@ -3468,26 +3497,26 @@ static void CheckProxyMethod(Handle<mirror::ArtMethod> method, // The proxy method doesn't have its own dex cache or dex file and so it steals those of its // interface prototype. The exception to this are Constructors and the Class of the Proxy itself. - CHECK(prototype->HasSameDexCacheResolvedMethods(method.Get())); - CHECK(prototype->HasSameDexCacheResolvedTypes(method.Get())); - CHECK_EQ(prototype->GetDeclaringClass()->GetDexCache(), method->GetDexCache()); + CHECK(prototype->HasSameDexCacheResolvedMethods(method)); + CHECK(prototype->HasSameDexCacheResolvedTypes(method)); + auto* np = method->GetInterfaceMethodIfProxy(image_pointer_size_); + CHECK_EQ(prototype->GetDeclaringClass()->GetDexCache(), np->GetDexCache()); CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex()); - CHECK_STREQ(method->GetName(), prototype->GetName()); - CHECK_STREQ(method->GetShorty(), prototype->GetShorty()); + CHECK_STREQ(np->GetName(), prototype->GetName()); + CHECK_STREQ(np->GetShorty(), prototype->GetShorty()); // More complex sanity - via dex cache - CHECK_EQ(method->GetInterfaceMethodIfProxy()->GetReturnType(), prototype->GetReturnType()); + CHECK_EQ(np->GetReturnType(), prototype->GetReturnType()); } -static bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, - bool can_init_parents) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, + bool can_init_parents) { if (can_init_statics && can_init_parents) { return true; } if (!can_init_statics) { // Check if there's a class initializer. - mirror::ArtMethod* clinit = klass->FindClassInitializer(); + ArtMethod* clinit = klass->FindClassInitializer(image_pointer_size_); if (clinit != nullptr) { return false; } @@ -3500,17 +3529,14 @@ static bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, } } } - if (!klass->IsInterface() && klass->HasSuperClass()) { - mirror::Class* super_class = klass->GetSuperClass(); - if (!can_init_parents && !super_class->IsInitialized()) { - return false; - } else { - if (!CanWeInitializeClass(super_class, can_init_statics, can_init_parents)) { - return false; - } - } + if (klass->IsInterface() || !klass->HasSuperClass()) { + return true; } - return true; + mirror::Class* super_class = klass->GetSuperClass(); + if (!can_init_parents && !super_class->IsInitialized()) { + return false; + } + return CanWeInitializeClass(super_class, can_init_statics, can_init_parents); } bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, @@ -3670,7 +3696,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, } } - mirror::ArtMethod* clinit = klass->FindClassInitializer(); + ArtMethod* clinit = klass->FindClassInitializer(image_pointer_size_); if (clinit != nullptr) { CHECK(can_init_statics); JValue result; @@ -3761,8 +3787,8 @@ bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* se static void ThrowSignatureCheckResolveReturnTypeException(Handle<mirror::Class> klass, Handle<mirror::Class> super_klass, - Handle<mirror::ArtMethod> method, - mirror::ArtMethod* m) + ArtMethod* method, + ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(Thread::Current()->IsExceptionPending()); DCHECK(!m->IsProxyMethod()); 
@@ -3776,7 +3802,7 @@ static void ThrowSignatureCheckResolveReturnTypeException(Handle<mirror::Class> "While checking class %s method %s signature against %s %s: " "Failed to resolve return type %s with %s", PrettyDescriptor(klass.Get()).c_str(), - PrettyMethod(method.Get()).c_str(), + PrettyMethod(method).c_str(), super_klass->IsInterface() ? "interface" : "superclass", PrettyDescriptor(super_klass.Get()).c_str(), return_type.c_str(), class_loader.c_str()); @@ -3784,8 +3810,8 @@ static void ThrowSignatureCheckResolveReturnTypeException(Handle<mirror::Class> static void ThrowSignatureCheckResolveArgException(Handle<mirror::Class> klass, Handle<mirror::Class> super_klass, - Handle<mirror::ArtMethod> method, - mirror::ArtMethod* m, + ArtMethod* method, + ArtMethod* m, uint32_t index, uint32_t arg_type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(Thread::Current()->IsExceptionPending()); @@ -3797,7 +3823,7 @@ static void ThrowSignatureCheckResolveArgException(Handle<mirror::Class> klass, "While checking class %s method %s signature against %s %s: " "Failed to resolve arg %u type %s with %s", PrettyDescriptor(klass.Get()).c_str(), - PrettyMethod(method.Get()).c_str(), + PrettyMethod(method).c_str(), super_klass->IsInterface() ? "interface" : "superclass", PrettyDescriptor(super_klass.Get()).c_str(), index, arg_type.c_str(), class_loader.c_str()); @@ -3805,13 +3831,13 @@ static void ThrowSignatureCheckResolveArgException(Handle<mirror::Class> klass, static void ThrowSignatureMismatch(Handle<mirror::Class> klass, Handle<mirror::Class> super_klass, - Handle<mirror::ArtMethod> method, + ArtMethod* method, const std::string& error_msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ThrowLinkageError(klass.Get(), "Class %s method %s resolves differently in %s %s: %s", PrettyDescriptor(klass.Get()).c_str(), - PrettyMethod(method.Get()).c_str(), + PrettyMethod(method).c_str(), super_klass->IsInterface() ? 
"interface" : "superclass", PrettyDescriptor(super_klass.Get()).c_str(), error_msg.c_str()); @@ -3820,19 +3846,19 @@ static void ThrowSignatureMismatch(Handle<mirror::Class> klass, static bool HasSameSignatureWithDifferentClassLoaders(Thread* self, Handle<mirror::Class> klass, Handle<mirror::Class> super_klass, - Handle<mirror::ArtMethod> method1, - Handle<mirror::ArtMethod> method2) + ArtMethod* method1, + ArtMethod* method2) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { { StackHandleScope<1> hs(self); Handle<mirror::Class> return_type(hs.NewHandle(method1->GetReturnType())); if (UNLIKELY(return_type.Get() == nullptr)) { - ThrowSignatureCheckResolveReturnTypeException(klass, super_klass, method1, method1.Get()); + ThrowSignatureCheckResolveReturnTypeException(klass, super_klass, method1, method1); return false; } mirror::Class* other_return_type = method2->GetReturnType(); if (UNLIKELY(other_return_type == nullptr)) { - ThrowSignatureCheckResolveReturnTypeException(klass, super_klass, method1, method2.Get()); + ThrowSignatureCheckResolveReturnTypeException(klass, super_klass, method1, method2); return false; } if (UNLIKELY(other_return_type != return_type.Get())) { @@ -3851,7 +3877,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self, if (types2 != nullptr && types2->Size() != 0) { ThrowSignatureMismatch(klass, super_klass, method1, StringPrintf("Type list mismatch with %s", - PrettyMethod(method2.Get(), true).c_str())); + PrettyMethod(method2, true).c_str())); return false; } return true; @@ -3859,7 +3885,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self, if (types1->Size() != 0) { ThrowSignatureMismatch(klass, super_klass, method1, StringPrintf("Type list mismatch with %s", - PrettyMethod(method2.Get(), true).c_str())); + PrettyMethod(method2, true).c_str())); return false; } return true; @@ -3868,7 +3894,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self, if (UNLIKELY(num_types != types2->Size())) { 
ThrowSignatureMismatch(klass, super_klass, method1, StringPrintf("Type list mismatch with %s", - PrettyMethod(method2.Get(), true).c_str())); + PrettyMethod(method2, true).c_str())); return false; } for (uint32_t i = 0; i < num_types; ++i) { @@ -3878,7 +3904,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self, method1->GetClassFromTypeIndex(param_type_idx, true))); if (UNLIKELY(param_type.Get() == nullptr)) { ThrowSignatureCheckResolveArgException(klass, super_klass, method1, - method1.Get(), i, param_type_idx); + method1, i, param_type_idx); return false; } uint32_t other_param_type_idx = types2->GetTypeItem(i).type_idx_; @@ -3886,7 +3912,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self, method2->GetClassFromTypeIndex(other_param_type_idx, true); if (UNLIKELY(other_param_type == nullptr)) { ThrowSignatureCheckResolveArgException(klass, super_klass, method1, - method2.Get(), i, other_param_type_idx); + method2, i, other_param_type_idx); return false; } if (UNLIKELY(param_type.Get() != other_param_type)) { @@ -3910,19 +3936,17 @@ bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) { } // Begin with the methods local to the superclass. 
Thread* self = Thread::Current(); - StackHandleScope<3> hs(self); + StackHandleScope<1> hs(self); MutableHandle<mirror::Class> super_klass(hs.NewHandle<mirror::Class>(nullptr)); - MutableHandle<mirror::ArtMethod> h_m(hs.NewHandle<mirror::ArtMethod>(nullptr)); - MutableHandle<mirror::ArtMethod> super_h_m(hs.NewHandle<mirror::ArtMethod>(nullptr)); if (klass->HasSuperClass() && klass->GetClassLoader() != klass->GetSuperClass()->GetClassLoader()) { super_klass.Assign(klass->GetSuperClass()); for (int i = klass->GetSuperClass()->GetVTableLength() - 1; i >= 0; --i) { - h_m.Assign(klass->GetVTableEntry(i)); - super_h_m.Assign(klass->GetSuperClass()->GetVTableEntry(i)); - if (h_m.Get() != super_h_m.Get()) { + auto* m = klass->GetVTableEntry(i, image_pointer_size_); + auto* super_m = klass->GetSuperClass()->GetVTableEntry(i, image_pointer_size_); + if (m != super_m) { if (UNLIKELY(!HasSameSignatureWithDifferentClassLoaders(self, klass, super_klass, - h_m, super_h_m))) { + m, super_m))) { self->AssertPendingException(); return false; } @@ -3934,11 +3958,12 @@ bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) { if (klass->GetClassLoader() != super_klass->GetClassLoader()) { uint32_t num_methods = super_klass->NumVirtualMethods(); for (uint32_t j = 0; j < num_methods; ++j) { - h_m.Assign(klass->GetIfTable()->GetMethodArray(i)->GetWithoutChecks(j)); - super_h_m.Assign(super_klass->GetVirtualMethod(j)); - if (h_m.Get() != super_h_m.Get()) { + auto* m = klass->GetIfTable()->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>( + j, image_pointer_size_); + auto* super_m = super_klass->GetVirtualMethod(j, image_pointer_size_); + if (m != super_m) { if (UNLIKELY(!HasSameSignatureWithDifferentClassLoaders(self, klass, super_klass, - h_m, super_h_m))) { + m, super_m))) { self->AssertPendingException(); return false; } @@ -3967,8 +3992,10 @@ bool ClassLinker::EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool return success; } -void 
ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class) { +void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, + mirror::Class* new_class) { ArtField* fields = new_class->GetIFields(); + DCHECK_EQ(temp_class->NumInstanceFields(), new_class->NumInstanceFields()); for (size_t i = 0, count = new_class->NumInstanceFields(); i < count; i++) { if (fields[i].GetDeclaringClass() == temp_class) { fields[i].SetDeclaringClass(new_class); @@ -3976,27 +4003,24 @@ void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror } fields = new_class->GetSFields(); + DCHECK_EQ(temp_class->NumStaticFields(), new_class->NumStaticFields()); for (size_t i = 0, count = new_class->NumStaticFields(); i < count; i++) { if (fields[i].GetDeclaringClass() == temp_class) { fields[i].SetDeclaringClass(new_class); } } - mirror::ObjectArray<mirror::ArtMethod>* methods = new_class->GetDirectMethods(); - if (methods != nullptr) { - for (int index = 0; index < methods->GetLength(); index ++) { - if (methods->Get(index)->GetDeclaringClass() == temp_class) { - methods->Get(index)->SetDeclaringClass(new_class); - } + DCHECK_EQ(temp_class->NumDirectMethods(), new_class->NumDirectMethods()); + for (auto& method : new_class->GetDirectMethods(image_pointer_size_)) { + if (method.GetDeclaringClass() == temp_class) { + method.SetDeclaringClass(new_class); } } - methods = new_class->GetVirtualMethods(); - if (methods != nullptr) { - for (int index = 0; index < methods->GetLength(); index ++) { - if (methods->Get(index)->GetDeclaringClass() == temp_class) { - methods->Get(index)->SetDeclaringClass(new_class); - } + DCHECK_EQ(temp_class->NumVirtualMethods(), new_class->NumVirtualMethods()); + for (auto& method : new_class->GetVirtualMethods(image_pointer_size_)) { + if (method.GetDeclaringClass() == temp_class) { + method.SetDeclaringClass(new_class); } } } @@ -4009,9 +4033,9 @@ bool ClassLinker::LinkClass(Thread* self, const char* 
descriptor, Handle<mirror: if (!LinkSuperClass(klass)) { return false; } - StackHandleScope<mirror::Class::kImtSize> imt_handle_scope( - self, Runtime::Current()->GetImtUnimplementedMethod()); - if (!LinkMethods(self, klass, interfaces, &imt_handle_scope)) { + ArtMethod* imt[mirror::Class::kImtSize]; + std::fill_n(imt, arraysize(imt), Runtime::Current()->GetImtUnimplementedMethod()); + if (!LinkMethods(self, klass, interfaces, imt)) { return false; } if (!LinkInstanceFields(self, klass)) { @@ -4030,7 +4054,7 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror: CHECK_EQ(klass->GetClassSize(), class_size) << PrettyDescriptor(klass.Get()); if (klass->ShouldHaveEmbeddedImtAndVTable()) { - klass->PopulateEmbeddedImtAndVTable(&imt_handle_scope); + klass->PopulateEmbeddedImtAndVTable(imt, image_pointer_size_); } // This will notify waiters on klass that saw the not yet resolved @@ -4041,10 +4065,9 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror: CHECK(!klass->IsResolved()); // Retire the temporary class and create the correctly sized resolved class. StackHandleScope<1> hs(self); - auto h_new_class = hs.NewHandle<mirror::Class>( - klass->CopyOf(self, class_size, &imt_handle_scope)); + auto h_new_class = hs.NewHandle(klass->CopyOf(self, class_size, imt, image_pointer_size_)); if (UNLIKELY(h_new_class.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // Expect an OOME. + self->AssertPendingOOMException(); mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self); return false; } @@ -4356,7 +4379,7 @@ bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) { // Populate the class vtable and itable. Compute return type indices. 
bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, - StackHandleScope<mirror::Class::kImtSize>* out_imt) { + ArtMethod** out_imt) { self->AllowThreadSuspension(); if (klass->IsInterface()) { // No vtable. @@ -4366,7 +4389,7 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass, return false; } for (size_t i = 0; i < count; ++i) { - klass->GetVirtualMethodDuringLinking(i)->SetMethodIndex(i); + klass->GetVirtualMethodDuringLinking(i, image_pointer_size_)->SetMethodIndex(i); } } else if (!LinkVirtualMethods(self, klass)) { // Link virtual methods first. return false; @@ -4379,7 +4402,7 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass, // caches in the implementation below. class MethodNameAndSignatureComparator FINAL : public ValueObject { public: - explicit MethodNameAndSignatureComparator(mirror::ArtMethod* method) + explicit MethodNameAndSignatureComparator(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())), name_(nullptr), name_len_(0) { @@ -4393,7 +4416,7 @@ class MethodNameAndSignatureComparator FINAL : public ValueObject { return name_; } - bool HasSameNameAndSignature(mirror::ArtMethod* other) + bool HasSameNameAndSignature(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(!other->IsProxyMethod()) << PrettyMethod(other); const DexFile* other_dex_file = other->GetDexFile(); @@ -4424,13 +4447,16 @@ class MethodNameAndSignatureComparator FINAL : public ValueObject { class LinkVirtualHashTable { public: - LinkVirtualHashTable(Handle<mirror::Class> klass, size_t hash_size, uint32_t* hash_table) - : klass_(klass), hash_size_(hash_size), hash_table_(hash_table) { + LinkVirtualHashTable(Handle<mirror::Class> klass, size_t hash_size, uint32_t* hash_table, + size_t image_pointer_size) + : klass_(klass), 
hash_size_(hash_size), hash_table_(hash_table), + image_pointer_size_(image_pointer_size) { std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_); } void Add(uint32_t virtual_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking(virtual_method_index); - const char* name = local_method->GetName(); + ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking( + virtual_method_index, image_pointer_size_); + const char* name = local_method->GetInterfaceMethodIfProxy(image_pointer_size_)->GetName(); uint32_t hash = ComputeModifiedUtf8Hash(name); uint32_t index = hash % hash_size_; // Linear probe until we have an empty slot. @@ -4454,9 +4480,10 @@ class LinkVirtualHashTable { break; } if (value != removed_index_) { // This signifies not already overriden. - mirror::ArtMethod* virtual_method = - klass_->GetVirtualMethodDuringLinking(value); - if (comparator->HasSameNameAndSignature(virtual_method->GetInterfaceMethodIfProxy())) { + ArtMethod* virtual_method = + klass_->GetVirtualMethodDuringLinking(value, image_pointer_size_); + if (comparator->HasSameNameAndSignature( + virtual_method->GetInterfaceMethodIfProxy(image_pointer_size_))) { hash_table_[index] = removed_index_; return value; } @@ -4478,6 +4505,7 @@ class LinkVirtualHashTable { Handle<mirror::Class> klass_; const size_t hash_size_; uint32_t* const hash_table_; + const size_t image_pointer_size_; }; const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max(); @@ -4490,30 +4518,32 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) const size_t max_count = num_virtual_methods + super_vtable_length; StackHandleScope<2> hs(self); Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass())); - MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable; + MutableHandle<mirror::PointerArray> vtable; if (super_class->ShouldHaveEmbeddedImtAndVTable()) 
{ - vtable = hs.NewHandle(AllocArtMethodArray(self, max_count)); + vtable = hs.NewHandle(AllocPointerArray(self, max_count)); if (UNLIKELY(vtable.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. + self->AssertPendingOOMException(); return false; } for (size_t i = 0; i < super_vtable_length; i++) { - vtable->SetWithoutChecks<false>(i, super_class->GetEmbeddedVTableEntry(i)); + vtable->SetElementPtrSize( + i, super_class->GetEmbeddedVTableEntry(i, image_pointer_size_), image_pointer_size_); } if (num_virtual_methods == 0) { klass->SetVTable(vtable.Get()); return true; } } else { - mirror::ObjectArray<mirror::ArtMethod>* super_vtable = super_class->GetVTable(); + auto* super_vtable = super_class->GetVTable(); CHECK(super_vtable != nullptr) << PrettyClass(super_class.Get()); if (num_virtual_methods == 0) { klass->SetVTable(super_vtable); return true; } - vtable = hs.NewHandle(super_vtable->CopyOf(self, max_count)); + vtable = hs.NewHandle(down_cast<mirror::PointerArray*>( + super_vtable->CopyOf(self, max_count))); if (UNLIKELY(vtable.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. + self->AssertPendingOOMException(); return false; } } @@ -4537,21 +4567,24 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) hash_heap_storage.reset(new uint32_t[hash_table_size]); hash_table_ptr = hash_heap_storage.get(); } - LinkVirtualHashTable hash_table(klass, hash_table_size, hash_table_ptr); + LinkVirtualHashTable hash_table(klass, hash_table_size, hash_table_ptr, image_pointer_size_); // Add virtual methods to the hash table. for (size_t i = 0; i < num_virtual_methods; ++i) { + DCHECK(klass->GetVirtualMethodDuringLinking( + i, image_pointer_size_)->GetDeclaringClass() != nullptr); hash_table.Add(i); } // Loop through each super vtable method and see if they are overriden by a method we added to // the hash table. 
for (size_t j = 0; j < super_vtable_length; ++j) { // Search the hash table to see if we are overidden by any method. - mirror::ArtMethod* super_method = vtable->GetWithoutChecks(j); + ArtMethod* super_method = vtable->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_); MethodNameAndSignatureComparator super_method_name_comparator( - super_method->GetInterfaceMethodIfProxy()); + super_method->GetInterfaceMethodIfProxy(image_pointer_size_)); uint32_t hash_index = hash_table.FindAndRemove(&super_method_name_comparator); if (hash_index != hash_table.GetNotFoundIndex()) { - mirror::ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(hash_index); + ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking( + hash_index, image_pointer_size_); if (klass->CanAccessMember(super_method->GetDeclaringClass(), super_method->GetAccessFlags())) { if (super_method->IsFinal()) { @@ -4560,7 +4593,7 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) super_method->GetDeclaringClassDescriptor()); return false; } - vtable->SetWithoutChecks<false>(j, virtual_method); + vtable->SetElementPtrSize(j, virtual_method, image_pointer_size_); virtual_method->SetMethodIndex(j); } else { LOG(WARNING) << "Before Android 4.1, method " << PrettyMethod(virtual_method) @@ -4572,13 +4605,13 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) // Add the non overridden methods at the end. 
size_t actual_count = super_vtable_length; for (size_t i = 0; i < num_virtual_methods; ++i) { - mirror::ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i); + ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_); size_t method_idx = local_method->GetMethodIndexDuringLinking(); if (method_idx < super_vtable_length && - local_method == vtable->GetWithoutChecks(method_idx)) { + local_method == vtable->GetElementPtrSize<ArtMethod*>(method_idx, image_pointer_size_)) { continue; } - vtable->SetWithoutChecks<false>(actual_count, local_method); + vtable->SetElementPtrSize(actual_count, local_method, image_pointer_size_); local_method->SetMethodIndex(actual_count); ++actual_count; } @@ -4589,9 +4622,9 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) // Shrink vtable if possible CHECK_LE(actual_count, max_count); if (actual_count < max_count) { - vtable.Assign(vtable->CopyOf(self, actual_count)); + vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, actual_count))); if (UNLIKELY(vtable.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. + self->AssertPendingOOMException(); return false; } } @@ -4603,14 +4636,14 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) static_cast<int>(num_virtual_methods)); return false; } - mirror::ObjectArray<mirror::ArtMethod>* vtable = AllocArtMethodArray(self, num_virtual_methods); + auto* vtable = AllocPointerArray(self, num_virtual_methods); if (UNLIKELY(vtable == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. 
+ self->AssertPendingOOMException(); return false; } for (size_t i = 0; i < num_virtual_methods; ++i) { - mirror::ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i); - vtable->SetWithoutChecks<false>(i, virtual_method); + ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_); + vtable->SetElementPtrSize(i, virtual_method, image_pointer_size_); virtual_method->SetMethodIndex(i & 0xFFFF); } klass->SetVTable(vtable); @@ -4620,7 +4653,7 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, - StackHandleScope<mirror::Class::kImtSize>* out_imt) { + ArtMethod** out_imt) { StackHandleScope<3> hs(self); Runtime* const runtime = Runtime::Current(); const bool has_superclass = klass->HasSuperClass(); @@ -4628,6 +4661,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass const bool have_interfaces = interfaces.Get() != nullptr; const size_t num_interfaces = have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces(); + const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_); if (num_interfaces == 0) { if (super_ifcount == 0) { // Class implements no interfaces. @@ -4666,7 +4700,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass } MutableHandle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount))); if (UNLIKELY(iftable.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. 
+ self->AssertPendingOOMException(); return false; } if (super_ifcount != 0) { @@ -4715,9 +4749,10 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass // Shrink iftable in case duplicates were found if (idx < ifcount) { DCHECK_NE(num_interfaces, 0U); - iftable.Assign(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax))); + iftable.Assign(down_cast<mirror::IfTable*>( + iftable->CopyOf(self, idx * mirror::IfTable::kMax))); if (UNLIKELY(iftable.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. + self->AssertPendingOOMException(); return false; } ifcount = idx; @@ -4729,15 +4764,18 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass if (klass->IsInterface()) { return true; } - size_t miranda_list_size = 0; - size_t max_miranda_methods = 0; // The max size of miranda_list. - for (size_t i = 0; i < ifcount; ++i) { - max_miranda_methods += iftable->GetInterface(i)->NumVirtualMethods(); - } - MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> - miranda_list(hs.NewHandle(AllocArtMethodArray(self, max_miranda_methods))); - MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable( - hs.NewHandle(klass->GetVTableDuringLinking())); + // These are allocated on the heap to begin, we then transfer to linear alloc when we re-create + // the virtual methods array. + // Need to use low 4GB arenas for compiler or else the pointers wont fit in 32 bit method array + // during cross compilation. + // Use the linear alloc pool since this one is in the low 4gb for the compiler. 
+ ArenaStack stack(runtime->GetLinearAlloc()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); + ScopedArenaVector<ArtMethod*> miranda_methods(allocator.Adapter()); + + MutableHandle<mirror::PointerArray> vtable(hs.NewHandle(klass->GetVTableDuringLinking())); + ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod(); + ArtMethod* const conflict_method = runtime->GetImtConflictMethod(); // Copy the IMT from the super class if possible. bool extend_super_iftable = false; if (has_superclass) { @@ -4745,12 +4783,11 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass extend_super_iftable = true; if (super_class->ShouldHaveEmbeddedImtAndVTable()) { for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { - out_imt->SetReference(i, super_class->GetEmbeddedImTableEntry(i)); + out_imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_); } } else { // No imt in the super class, need to reconstruct from the iftable. mirror::IfTable* if_table = super_class->GetIfTable(); - mirror::ArtMethod* conflict_method = runtime->GetImtConflictMethod(); const size_t length = super_class->GetIfTableCount(); for (size_t i = 0; i < length; ++i) { mirror::Class* interface = iftable->GetInterface(i); @@ -4760,63 +4797,84 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass if (method_array_count == 0) { continue; } - mirror::ObjectArray<mirror::ArtMethod>* method_array = if_table->GetMethodArray(i); + auto* method_array = if_table->GetMethodArray(i); for (size_t j = 0; j < num_virtuals; ++j) { - mirror::ArtMethod* method = method_array->GetWithoutChecks(j); + auto method = method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_); + DCHECK(method != nullptr) << PrettyClass(super_class); if (method->IsMiranda()) { continue; } - mirror::ArtMethod* interface_method = interface->GetVirtualMethod(j); + ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_); 
uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize; - mirror::ArtMethod* imt_ref = out_imt->GetReference(imt_index)->AsArtMethod(); - if (imt_ref == runtime->GetImtUnimplementedMethod()) { - out_imt->SetReference(imt_index, method); + auto*& imt_ref = out_imt[imt_index]; + if (imt_ref == unimplemented_method) { + imt_ref = method; } else if (imt_ref != conflict_method) { - out_imt->SetReference(imt_index, conflict_method); + imt_ref = conflict_method; } } } } + // Allocate method arrays before since we don't want to miss visiting miranda method roots due to + // thread suspension. for (size_t i = 0; i < ifcount; ++i) { - self->AllowThreadSuspension(); size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods(); if (num_methods > 0) { - StackHandleScope<2> hs2(self); const bool is_super = i < super_ifcount; const bool super_interface = is_super && extend_super_iftable; - Handle<mirror::ObjectArray<mirror::ArtMethod>> method_array; - Handle<mirror::ObjectArray<mirror::ArtMethod>> input_array; + mirror::PointerArray* method_array; if (super_interface) { mirror::IfTable* if_table = klass->GetSuperClass()->GetIfTable(); DCHECK(if_table != nullptr); DCHECK(if_table->GetMethodArray(i) != nullptr); // If we are working on a super interface, try extending the existing method array.
- method_array = hs2.NewHandle(if_table->GetMethodArray(i)->Clone(self)-> - AsObjectArray<mirror::ArtMethod>()); + method_array = down_cast<mirror::PointerArray*>(if_table->GetMethodArray(i)->Clone(self)); + } else { + method_array = AllocPointerArray(self, num_methods); + } + if (UNLIKELY(method_array == nullptr)) { + self->AssertPendingOOMException(); + return false; + } + iftable->SetMethodArray(i, method_array); + } + } + + auto* old_cause = self->StartAssertNoThreadSuspension( + "Copying ArtMethods for LinkInterfaceMethods"); + for (size_t i = 0; i < ifcount; ++i) { + size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods(); + if (num_methods > 0) { + StackHandleScope<2> hs2(self); + const bool is_super = i < super_ifcount; + const bool super_interface = is_super && extend_super_iftable; + auto method_array(hs2.NewHandle(iftable->GetMethodArray(i))); + + ArtMethod* input_virtual_methods = nullptr; + Handle<mirror::PointerArray> input_vtable_array = NullHandle<mirror::PointerArray>(); + int32_t input_array_length = 0; + if (super_interface) { // We are overwriting a super class interface, try to only virtual methods instead of the // whole vtable. - input_array = hs2.NewHandle(klass->GetVirtualMethods()); + input_virtual_methods = klass->GetVirtualMethodsPtr(); + input_array_length = klass->NumVirtualMethods(); } else { - method_array = hs2.NewHandle(AllocArtMethodArray(self, num_methods)); - // A new interface, we need the whole vtable incase a new interface method is implemented + // A new interface, we need the whole vtable in case a new interface method is implemented // in the whole superclass. - input_array = vtable; - } - if (UNLIKELY(method_array.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. 
- return false; + input_vtable_array = vtable; + input_array_length = input_vtable_array->GetLength(); } - iftable->SetMethodArray(i, method_array.Get()); - if (input_array.Get() == nullptr) { + if (input_array_length == 0) { // If the added virtual methods is empty, do nothing. DCHECK(super_interface); continue; } for (size_t j = 0; j < num_methods; ++j) { - mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j); + auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod( + j, image_pointer_size_); MethodNameAndSignatureComparator interface_name_comparator( - interface_method->GetInterfaceMethodIfProxy()); + interface_method->GetInterfaceMethodIfProxy(image_pointer_size_)); int32_t k; // For each method listed in the interface's method list, find the // matching method in our class's method list. We want to favor the @@ -4826,108 +4884,161 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass // it -- otherwise it would use the same vtable slot. In .dex files // those don't end up in the virtual method table, so it shouldn't // matter which direction we go. We walk it backward anyway.) - for (k = input_array->GetLength() - 1; k >= 0; --k) { - mirror::ArtMethod* vtable_method = input_array->GetWithoutChecks(k); - mirror::ArtMethod* vtable_method_for_name_comparison = - vtable_method->GetInterfaceMethodIfProxy(); + for (k = input_array_length - 1; k >= 0; --k) { + ArtMethod* vtable_method = input_virtual_methods != nullptr ? 
+ reinterpret_cast<ArtMethod*>( + reinterpret_cast<uintptr_t>(input_virtual_methods) + method_size * k) : + input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_); + ArtMethod* vtable_method_for_name_comparison = + vtable_method->GetInterfaceMethodIfProxy(image_pointer_size_); if (interface_name_comparator.HasSameNameAndSignature( vtable_method_for_name_comparison)) { if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) { - ThrowIllegalAccessError( - klass.Get(), + ThrowIllegalAccessError(klass.Get(), "Method '%s' implementing interface method '%s' is not public", - PrettyMethod(vtable_method).c_str(), - PrettyMethod(interface_method).c_str()); + PrettyMethod(vtable_method).c_str(), PrettyMethod(interface_method).c_str()); return false; } - method_array->SetWithoutChecks<false>(j, vtable_method); + method_array->SetElementPtrSize(j, vtable_method, image_pointer_size_); // Place method in imt if entry is empty, place conflict otherwise. uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize; - mirror::ArtMethod* imt_ref = out_imt->GetReference(imt_index)->AsArtMethod(); - mirror::ArtMethod* conflict_method = runtime->GetImtConflictMethod(); - if (imt_ref == runtime->GetImtUnimplementedMethod()) { - out_imt->SetReference(imt_index, vtable_method); - } else if (imt_ref != conflict_method) { + auto** imt_ref = &out_imt[imt_index]; + if (*imt_ref == unimplemented_method) { + *imt_ref = vtable_method; + } else if (*imt_ref != conflict_method) { // If we are not a conflict and we have the same signature and name as the imt entry, // it must be that we overwrote a superclass vtable entry. 
- MethodNameAndSignatureComparator imt_ref_name_comparator( - imt_ref->GetInterfaceMethodIfProxy()); - if (imt_ref_name_comparator.HasSameNameAndSignature( - vtable_method_for_name_comparison)) { - out_imt->SetReference(imt_index, vtable_method); - } else { - out_imt->SetReference(imt_index, conflict_method); - } + MethodNameAndSignatureComparator imt_comparator( + (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size_)); + *imt_ref = imt_comparator.HasSameNameAndSignature(vtable_method_for_name_comparison) ? + vtable_method : conflict_method; } break; } } if (k < 0 && !super_interface) { - mirror::ArtMethod* miranda_method = nullptr; - for (size_t l = 0; l < miranda_list_size; ++l) { - mirror::ArtMethod* mir_method = miranda_list->Get(l); + ArtMethod* miranda_method = nullptr; + for (auto& mir_method : miranda_methods) { if (interface_name_comparator.HasSameNameAndSignature(mir_method)) { miranda_method = mir_method; break; } } if (miranda_method == nullptr) { + size_t size = ArtMethod::ObjectSize(image_pointer_size_); + miranda_method = reinterpret_cast<ArtMethod*>(allocator.Alloc(size)); + CHECK(miranda_method != nullptr); // Point the interface table at a phantom slot. - miranda_method = interface_method->Clone(self)->AsArtMethod(); - if (UNLIKELY(miranda_method == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. 
- return false; - } - DCHECK_LT(miranda_list_size, max_miranda_methods); - miranda_list->Set<false>(miranda_list_size++, miranda_method); + new(miranda_method) ArtMethod(*interface_method, image_pointer_size_); + miranda_methods.push_back(miranda_method); } - method_array->SetWithoutChecks<false>(j, miranda_method); + method_array->SetElementPtrSize(j, miranda_method, image_pointer_size_); } } } } - if (miranda_list_size > 0) { - int old_method_count = klass->NumVirtualMethods(); - int new_method_count = old_method_count + miranda_list_size; - mirror::ObjectArray<mirror::ArtMethod>* virtuals; - if (old_method_count == 0) { - virtuals = AllocArtMethodArray(self, new_method_count); - } else { - virtuals = klass->GetVirtualMethods()->CopyOf(self, new_method_count); - } + if (!miranda_methods.empty()) { + const size_t old_method_count = klass->NumVirtualMethods(); + const size_t new_method_count = old_method_count + miranda_methods.size(); + // Attempt to realloc to save RAM if possible. + ArtMethod* old_virtuals = klass->GetVirtualMethodsPtr(); + // The Realloced virtual methods aren't visible from the class roots, so there is no issue + // where GCs could attempt to mark stale pointers due to memcpy. And since we overwrite the + // realloced memory with out->CopyFrom, we are guaranteed to have objects in the to space since + // CopyFrom has internal read barriers. + auto* virtuals = reinterpret_cast<ArtMethod*>(runtime->GetLinearAlloc()->Realloc( + self, old_virtuals, old_method_count * method_size, new_method_count * method_size)); if (UNLIKELY(virtuals == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. + self->AssertPendingOOMException(); return false; } - klass->SetVirtualMethods(virtuals); + ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter()); + if (virtuals != old_virtuals) { + // Maps from heap allocated miranda method to linear alloc miranda method.
+ StrideIterator<ArtMethod> out(reinterpret_cast<uintptr_t>(virtuals), method_size); + // Copy over the old methods + miranda methods. + for (auto& m : klass->GetVirtualMethods(image_pointer_size_)) { + move_table.emplace(&m, &*out); + // The CopyFrom is only necessary to not miss read barriers since Realloc won't do read + // barriers when it copies. + out->CopyFrom(&m, image_pointer_size_); + ++out; + } + } + UpdateClassVirtualMethods(klass.Get(), virtuals, new_method_count); + // Done copying methods, they are all reachable from the class now, so we can end the no thread + // suspension assert. + self->EndAssertNoThreadSuspension(old_cause); - int old_vtable_count = vtable->GetLength(); - int new_vtable_count = old_vtable_count + miranda_list_size; - vtable.Assign(vtable->CopyOf(self, new_vtable_count)); + size_t old_vtable_count = vtable->GetLength(); + const size_t new_vtable_count = old_vtable_count + miranda_methods.size(); + vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count))); if (UNLIKELY(vtable.Get() == nullptr)) { - CHECK(self->IsExceptionPending()); // OOME. 
+ self->AssertPendingOOMException(); return false; } - for (size_t i = 0; i < miranda_list_size; ++i) { - mirror::ArtMethod* method = miranda_list->Get(i); + StrideIterator<ArtMethod> out( + reinterpret_cast<uintptr_t>(virtuals) + old_method_count * method_size, method_size); + for (auto* mir_method : miranda_methods) { + ArtMethod* out_method = &*out; + out->CopyFrom(mir_method, image_pointer_size_); // Leave the declaring class alone as type indices are relative to it - method->SetAccessFlags(method->GetAccessFlags() | kAccMiranda); - method->SetMethodIndex(0xFFFF & (old_vtable_count + i)); - klass->SetVirtualMethod(old_method_count + i, method); - vtable->SetWithoutChecks<false>(old_vtable_count + i, method); + out_method->SetAccessFlags(out_method->GetAccessFlags() | kAccMiranda); + out_method->SetMethodIndex(0xFFFF & old_vtable_count); + vtable->SetElementPtrSize(old_vtable_count, out_method, image_pointer_size_); + move_table.emplace(mir_method, out_method); + ++out; + ++old_vtable_count; + } + + // Update old vtable methods. + for (size_t i = 0; i < old_vtable_count - miranda_methods.size(); ++i) { + auto* m = vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_); + DCHECK(m != nullptr) << PrettyClass(klass.Get()); + auto it = move_table.find(m); + if (it != move_table.end()) { + auto* new_m = it->second; + DCHECK(new_m != nullptr) << PrettyClass(klass.Get()); + vtable->SetElementPtrSize(i, new_m, image_pointer_size_); + } } - // TODO: do not assign to the vtable field until it is fully constructed. klass->SetVTable(vtable.Get()); + CHECK_EQ(old_vtable_count, new_vtable_count); + // Go fix up all the stale miranda pointers. 
+ for (size_t i = 0; i < ifcount; ++i) { + for (size_t j = 0, count = iftable->GetMethodArrayCount(i); j < count; ++j) { + auto* method_array = iftable->GetMethodArray(i); + auto* m = method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_); + DCHECK(m != nullptr) << PrettyClass(klass.Get()); + auto it = move_table.find(m); + if (it != move_table.end()) { + auto* new_m = it->second; + DCHECK(new_m != nullptr) << PrettyClass(klass.Get()); + method_array->SetElementPtrSize(j, new_m, image_pointer_size_); + } + } + } + // Check that there are no stale methods in the dex cache array. + if (kIsDebugBuild) { + auto* resolved_methods = klass->GetDexCache()->GetResolvedMethods(); + for (size_t i = 0, count = resolved_methods->GetLength(); i < count; ++i) { + auto* m = resolved_methods->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_); + CHECK(move_table.find(m) == move_table.end()) << PrettyMethod(m); + } + } + // Put some random garbage in old virtuals to help find stale pointers.
+ if (virtuals != old_virtuals) { + memset(old_virtuals, 0xFEu, ArtMethod::ObjectSize(image_pointer_size_) * old_method_count); + } + } else { + self->EndAssertNoThreadSuspension(old_cause); } - if (kIsDebugBuild) { - mirror::ObjectArray<mirror::ArtMethod>* check_vtable = klass->GetVTableDuringLinking(); + auto* check_vtable = klass->GetVTableDuringLinking(); for (int i = 0; i < check_vtable->GetLength(); ++i) { - CHECK(check_vtable->GetWithoutChecks(i) != nullptr); + CHECK(check_vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_) != nullptr); } } - - self->AllowThreadSuspension(); return true; } @@ -4984,7 +5095,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ // Initialize field_offset MemberOffset field_offset(0); if (is_static) { - field_offset = klass->GetFirstReferenceStaticFieldOffsetDuringLinking(); + field_offset = klass->GetFirstReferenceStaticFieldOffsetDuringLinking(image_pointer_size_); } else { mirror::Class* super_class = klass->GetSuperClass(); if (super_class != nullptr) { @@ -5059,19 +5170,14 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ } else { klass->SetNumReferenceInstanceFields(num_reference_fields); if (!klass->IsVariableSize()) { - if (klass->DescriptorEquals("Ljava/lang/reflect/ArtMethod;")) { - size_t pointer_size = GetInstructionSetPointerSize(Runtime::Current()->GetInstructionSet()); - klass->SetObjectSize(mirror::ArtMethod::InstanceSize(pointer_size)); - } else { - std::string temp; - DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp); - size_t previous_size = klass->GetObjectSize(); - if (previous_size != 0) { - // Make sure that we didn't originally have an incorrect size. 
- CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp); - } - klass->SetObjectSize(size); + std::string temp; + DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp); + size_t previous_size = klass->GetObjectSize(); + if (previous_size != 0) { + // Make sure that we didn't originally have an incorrect size. + CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp); } + klass->SetObjectSize(size); } } @@ -5079,7 +5185,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ // Make sure that the fields array is ordered by name but all reference // offsets are at the beginning as far as alignment allows. MemberOffset start_ref_offset = is_static - ? klass->GetFirstReferenceStaticFieldOffsetDuringLinking() + ? klass->GetFirstReferenceStaticFieldOffsetDuringLinking(image_pointer_size_) : klass->GetFirstReferenceInstanceFieldOffset(); MemberOffset end_ref_offset(start_ref_offset.Uint32Value() + num_reference_fields * @@ -5203,19 +5309,19 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i } } DCHECK((resolved == nullptr) || resolved->IsResolved() || resolved->IsErroneous()) - << PrettyDescriptor(resolved) << " " << resolved->GetStatus(); + << PrettyDescriptor(resolved) << " " << resolved->GetStatus(); return resolved; } -mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx, - Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, - Handle<mirror::ArtMethod> referrer, - InvokeType type) { +ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx, + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, + ArtMethod* referrer, InvokeType type) { DCHECK(dex_cache.Get() != nullptr); // Check for hit in the dex cache. 
- mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx); + ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, image_pointer_size_); if (resolved != nullptr && !resolved->IsRuntimeMethod()) { + DCHECK(resolved->GetDeclaringClassUnchecked() != nullptr) << resolved->GetDexMethodIndex(); return resolved; } // Fail, get the declaring class. @@ -5230,15 +5336,16 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t switch (type) { case kDirect: // Fall-through. case kStatic: - resolved = klass->FindDirectMethod(dex_cache.Get(), method_idx); + resolved = klass->FindDirectMethod(dex_cache.Get(), method_idx, image_pointer_size_); + DCHECK(resolved == nullptr || resolved->GetDeclaringClassUnchecked() != nullptr); break; case kInterface: - resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx); + resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, image_pointer_size_); DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface()); break; case kSuper: // Fall-through. case kVirtual: - resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx); + resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx, image_pointer_size_); break; default: LOG(FATAL) << "Unreachable - invocation type: " << type; @@ -5251,27 +5358,28 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t switch (type) { case kDirect: // Fall-through. case kStatic: - resolved = klass->FindDirectMethod(name, signature); + resolved = klass->FindDirectMethod(name, signature, image_pointer_size_); + DCHECK(resolved == nullptr || resolved->GetDeclaringClassUnchecked() != nullptr); break; case kInterface: - resolved = klass->FindInterfaceMethod(name, signature); + resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_); DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface()); break; case kSuper: // Fall-through. 
case kVirtual: - resolved = klass->FindVirtualMethod(name, signature); + resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_); break; } } // If we found a method, check for incompatible class changes. if (LIKELY(resolved != nullptr && !resolved->CheckIncompatibleClassChange(type))) { // Be a good citizen and update the dex cache to speed subsequent calls. - dex_cache->SetResolvedMethod(method_idx, resolved); + dex_cache->SetResolvedMethod(method_idx, resolved, image_pointer_size_); return resolved; } else { // If we had a method, it's an incompatible-class-change error. if (resolved != nullptr) { - ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer.Get()); + ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer); } else { // We failed to find the method which means either an access error, an incompatible class // change, or no such method. First try to find the method among direct and virtual methods. @@ -5280,28 +5388,27 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t switch (type) { case kDirect: case kStatic: - resolved = klass->FindVirtualMethod(name, signature); + resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_); // Note: kDirect and kStatic are also mutually exclusive, but in that case we would // have had a resolved method before, which triggers the "true" branch above. break; case kInterface: case kVirtual: case kSuper: - resolved = klass->FindDirectMethod(name, signature); + resolved = klass->FindDirectMethod(name, signature, image_pointer_size_); break; } // If we found something, check that it can be accessed by the referrer. 
bool exception_generated = false; - if (resolved != nullptr && referrer.Get() != nullptr) { + if (resolved != nullptr && referrer != nullptr) { mirror::Class* methods_class = resolved->GetDeclaringClass(); mirror::Class* referring_class = referrer->GetDeclaringClass(); if (!referring_class->CanAccess(methods_class)) { - ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, - resolved, type); + ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, resolved, + type); exception_generated = true; - } else if (!referring_class->CanAccessMember(methods_class, - resolved->GetAccessFlags())) { + } else if (!referring_class->CanAccessMember(methods_class, resolved->GetAccessFlags())) { ThrowIllegalAccessErrorMethod(referring_class, resolved); exception_generated = true; } @@ -5314,11 +5421,11 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t case kDirect: case kStatic: if (resolved != nullptr) { - ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get()); + ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer); } else { - resolved = klass->FindInterfaceMethod(name, signature); + resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_); if (resolved != nullptr) { - ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get()); + ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer); } else { ThrowNoSuchMethodError(type, klass, name, signature); } @@ -5326,11 +5433,11 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t break; case kInterface: if (resolved != nullptr) { - ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get()); + ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer); } else { - resolved = klass->FindVirtualMethod(name, signature); + resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_); if (resolved != nullptr) 
{ - ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get()); + ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer); } else { ThrowNoSuchMethodError(type, klass, name, signature); } @@ -5338,18 +5445,18 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t break; case kSuper: if (resolved != nullptr) { - ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get()); + ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer); } else { ThrowNoSuchMethodError(type, klass, name, signature); } break; case kVirtual: if (resolved != nullptr) { - ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get()); + ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer); } else { - resolved = klass->FindInterfaceMethod(name, signature); + resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_); if (resolved != nullptr) { - ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get()); + ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer); } else { ThrowNoSuchMethodError(type, klass, name, signature); } @@ -5434,7 +5541,7 @@ ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, uint32_t field_i return resolved; } -const char* ClassLinker::MethodShorty(uint32_t method_idx, mirror::ArtMethod* referrer, +const char* ClassLinker::MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length) { mirror::Class* declaring_class = referrer->GetDeclaringClass(); mirror::DexCache* dex_cache = declaring_class->GetDexCache(); @@ -5489,14 +5596,14 @@ const void* ClassLinker::GetRuntimeQuickGenericJniStub() const { return GetQuickGenericJniStub(); } -void ClassLinker::SetEntryPointsToCompiledCode(mirror::ArtMethod* method, +void ClassLinker::SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const { OatFile::OatMethod oat_method = CreateOatMethod(method_code); 
oat_method.LinkMethod(method); method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge); } -void ClassLinker::SetEntryPointsToInterpreter(mirror::ArtMethod* method) const { +void ClassLinker::SetEntryPointsToInterpreter(ArtMethod* method) const { if (!method->IsNative()) { method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge); method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge()); @@ -5557,13 +5664,11 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) { "Ljava/lang/String;", "Ljava/lang/DexCache;", "Ljava/lang/ref/Reference;", - "Ljava/lang/reflect/ArtMethod;", "Ljava/lang/reflect/Constructor;", "Ljava/lang/reflect/Field;", "Ljava/lang/reflect/Method;", "Ljava/lang/reflect/Proxy;", "[Ljava/lang/String;", - "[Ljava/lang/reflect/ArtMethod;", "[Ljava/lang/reflect/Constructor;", "[Ljava/lang/reflect/Field;", "[Ljava/lang/reflect/Method;", @@ -5635,7 +5740,7 @@ std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const char* descr return ComputeModifiedUtf8Hash(descriptor); } -bool ClassLinker::MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m) { +bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) { if (Runtime::Current()->UseJit()) { // JIT can have direct code pointers from any method to any other method. 
return true; @@ -5757,4 +5862,12 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFi return soa.Env()->NewGlobalRef(local_ref.get()); } +ArtMethod* ClassLinker::CreateRuntimeMethod() { + ArtMethod* method = AllocArtMethodArray(Thread::Current(), 1); + CHECK(method != nullptr); + method->SetDexMethodIndex(DexFile::kDexNoIndex); + CHECK(method->IsRuntimeMethod()); + return method; +} + } // namespace art diff --git a/runtime/class_linker.h b/runtime/class_linker.h index 947e15210b..fa8b2e796b 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -42,6 +42,7 @@ namespace space { namespace mirror { class ClassLoader; class DexCache; + class DexCachePointerArray; class DexCacheTest_Open_Test; class IfTable; template<class T> class ObjectArray; @@ -71,13 +72,11 @@ class ClassLinker { kJavaLangString, kJavaLangDexCache, kJavaLangRefReference, - kJavaLangReflectArtMethod, kJavaLangReflectConstructor, kJavaLangReflectField, kJavaLangReflectMethod, kJavaLangReflectProxy, kJavaLangStringArrayClass, - kJavaLangReflectArtMethodArrayClass, kJavaLangReflectConstructorArrayClass, kJavaLangReflectFieldArrayClass, kJavaLangReflectMethodArrayClass, @@ -187,7 +186,7 @@ class ClassLinker { // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. - mirror::String* ResolveString(uint32_t string_idx, mirror::ArtMethod* referrer) + mirror::String* ResolveString(uint32_t string_idx, ArtMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a String with the given index from the DexFile, storing the @@ -205,7 +204,7 @@ class ClassLinker { // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. 
- mirror::Class* ResolveType(uint16_t type_idx, mirror::ArtMethod* referrer) + mirror::Class* ResolveType(uint16_t type_idx, ArtMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* ResolveType(uint16_t type_idx, ArtField* referrer) @@ -225,25 +224,22 @@ class ClassLinker { // in ResolveType. What is unique is the method type argument which // is used to determine if this method is a direct, static, or // virtual method. - mirror::ArtMethod* ResolveMethod(const DexFile& dex_file, - uint32_t method_idx, - Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, - Handle<mirror::ArtMethod> referrer, - InvokeType type) + ArtMethod* ResolveMethod(const DexFile& dex_file, uint32_t method_idx, + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, ArtMethod* referrer, + InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer) + ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, mirror::ArtMethod** referrer, - InvokeType type) + ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ArtField* GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtField* ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer, bool is_static) + ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a field with a given ID from the DexFile, storing the @@ -263,12 +259,12 @@ class ClassLinker { // in ResolveType. 
No is_static argument is provided so that Java // field resolution semantics are followed. ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx, - Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader) + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get shorty from method index without resolution. Used to do handlerization. - const char* MethodShorty(uint32_t method_idx, mirror::ArtMethod* referrer, uint32_t* length) + const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true on success, false if there's an exception pending. @@ -323,7 +319,7 @@ class ClassLinker { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDexFileRegistered(const DexFile& dex_file) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FixupDexCaches(mirror::ArtMethod* resolution_method) + void FixupDexCaches(ArtMethod* resolution_method) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -360,7 +356,9 @@ class ClassLinker { mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::ArtMethod>* AllocArtMethodArray(Thread* self, size_t length) + ArtMethod* AllocArtMethodArray(Thread* self, size_t length); + + mirror::PointerArray* AllocPointerArray(Thread* self, size_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount) @@ -381,7 +379,7 @@ class ClassLinker { void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Handle<mirror::Class> klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::ArtMethod* klass) + void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, ArtMethod* klass) 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name, @@ -390,23 +388,23 @@ class ClassLinker { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string GetDescriptorForProxy(mirror::Class* proxy_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, - mirror::ArtMethod* proxy_method) + ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the oat code for a method when its class isn't yet initialized - const void* GetQuickOatCodeFor(mirror::ArtMethod* method) + const void* GetQuickOatCodeFor(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the oat code for a method from a method index. - const void* GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx) + const void* GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, + uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get compiled code for a method, return null if no code // exists. This is unlike Get..OatCodeFor which will return a bridge // or interpreter entrypoint. - const void* GetOatMethodQuickCodeFor(mirror::ArtMethod* method) + const void* GetOatMethodQuickCodeFor(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); pid_t GetClassesLockOwner(); // For SignalCatcher. @@ -430,11 +428,11 @@ class ClassLinker { } // Set the entrypoints up for method to the given code. - void SetEntryPointsToCompiledCode(mirror::ArtMethod* method, const void* method_code) const + void SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Set the entrypoints up for method to the enter the interpreter. 
- void SetEntryPointsToInterpreter(mirror::ArtMethod* method) const + void SetEntryPointsToInterpreter(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Attempts to insert a class into a class table. Returns null if @@ -444,9 +442,6 @@ class ClassLinker { LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Special code to allocate an art method, use this instead of class->AllocObject. - mirror::ArtMethod* AllocArtMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read(); DCHECK(class_roots != nullptr); @@ -465,7 +460,7 @@ class ClassLinker { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true if the method can be called with its direct code pointer, false otherwise. - bool MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m) + bool MayBeCalledWithDirectCodePointer(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files. @@ -473,11 +468,20 @@ class ClassLinker { jobject CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - private: - static void InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg) + size_t GetImagePointerSize() const { + DCHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_; + return image_pointer_size_; + } + + // Used by image writer for checking. 
+ bool ClassInClassTable(mirror::Class* klass) + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const OatFile::OatMethod FindOatMethodFor(mirror::ArtMethod* method, bool* found) + ArtMethod* CreateRuntimeMethod(); + + private: + const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); OatFile& GetImageOatFile(gc::space::ImageSpace* space) @@ -535,9 +539,8 @@ class ClassLinker { ArtField* dst) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* LoadMethod(Thread* self, const DexFile& dex_file, - const ClassDataItemIterator& dex_method, - Handle<mirror::Class> klass) + void LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it, + Handle<mirror::Class> klass, ArtMethod* dst) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -566,9 +569,8 @@ class ClassLinker { Handle<mirror::ClassLoader> class_loader2) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, mirror::ArtMethod* method, - mirror::Class* klass1, - mirror::Class* klass2) + bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, ArtMethod* method, + mirror::Class* klass1, mirror::Class* klass2) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass, @@ -584,15 +586,15 @@ class ClassLinker { bool LinkMethods(Thread* self, Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, - StackHandleScope<mirror::Class::kImtSize>* out_imt) + ArtMethod** out_imt) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool LinkInterfaceMethods(Thread* const self, Handle<mirror::Class> klass, + bool 
LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, - StackHandleScope<mirror::Class::kImtSize>* out_imt) + ArtMethod** out_imt) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size) @@ -601,12 +603,17 @@ class ClassLinker { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class, + void LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class, uint32_t class_def_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckProxyConstructor(ArtMethod* constructor) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // For use by ImageWriter to find DexCaches for its roots ReaderWriterMutex* DexLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) { @@ -623,10 +630,9 @@ class ClassLinker { // Returns the boot image oat file. const OatFile* GetBootOatFile() SHARED_LOCKS_REQUIRED(dex_lock_); - mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass) + void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* CreateProxyMethod(Thread* self, Handle<mirror::Class> klass, - Handle<mirror::ArtMethod> prototype) + void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Ensures that methods have the kAccPreverified bit set. 
We use the kAccPreverfied bit on the @@ -673,6 +679,16 @@ class ClassLinker { // Check for duplicate class definitions of the given oat file against all open oat files. bool HasCollisions(const OatFile* oat_file, std::string* error_msg) LOCKS_EXCLUDED(dex_lock_); + bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods, + size_t new_num_methods) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); + std::vector<const DexFile*> boot_class_path_; std::vector<std::unique_ptr<const DexFile>> opened_dex_files_; diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index d155941f23..a4e0227a6b 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -20,6 +20,7 @@ #include <string> #include "art_field-inl.h" +#include "art_method-inl.h" #include "class_linker-inl.h" #include "common_runtime_test.h" #include "dex_file.h" @@ -27,7 +28,6 @@ #include "gc/heap.h" #include "mirror/abstract_method.h" #include "mirror/accessible_object.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" #include "mirror/field.h" @@ -159,9 +159,9 @@ class ClassLinkerTest : public CommonRuntimeTest { EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get()); } - void AssertMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void AssertMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { EXPECT_TRUE(method != nullptr); - EXPECT_TRUE(method->GetClass() != nullptr); + EXPECT_TRUE(method->GetDeclaringClass() != nullptr); EXPECT_TRUE(method->GetName() != nullptr); EXPECT_TRUE(method->GetSignature() != 
Signature::NoSignature()); @@ -208,8 +208,8 @@ class ClassLinkerTest : public CommonRuntimeTest { if (klass->IsInterface()) { EXPECT_TRUE(klass->IsAbstract()); if (klass->NumDirectMethods() == 1) { - EXPECT_TRUE(klass->GetDirectMethod(0)->IsClassInitializer()); - EXPECT_TRUE(klass->GetDirectMethod(0)->IsDirect()); + EXPECT_TRUE(klass->GetDirectMethod(0, sizeof(void*))->IsClassInitializer()); + EXPECT_TRUE(klass->GetDirectMethod(0, sizeof(void*))->IsDirect()); } else { EXPECT_EQ(0U, klass->NumDirectMethods()); } @@ -246,18 +246,16 @@ class ClassLinkerTest : public CommonRuntimeTest { EXPECT_FALSE(klass->IsPrimitive()); EXPECT_TRUE(klass->CanAccess(klass.Get())); - for (size_t i = 0; i < klass->NumDirectMethods(); i++) { - mirror::ArtMethod* method = klass->GetDirectMethod(i); - AssertMethod(method); - EXPECT_TRUE(method->IsDirect()); - EXPECT_EQ(klass.Get(), method->GetDeclaringClass()); + for (ArtMethod& method : klass->GetDirectMethods(sizeof(void*))) { + AssertMethod(&method); + EXPECT_TRUE(method.IsDirect()); + EXPECT_EQ(klass.Get(), method.GetDeclaringClass()); } - for (size_t i = 0; i < klass->NumVirtualMethods(); i++) { - mirror::ArtMethod* method = klass->GetVirtualMethod(i); - AssertMethod(method); - EXPECT_FALSE(method->IsDirect()); - EXPECT_TRUE(method->GetDeclaringClass()->IsAssignableFrom(klass.Get())); + for (ArtMethod& method : klass->GetVirtualMethods(sizeof(void*))) { + AssertMethod(&method); + EXPECT_FALSE(method.IsDirect()); + EXPECT_TRUE(method.GetDeclaringClass()->IsAssignableFrom(klass.Get())); } for (size_t i = 0; i < klass->NumInstanceFields(); i++) { @@ -358,9 +356,10 @@ class ClassLinkerTest : public CommonRuntimeTest { class_linker_->VisitRoots(&visitor, kVisitRootFlagAllRoots); // Verify the dex cache has resolution methods in all resolved method slots mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex); - mirror::ObjectArray<mirror::ArtMethod>* resolved_methods = dex_cache->GetResolvedMethods(); + auto* resolved_methods = 
dex_cache->GetResolvedMethods(); for (size_t i = 0; i < static_cast<size_t>(resolved_methods->GetLength()); i++) { - EXPECT_TRUE(resolved_methods->Get(i) != nullptr) << dex.GetLocation() << " i=" << i; + EXPECT_TRUE(resolved_methods->GetElementPtrSize<ArtMethod*>(i, sizeof(void*)) != nullptr) + << dex.GetLocation() << " i=" << i; } } @@ -394,9 +393,8 @@ struct CheckOffsets { bool error = false; - // Methods and classes have a different size due to padding field. Strings are variable length. - if (!klass->IsArtMethodClass() && !klass->IsClassClass() && !klass->IsStringClass() && - !is_static) { + // Classes have a different size due to padding field. Strings are variable length. + if (!klass->IsClassClass() && !klass->IsStringClass() && !is_static) { // Currently only required for AccessibleObject since of the padding fields. The class linker // says AccessibleObject is 9 bytes but sizeof(AccessibleObject) is 12 bytes due to padding. // The RoundUp is to get around this case. @@ -487,20 +485,6 @@ struct ObjectOffsets : public CheckOffsets<mirror::Object> { }; }; -struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> { - ArtMethodOffsets() : CheckOffsets<mirror::ArtMethod>(false, "Ljava/lang/reflect/ArtMethod;") { - addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags"); - addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_), "declaringClass"); - addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_), - "dexCacheResolvedMethods"); - addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_), - "dexCacheResolvedTypes"); - addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset"); - addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex"); - addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex"); - }; -}; - struct ClassOffsets : public CheckOffsets<mirror::Class> { ClassOffsets() : CheckOffsets<mirror::Class>(false, 
"Ljava/lang/Class;") { addOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags"); @@ -516,12 +500,14 @@ struct ClassOffsets : public CheckOffsets<mirror::Class> { addOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields"); addOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable"); addOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name"); + addOffset(OFFSETOF_MEMBER(mirror::Class, num_direct_methods_), "numDirectMethods"); addOffset(OFFSETOF_MEMBER(mirror::Class, num_instance_fields_), "numInstanceFields"); addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_), "numReferenceInstanceFields"); addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_), "numReferenceStaticFields"); addOffset(OFFSETOF_MEMBER(mirror::Class, num_static_fields_), "numStaticFields"); + addOffset(OFFSETOF_MEMBER(mirror::Class, num_virtual_methods_), "numVirtualMethods"); addOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize"); addOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType"); addOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_), @@ -641,7 +627,6 @@ struct AbstractMethodOffsets : public CheckOffsets<mirror::AbstractMethod> { TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) { ScopedObjectAccess soa(Thread::Current()); EXPECT_TRUE(ObjectOffsets().Check()); - EXPECT_TRUE(ArtMethodOffsets().Check()); EXPECT_TRUE(ClassOffsets().Check()); EXPECT_TRUE(StringOffsets().Check()); EXPECT_TRUE(ThrowableOffsets().Check()); @@ -899,7 +884,7 @@ TEST_F(ClassLinkerTest, StaticFields) { // Static final primitives that are initialized by a compile-time constant // expression resolve to a copy of a constant value from the constant pool. // So <clinit> should be null. 
- mirror::ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V"); + ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V", sizeof(void*)); EXPECT_TRUE(clinit == nullptr); EXPECT_EQ(9U, statics->NumStaticFields()); @@ -986,15 +971,15 @@ TEST_F(ClassLinkerTest, Interfaces) { EXPECT_TRUE(J->IsAssignableFrom(B.Get())); const Signature void_sig = I->GetDexCache()->GetDexFile()->CreateSignature("()V"); - mirror::ArtMethod* Ii = I->FindVirtualMethod("i", void_sig); - mirror::ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig); - mirror::ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig); - mirror::ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig); - mirror::ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig); - mirror::ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig); - mirror::ArtMethod* Ai = A->FindVirtualMethod("i", void_sig); - mirror::ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig); - mirror::ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig); + ArtMethod* Ii = I->FindVirtualMethod("i", void_sig, sizeof(void*)); + ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig, sizeof(void*)); + ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig, sizeof(void*)); + ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig, sizeof(void*)); + ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig, sizeof(void*)); + ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig, sizeof(void*)); + ArtMethod* Ai = A->FindVirtualMethod("i", void_sig, sizeof(void*)); + ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig, sizeof(void*)); + ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig, sizeof(void*)); ASSERT_TRUE(Ii != nullptr); ASSERT_TRUE(Jj1 != nullptr); ASSERT_TRUE(Jj2 != nullptr); @@ -1009,21 +994,17 @@ TEST_F(ClassLinkerTest, Interfaces) { EXPECT_NE(Jj2, Aj2); EXPECT_EQ(Kj1, Jj1); EXPECT_EQ(Kj2, Jj2); - EXPECT_EQ(Ai, A->FindVirtualMethodForInterface(Ii)); - EXPECT_EQ(Aj1, A->FindVirtualMethodForInterface(Jj1)); - EXPECT_EQ(Aj2, 
A->FindVirtualMethodForInterface(Jj2)); - EXPECT_EQ(Ai, A->FindVirtualMethodForVirtualOrInterface(Ii)); - EXPECT_EQ(Aj1, A->FindVirtualMethodForVirtualOrInterface(Jj1)); - EXPECT_EQ(Aj2, A->FindVirtualMethodForVirtualOrInterface(Jj2)); - - ArtField* Afoo = mirror::Class::FindStaticField(soa.Self(), A, "foo", - "Ljava/lang/String;"); - ArtField* Bfoo = mirror::Class::FindStaticField(soa.Self(), B, "foo", - "Ljava/lang/String;"); - ArtField* Jfoo = mirror::Class::FindStaticField(soa.Self(), J, "foo", - "Ljava/lang/String;"); - ArtField* Kfoo = mirror::Class::FindStaticField(soa.Self(), K, "foo", - "Ljava/lang/String;"); + EXPECT_EQ(Ai, A->FindVirtualMethodForInterface(Ii, sizeof(void*))); + EXPECT_EQ(Aj1, A->FindVirtualMethodForInterface(Jj1, sizeof(void*))); + EXPECT_EQ(Aj2, A->FindVirtualMethodForInterface(Jj2, sizeof(void*))); + EXPECT_EQ(Ai, A->FindVirtualMethodForVirtualOrInterface(Ii, sizeof(void*))); + EXPECT_EQ(Aj1, A->FindVirtualMethodForVirtualOrInterface(Jj1, sizeof(void*))); + EXPECT_EQ(Aj2, A->FindVirtualMethodForVirtualOrInterface(Jj2, sizeof(void*))); + + ArtField* Afoo = mirror::Class::FindStaticField(soa.Self(), A, "foo", "Ljava/lang/String;"); + ArtField* Bfoo = mirror::Class::FindStaticField(soa.Self(), B, "foo", "Ljava/lang/String;"); + ArtField* Jfoo = mirror::Class::FindStaticField(soa.Self(), J, "foo", "Ljava/lang/String;"); + ArtField* Kfoo = mirror::Class::FindStaticField(soa.Self(), K, "foo", "Ljava/lang/String;"); ASSERT_TRUE(Afoo != nullptr); EXPECT_EQ(Afoo, Bfoo); EXPECT_EQ(Afoo, Jfoo); @@ -1043,17 +1024,17 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) { Handle<mirror::ClassLoader> class_loader( hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader); - mirror::ArtMethod* clinit = klass->FindClassInitializer(); - mirror::ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;"); + ArtMethod* clinit = 
klass->FindClassInitializer(sizeof(void*)); + ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;", sizeof(void*)); const DexFile::StringId* string_id = dex_file->FindStringId("LStaticsFromCode;"); ASSERT_TRUE(string_id != nullptr); const DexFile::TypeId* type_id = dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id)); ASSERT_TRUE(type_id != nullptr); uint32_t type_idx = dex_file->GetIndexForTypeId(*type_id); - mirror::Class* uninit = ResolveVerifyAndClinit(type_idx, clinit, Thread::Current(), true, false); + mirror::Class* uninit = ResolveVerifyAndClinit(type_idx, clinit, soa.Self(), true, false); EXPECT_TRUE(uninit != nullptr); EXPECT_FALSE(uninit->IsInitialized()); - mirror::Class* init = ResolveVerifyAndClinit(type_idx, getS0, Thread::Current(), true, false); + mirror::Class* init = ResolveVerifyAndClinit(type_idx, getS0, soa.Self(), true, false); EXPECT_TRUE(init != nullptr); EXPECT_TRUE(init->IsInitialized()); } @@ -1109,22 +1090,23 @@ TEST_F(ClassLinkerTest, ValidatePredefinedClassSizes) { mirror::Class* c; c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Class;", class_loader); - EXPECT_EQ(c->GetClassSize(), mirror::Class::ClassClassSize()); + ASSERT_TRUE(c != nullptr); + EXPECT_EQ(c->GetClassSize(), mirror::Class::ClassClassSize(sizeof(void*))); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Object;", class_loader); - EXPECT_EQ(c->GetClassSize(), mirror::Object::ClassSize()); + ASSERT_TRUE(c != nullptr); + EXPECT_EQ(c->GetClassSize(), mirror::Object::ClassSize(sizeof(void*))); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/String;", class_loader); - EXPECT_EQ(c->GetClassSize(), mirror::String::ClassSize()); + ASSERT_TRUE(c != nullptr); + EXPECT_EQ(c->GetClassSize(), mirror::String::ClassSize(sizeof(void*))); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/DexCache;", class_loader); - EXPECT_EQ(c->GetClassSize(), mirror::DexCache::ClassSize()); - - c = class_linker_->FindClass(soa.Self(), 
"Ljava/lang/reflect/ArtMethod;", class_loader); - EXPECT_EQ(c->GetClassSize(), mirror::ArtMethod::ClassSize()); + ASSERT_TRUE(c != nullptr); + EXPECT_EQ(c->GetClassSize(), mirror::DexCache::ClassSize(sizeof(void*))); } -static void CheckMethod(mirror::ArtMethod* method, bool verified) +static void CheckMethod(ArtMethod* method, bool verified) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!method->IsNative() && !method->IsAbstract()) { EXPECT_EQ((method->GetAccessFlags() & kAccPreverified) != 0U, verified) @@ -1136,11 +1118,11 @@ static void CheckPreverified(mirror::Class* c, bool preverified) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { EXPECT_EQ((c->GetAccessFlags() & kAccPreverified) != 0U, preverified) << "Class " << PrettyClass(c) << " not as expected"; - for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) { - CheckMethod(c->GetDirectMethod(i), preverified); + for (auto& m : c->GetDirectMethods(sizeof(void*))) { + CheckMethod(&m, preverified); } - for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) { - CheckMethod(c->GetVirtualMethod(i), preverified); + for (auto& m : c->GetVirtualMethods(sizeof(void*))) { + CheckMethod(&m, preverified); } } diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc index de3a29b0d4..5f9e413ed2 100644 --- a/runtime/common_runtime_test.cc +++ b/runtime/common_runtime_test.cc @@ -24,6 +24,7 @@ #include <stdlib.h> #include "../../external/icu/icu4c/source/common/unicode/uvernum.h" +#include "art_field-inl.h" #include "base/macros.h" #include "base/logging.h" #include "base/stl_util.h" @@ -31,17 +32,19 @@ #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "compiler_callbacks.h" -#include "dex_file.h" +#include "dex_file-inl.h" #include "gc_root-inl.h" #include "gc/heap.h" #include "gtest/gtest.h" #include "handle_scope-inl.h" #include "interpreter/unstarted_runtime.h" #include "jni_internal.h" +#include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mem_map.h" #include 
"noop_compiler_callbacks.h" #include "os.h" +#include "primitive.h" #include "runtime-inl.h" #include "scoped_thread_state_change.h" #include "thread.h" diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index fb81ad2f01..3acd366cd2 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -19,12 +19,12 @@ #include <sstream> #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/logging.h" #include "class_linker-inl.h" #include "dex_file-inl.h" #include "dex_instruction-inl.h" #include "invoke_type.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" @@ -78,7 +78,7 @@ static void ThrowWrappedException(const char* exception_descriptor, // AbstractMethodError -void ThrowAbstractMethodError(mirror::ArtMethod* method) { +void ThrowAbstractMethodError(ArtMethod* method) { ThrowException("Ljava/lang/AbstractMethodError;", nullptr, StringPrintf("abstract method \"%s\"", PrettyMethod(method).c_str()).c_str()); @@ -145,7 +145,7 @@ void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* access } void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed, - mirror::ArtMethod* called, + ArtMethod* called, InvokeType type) { std::ostringstream msg; msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' attempting to access '" @@ -154,7 +154,7 @@ void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirr ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str()); } -void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::ArtMethod* accessed) { +void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed) { std::ostringstream msg; msg << "Method '" << PrettyMethod(accessed) << "' is inaccessible to class '" << PrettyDescriptor(referrer) << "'"; @@ -168,13 +168,12 @@ void 
ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed) { ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str()); } -void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer, - ArtField* accessed) { +void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed) { std::ostringstream msg; msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '" << PrettyMethod(referrer) << "'"; ThrowException("Ljava/lang/IllegalAccessError;", - referrer != nullptr ? referrer->GetClass() : nullptr, + referrer != nullptr ? referrer->GetDeclaringClass() : nullptr, msg.str().c_str()); } @@ -201,19 +200,18 @@ void ThrowIllegalArgumentException(const char* msg) { // IncompatibleClassChangeError void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, - mirror::ArtMethod* method, - mirror::ArtMethod* referrer) { + ArtMethod* method, ArtMethod* referrer) { std::ostringstream msg; msg << "The method '" << PrettyMethod(method) << "' was expected to be of type " << expected_type << " but instead was found to be of type " << found_type; ThrowException("Ljava/lang/IncompatibleClassChangeError;", - referrer != nullptr ? referrer->GetClass() : nullptr, + referrer != nullptr ? referrer->GetDeclaringClass() : nullptr, msg.str().c_str()); } -void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(mirror::ArtMethod* interface_method, +void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method, mirror::Object* this_object, - mirror::ArtMethod* referrer) { + ArtMethod* referrer) { // Referrer is calling interface_method on this_object, however, the interface_method isn't // implemented by this_object. 
CHECK(this_object != nullptr); @@ -223,17 +221,17 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(mirror::ArtMetho << PrettyDescriptor(interface_method->GetDeclaringClass()) << "' in call to '" << PrettyMethod(interface_method) << "'"; ThrowException("Ljava/lang/IncompatibleClassChangeError;", - referrer != nullptr ? referrer->GetClass() : nullptr, + referrer != nullptr ? referrer->GetDeclaringClass() : nullptr, msg.str().c_str()); } void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static, - mirror::ArtMethod* referrer) { + ArtMethod* referrer) { std::ostringstream msg; msg << "Expected '" << PrettyField(resolved_field) << "' to be a " << (is_static ? "static" : "instance") << " field" << " rather than a " << (is_static ? "instance" : "static") << " field"; - ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer->GetClass(), + ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer->GetDeclaringClass(), msg.str().c_str()); } @@ -317,7 +315,7 @@ void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece } void ThrowNoSuchMethodError(uint32_t method_idx) { - mirror::ArtMethod* method = Thread::Current()->GetCurrentMethod(nullptr); + ArtMethod* method = Thread::Current()->GetCurrentMethod(nullptr); mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache(); const DexFile& dex_file = *dex_cache->GetDexFile(); std::ostringstream msg; @@ -353,7 +351,7 @@ void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx, ThrowNullPointerExceptionForMethodAccessImpl(method_idx, dex_file, type); } -void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* method, +void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method, InvokeType type) { mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache(); const DexFile& dex_file = *dex_cache->GetDexFile(); @@ -363,7 +361,7 @@ void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* 
method, void ThrowNullPointerExceptionFromDexPC() { uint32_t throw_dex_pc; - mirror::ArtMethod* method = Thread::Current()->GetCurrentMethod(&throw_dex_pc); + ArtMethod* method = Thread::Current()->GetCurrentMethod(&throw_dex_pc); const DexFile::CodeItem* code = method->GetCodeItem(); CHECK_LT(throw_dex_pc, code->insns_size_in_code_units_); const Instruction* instr = Instruction::At(&code->insns_[throw_dex_pc]); @@ -390,7 +388,7 @@ void ThrowNullPointerExceptionFromDexPC() { case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: { // Since we replaced the method index, we ask the verifier to tell us which // method is invoked at this location. - mirror::ArtMethod* invoked_method = + ArtMethod* invoked_method = verifier::MethodVerifier::FindInvokedMethodAtDexPc(method, throw_dex_pc); if (invoked_method != nullptr) { // NPE with precise message. diff --git a/runtime/common_throws.h b/runtime/common_throws.h index bd667fadc7..b391c5b92e 100644 --- a/runtime/common_throws.h +++ b/runtime/common_throws.h @@ -22,17 +22,17 @@ namespace art { namespace mirror { - class ArtMethod; class Class; class Object; } // namespace mirror class ArtField; +class ArtMethod; class Signature; class StringPiece; // AbstractMethodError -void ThrowAbstractMethodError(mirror::ArtMethod* method) +void ThrowAbstractMethodError(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; // ArithmeticException @@ -74,17 +74,17 @@ void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* access SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed, - mirror::ArtMethod* called, + ArtMethod* called, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; -void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::ArtMethod* accessed) +void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed) 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; -void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer, ArtField* accessed) +void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) @@ -104,16 +104,16 @@ void ThrowIllegalArgumentException(const char* msg) // IncompatibleClassChangeError void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, - mirror::ArtMethod* method, mirror::ArtMethod* referrer) + ArtMethod* method, ArtMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; -void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(mirror::ArtMethod* interface_method, +void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method, mirror::Object* this_object, - mirror::ArtMethod* referrer) + ArtMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static, - mirror::ArtMethod* referrer) + ArtMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...) 
@@ -175,7 +175,7 @@ void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; -void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* method, +void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 728e8e3ccb..24615e2a66 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -22,6 +22,7 @@ #include "arch/context.h" #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/time_utils.h" #include "class_linker.h" #include "class_linker-inl.h" @@ -32,7 +33,6 @@ #include "gc/space/space-inl.h" #include "handle_scope.h" #include "jdwp/object_registry.h" -#include "mirror/art_method-inl.h" #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" @@ -78,17 +78,17 @@ class AllocRecordStackTraceElement { } int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = Method(); + ArtMethod* method = Method(); DCHECK(method != nullptr); return method->GetLineNumFromDexPC(DexPc()); } - mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedObjectAccessUnchecked soa(Thread::Current()); return soa.DecodeMethod(method_); } - void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedObjectAccessUnchecked soa(Thread::Current()); method_ = soa.EncodeMethod(m); } @@ -184,7 +184,7 @@ class AllocRecord { class Breakpoint { public: - Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, + Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : method_(nullptr), dex_pc_(dex_pc), 
deoptimization_kind_(deoptimization_kind) { @@ -202,7 +202,7 @@ class Breakpoint { method_ = soa.EncodeMethod(other.Method()); } - mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedObjectAccessUnchecked soa(Thread::Current()); return soa.DecodeMethod(method_); } @@ -235,7 +235,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati DebugInstrumentationListener() {} virtual ~DebugInstrumentationListener() {} - void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, + void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (method->IsNative()) { @@ -261,7 +261,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati } } - void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, + void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (method->IsNative()) { @@ -279,14 +279,14 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati } void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method, uint32_t dex_pc) + ArtMethod* method, uint32_t dex_pc) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // We're not recorded to listen to this kind of event, so complain. 
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method) << " " << dex_pc; } - void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, + void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t new_dex_pc) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) { @@ -308,13 +308,13 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati } void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field) + ArtMethod* method, uint32_t dex_pc, ArtField* field) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field); } void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field, + ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value); @@ -326,14 +326,14 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati } // We only care about how many backward branches were executed in the Jit. 
- void BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method, int32_t dex_pc_offset) + void BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t dex_pc_offset) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method) << " " << dex_pc_offset; } private: - static bool IsReturn(mirror::ArtMethod* method, uint32_t dex_pc) + static bool IsReturn(ArtMethod* method, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = method->GetCodeItem(); const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]); @@ -408,11 +408,6 @@ static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_); void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) { receiver.VisitRootIfNonNull(visitor, root_info); // null for static method call. klass.VisitRoot(visitor, root_info); - method.VisitRoot(visitor, root_info); -} - -void SingleStepControl::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) { - method_.VisitRootIfNonNull(visitor, root_info); } void SingleStepControl::AddDexPc(uint32_t dex_pc) { @@ -423,7 +418,7 @@ bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const { return dex_pcs_.find(dex_pc) == dex_pcs_.end(); } -static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc) +static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc) LOCKS_EXCLUDED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); @@ -1395,9 +1390,8 @@ JDWP::FieldId Dbg::ToFieldId(const ArtField* f) { return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f)); } -static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m) +static JDWP::MethodId ToMethodId(const ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CHECK(!kMovingMethods); return 
static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m)); } @@ -1406,17 +1400,16 @@ static ArtField* FromFieldId(JDWP::FieldId fid) return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid)); } -static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid) +static ArtMethod* FromMethodId(JDWP::MethodId mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CHECK(!kMovingMethods); - return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid)); + return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid)); } bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) { CHECK(event_thread != nullptr); JDWP::JdwpError error; - mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(expected_thread_id, - &error); + mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>( + expected_thread_id, &error); return expected_thread_peer == event_thread->GetPeer(); } @@ -1425,7 +1418,7 @@ bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location, if (expected_location.dex_pc != event_location.dex_pc) { return false; } - mirror::ArtMethod* m = FromMethodId(expected_location.method_id); + ArtMethod* m = FromMethodId(expected_location.method_id); return m == event_location.method; } @@ -1454,7 +1447,7 @@ bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* eve return modifier_instance == event_instance; } -void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc) +void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) { @@ -1470,11 +1463,11 @@ void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, ui } std::string Dbg::GetMethodName(JDWP::MethodId method_id) { - mirror::ArtMethod* m = FromMethodId(method_id); + ArtMethod* m = FromMethodId(method_id); if (m == nullptr) { 
return "null"; } - return m->GetName(); + return m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName(); } std::string Dbg::GetFieldName(JDWP::FieldId field_id) { @@ -1503,7 +1496,7 @@ static uint32_t MangleAccessFlags(uint32_t accessFlags) { * expect slots to begin with arguments, but dex code places them at * the end. */ -static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m) +static uint16_t MangleSlot(uint16_t slot, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { @@ -1525,14 +1518,14 @@ static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m) * Circularly shifts registers so that arguments come last. Reverts * slots to dex style argument placement. */ -static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m, JDWP::JdwpError* error) +static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { // We should not get here for a method without code (native, proxy or abstract). Log it and // return the slot as is since all registers are arguments. LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m); - uint16_t vreg_count = mirror::ArtMethod::NumArgRegisters(m->GetShorty()); + uint16_t vreg_count = ArtMethod::NumArgRegisters(m->GetShorty()); if (slot < vreg_count) { *error = JDWP::ERR_NONE; return slot; @@ -1591,14 +1584,18 @@ JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_g expandBufAdd4BE(pReply, direct_method_count + virtual_method_count); + auto* cl = Runtime::Current()->GetClassLinker(); + auto ptr_size = cl->GetImagePointerSize(); for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) { - mirror::ArtMethod* m = (i < direct_method_count) ? 
c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count); + ArtMethod* m = i < direct_method_count ? + c->GetDirectMethod(i, ptr_size) : c->GetVirtualMethod(i - direct_method_count, ptr_size); expandBufAddMethodId(pReply, ToMethodId(m)); - expandBufAddUtf8String(pReply, m->GetName()); - expandBufAddUtf8String(pReply, m->GetSignature().ToString()); + expandBufAddUtf8String(pReply, m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName()); + expandBufAddUtf8String(pReply, + m->GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString()); if (with_generic) { - static const char genericSignature[1] = ""; - expandBufAddUtf8String(pReply, genericSignature); + const char* generic_signature = ""; + expandBufAddUtf8String(pReply, generic_signature); } expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags())); } @@ -1635,7 +1632,7 @@ void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::Expan return false; } }; - mirror::ArtMethod* m = FromMethodId(method_id); + ArtMethod* m = FromMethodId(method_id); const DexFile::CodeItem* code_item = m->GetCodeItem(); uint64_t start, end; if (code_item == nullptr) { @@ -1670,7 +1667,7 @@ void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::Expan void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic, JDWP::ExpandBuf* pReply) { struct DebugCallbackContext { - mirror::ArtMethod* method; + ArtMethod* method; JDWP::ExpandBuf* pReply; size_t variable_count; bool with_generic; @@ -1699,12 +1696,12 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi ++pContext->variable_count; } }; - mirror::ArtMethod* m = FromMethodId(method_id); + ArtMethod* m = FromMethodId(method_id); // arg_count considers doubles and longs to take 2 units. // variable_count considers everything to take 1 unit. 
std::string shorty(m->GetShorty()); - expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty)); + expandBufAdd4BE(pReply, ArtMethod::NumArgRegisters(shorty)); // We don't know the total number of variables yet, so leave a blank and update it later. size_t variable_count_offset = expandBufGetLength(pReply); @@ -1728,7 +1725,7 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value, JDWP::ExpandBuf* pReply) { - mirror::ArtMethod* m = FromMethodId(method_id); + ArtMethod* m = FromMethodId(method_id); JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty()); OutputJValue(tag, return_value, pReply); } @@ -1742,7 +1739,7 @@ void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value, JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id, std::vector<uint8_t>* bytecodes) { - mirror::ArtMethod* m = FromMethodId(method_id); + ArtMethod* m = FromMethodId(method_id); if (m == nullptr) { return JDWP::ERR_INVALID_METHODID; } @@ -2470,7 +2467,7 @@ class FindFrameVisitor FINAL : public StackVisitor { if (GetFrameId() != frame_id_) { return true; // Not our frame, carry on. } - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m->IsNative()) { // We can't read/write local value from/into native method. 
error_ = JDWP::ERR_OPAQUE_FRAME; @@ -2548,7 +2545,7 @@ static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t v JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) { - mirror::ArtMethod* m = visitor.GetMethod(); + ArtMethod* m = visitor.GetMethod(); JDWP::JdwpError error = JDWP::ERR_NONE; uint16_t vreg = DemangleSlot(slot, m, &error); if (error != JDWP::ERR_NONE) { @@ -2711,7 +2708,7 @@ static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t v JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) { - mirror::ArtMethod* m = visitor.GetMethod(); + ArtMethod* m = visitor.GetMethod(); JDWP::JdwpError error = JDWP::ERR_NONE; uint16_t vreg = DemangleSlot(slot, m, &error); if (error != JDWP::ERR_NONE) { @@ -2786,7 +2783,7 @@ JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTa return JDWP::ERR_NONE; } -static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc) +static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(location != nullptr); if (m == nullptr) { @@ -2797,7 +2794,7 @@ static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m } } -void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object, +void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object, int event_flags, const JValue* return_value) { if (!IsDebuggerActive()) { return; @@ -2830,7 +2827,7 @@ void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* th } } -void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc, +void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object, ArtField* f) { if (!IsDebuggerActive()) 
{ return; @@ -2843,7 +2840,7 @@ void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc, gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false); } -void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc, +void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object, ArtField* f, const JValue* field_value) { if (!IsDebuggerActive()) { @@ -2871,14 +2868,14 @@ class CatchLocationFinder : public StackVisitor { exception_(exception), handle_scope_(self), this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)), - catch_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)), - throw_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)), + catch_method_(nullptr), + throw_method_(nullptr), catch_dex_pc_(DexFile::kDexNoIndex), throw_dex_pc_(DexFile::kDexNoIndex) { } bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = GetMethod(); + ArtMethod* method = GetMethod(); DCHECK(method != nullptr); if (method->IsRuntimeMethod()) { // Ignore callee save method. @@ -2887,25 +2884,23 @@ class CatchLocationFinder : public StackVisitor { } uint32_t dex_pc = GetDexPc(); - if (throw_method_.Get() == nullptr) { + if (throw_method_ == nullptr) { // First Java method found. It is either the method that threw the exception, // or the Java native method that is reporting an exception thrown by // native code. 
this_at_throw_.Assign(GetThisObject()); - throw_method_.Assign(method); + throw_method_ = method; throw_dex_pc_ = dex_pc; } if (dex_pc != DexFile::kDexNoIndex) { - StackHandleScope<2> hs(self_); + StackHandleScope<1> hs(self_); uint32_t found_dex_pc; Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass())); - Handle<mirror::ArtMethod> h_method(hs.NewHandle(method)); bool unused_clear_exception; - found_dex_pc = mirror::ArtMethod::FindCatchBlock( - h_method, exception_class, dex_pc, &unused_clear_exception); + found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception); if (found_dex_pc != DexFile::kDexNoIndex) { - catch_method_.Assign(method); + catch_method_ = method; catch_dex_pc_ = found_dex_pc; return false; // End stack walk. } @@ -2913,12 +2908,12 @@ class CatchLocationFinder : public StackVisitor { return true; // Continue stack walk. } - mirror::ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return catch_method_.Get(); + ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return catch_method_; } - mirror::ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return throw_method_.Get(); + ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return throw_method_; } mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -2936,10 +2931,10 @@ class CatchLocationFinder : public StackVisitor { private: Thread* const self_; const Handle<mirror::Throwable>& exception_; - StackHandleScope<3> handle_scope_; + StackHandleScope<1> handle_scope_; MutableHandle<mirror::Object> this_at_throw_; - MutableHandle<mirror::ArtMethod> catch_method_; - MutableHandle<mirror::ArtMethod> throw_method_; + ArtMethod* catch_method_; + ArtMethod* throw_method_; uint32_t catch_dex_pc_; uint32_t throw_dex_pc_; @@ -2973,7 +2968,7 @@ void Dbg::PostClassPrepare(mirror::Class* c) { } void Dbg::UpdateDebugger(Thread* thread, 
mirror::Object* this_object, - mirror::ArtMethod* m, uint32_t dex_pc, + ArtMethod* m, uint32_t dex_pc, int event_flags, const JValue* return_value) { if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) { return; @@ -3222,7 +3217,7 @@ void Dbg::ManageDeoptimization() { self->TransitionFromSuspendedToRunnable(); } -static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m) +static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { @@ -3232,19 +3227,18 @@ static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m) } // Note: method verifier may cause thread suspension. self->AssertThreadSuspensionIsAllowable(); - StackHandleScope<3> hs(self); + StackHandleScope<2> hs(self); mirror::Class* declaring_class = m->GetDeclaringClass(); Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache())); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader())); - Handle<mirror::ArtMethod> method(hs.NewHandle(m)); verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader, - &m->GetClassDef(), code_item, m->GetDexMethodIndex(), method, + &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true, false, true); // Note: we don't need to verify the method. 
return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr); } -static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m) +static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) { for (Breakpoint& breakpoint : gBreakpoints) { if (breakpoint.Method() == m) { @@ -3254,13 +3248,13 @@ static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m) return nullptr; } -bool Dbg::MethodHasAnyBreakpoints(mirror::ArtMethod* method) { +bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) { ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); return FindFirstBreakpointForMethod(method) != nullptr; } // Sanity checks all existing breakpoints on the same method. -static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, +static void SanityCheckExistingBreakpoints(ArtMethod* m, DeoptimizationRequest::Kind deoptimization_kind) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) { for (const Breakpoint& breakpoint : gBreakpoints) { @@ -3289,7 +3283,7 @@ static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, // If a breakpoint has already been set, we also return the first breakpoint // through the given 'existing_brkpt' pointer. static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self, - mirror::ArtMethod* m, + ArtMethod* m, const Breakpoint** existing_brkpt) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!Dbg::RequiresDeoptimization()) { @@ -3353,7 +3347,7 @@ static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self, // request if we need to deoptimize. 
void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) { Thread* const self = Thread::Current(); - mirror::ArtMethod* m = FromMethodId(location->method_id); + ArtMethod* m = FromMethodId(location->method_id); DCHECK(m != nullptr) << "No method for method id " << location->method_id; const Breakpoint* existing_breakpoint = nullptr; @@ -3388,7 +3382,7 @@ void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationReques // request if we need to undeoptimize. void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) { WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); - mirror::ArtMethod* m = FromMethodId(location->method_id); + ArtMethod* m = FromMethodId(location->method_id); DCHECK(m != nullptr) << "No method for method id " << location->method_id; DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing; for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) { @@ -3428,7 +3422,7 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequ } } -bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMethod* m) { +bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) { const SingleStepControl* const ssc = thread->GetSingleStepControl(); if (ssc == nullptr) { // If we are not single-stepping, then we don't have to force interpreter. @@ -3448,7 +3442,7 @@ bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMet return false; } -bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) { +bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) { instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation(); // If we are in interpreter only mode, then we don't have to force interpreter. 
@@ -3482,7 +3476,7 @@ bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::Art return instrumentation->IsDeoptimized(m); } -bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) { +bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) { // The upcall can be null and in that case we don't need to do anything. if (m == nullptr) { return false; @@ -3519,7 +3513,7 @@ bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror: return instrumentation->IsDeoptimized(m); } -bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m) { +bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) { // The upcall can be null and in that case we don't need to do anything. if (m == nullptr) { return false; @@ -3623,7 +3617,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses // annotalysis. bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (!m->IsRuntimeMethod()) { ++stack_depth; if (method == nullptr) { @@ -3639,7 +3633,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize } int stack_depth; - mirror::ArtMethod* method; + ArtMethod* method; int32_t line_number; }; @@ -3701,7 +3695,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize return JDWP::ERR_OUT_OF_MEMORY; } - mirror::ArtMethod* m = single_step_control->GetMethod(); + ArtMethod* m = single_step_control->GetMethod(); const int32_t line_number = visitor.line_number; // Note: if the thread is not running Java code (pure native thread), there is no "current" // method on the stack (and no line number either). 
@@ -3838,7 +3832,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec return error; } - mirror::ArtMethod* m = FromMethodId(method_id); + ArtMethod* m = FromMethodId(method_id); if (m->IsStatic() != (receiver == nullptr)) { return JDWP::ERR_INVALID_METHODID; } @@ -3860,8 +3854,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec } { - StackHandleScope<3> hs(soa.Self()); - HandleWrapper<mirror::ArtMethod> h_m(hs.NewHandleWrapper(&m)); + StackHandleScope<2> hs(soa.Self()); HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver)); HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c)); const DexFile::TypeList* types = m->GetParameterTypeList(); @@ -3873,7 +3866,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec if (shorty[i + 1] == 'L') { // Did we really get an argument of an appropriate reference type? mirror::Class* parameter_type = - h_m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_, true); + m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_, true); mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error); if (error != JDWP::ERR_NONE) { return JDWP::ERR_INVALID_OBJECT; @@ -3976,32 +3969,34 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { // We can be called while an exception is pending. We need // to preserve that across the method invocation. - StackHandleScope<4> hs(soa.Self()); + StackHandleScope<3> hs(soa.Self()); auto old_exception = hs.NewHandle<mirror::Throwable>(soa.Self()->GetException()); soa.Self()->ClearException(); // Translate the method through the vtable, unless the debugger wants to suppress it. 
- MutableHandle<mirror::ArtMethod> m(hs.NewHandle(pReq->method.Read())); + auto* m = pReq->method; + auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) { - mirror::ArtMethod* actual_method = pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m.Get()); - if (actual_method != m.Get()) { - VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) + ArtMethod* actual_method = + pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size); + if (actual_method != m) { + VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m) << " to " << PrettyMethod(actual_method); - m.Assign(actual_method); + m = actual_method; } } - VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get()) + VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m) << " receiver=" << pReq->receiver.Read() << " arg_count=" << pReq->arg_count; - CHECK(m.Get() != nullptr); + CHECK(m != nullptr); CHECK_EQ(sizeof(jvalue), sizeof(uint64_t)); ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read())); - JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m.Get()), + JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m), reinterpret_cast<jvalue*>(pReq->arg_values)); - pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty()); + pReq->result_tag = BasicTagFromDescriptor(m->GetShorty()); const bool is_object_result = (pReq->result_tag == JDWP::JT_OBJECT); Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? 
result.GetL() : nullptr); Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException()); @@ -4744,7 +4739,7 @@ struct AllocRecordStackVisitor : public StackVisitor { if (depth >= kMaxAllocRecordStackDepth) { return false; } - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (!m->IsRuntimeMethod()) { record->StackElement(depth)->SetMethod(m); record->StackElement(depth)->SetDexPc(GetDexPc()); @@ -4828,7 +4823,7 @@ void Dbg::DumpRecentAllocations() { for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) { AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame); - mirror::ArtMethod* m = stack_element->Method(); + ArtMethod* m = stack_element->Method(); if (m == nullptr) { break; } @@ -4884,7 +4879,7 @@ class StringTable { DISALLOW_COPY_AND_ASSIGN(StringTable); }; -static const char* GetMethodSourceFile(mirror::ArtMethod* method) +static const char* GetMethodSourceFile(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(method != nullptr); const char* source_file = method->GetDeclaringClassSourceFile(); @@ -4957,7 +4952,7 @@ jbyteArray Dbg::GetRecentAllocations() { std::string temp; class_names.Add(record->Type()->GetDescriptor(&temp)); for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) { - mirror::ArtMethod* m = record->StackElement(i)->Method(); + ArtMethod* m = record->StackElement(i)->Method(); if (m != nullptr) { class_names.Add(m->GetDeclaringClassDescriptor()); method_names.Add(m->GetName()); @@ -5019,7 +5014,7 @@ jbyteArray Dbg::GetRecentAllocations() { // (2b) method name // (2b) method source file // (2b) line number, clipped to 32767; -2 if native; -1 if no source - mirror::ArtMethod* m = record->StackElement(stack_frame)->Method(); + ArtMethod* m = record->StackElement(stack_frame)->Method(); size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor()); size_t method_name_index = method_names.IndexOf(m->GetName()); size_t 
file_name_index = filenames.IndexOf(GetMethodSourceFile(m)); @@ -5047,12 +5042,12 @@ jbyteArray Dbg::GetRecentAllocations() { return result; } -mirror::ArtMethod* DeoptimizationRequest::Method() const { +ArtMethod* DeoptimizationRequest::Method() const { ScopedObjectAccessUnchecked soa(Thread::Current()); return soa.DecodeMethod(method_); } -void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) { +void DeoptimizationRequest::SetMethod(ArtMethod* m) { ScopedObjectAccessUnchecked soa(Thread::Current()); method_ = soa.EncodeMethod(m); } diff --git a/runtime/debugger.h b/runtime/debugger.h index 811d345262..7c586a4ff9 100644 --- a/runtime/debugger.h +++ b/runtime/debugger.h @@ -37,13 +37,13 @@ namespace art { namespace mirror { -class ArtMethod; class Class; class Object; class Throwable; } // namespace mirror class AllocRecord; class ArtField; +class ArtMethod; class ObjectRegistry; class ScopedObjectAccessUnchecked; class StackVisitor; @@ -54,7 +54,7 @@ class Thread; */ struct DebugInvokeReq { DebugInvokeReq(mirror::Object* invoke_receiver, mirror::Class* invoke_class, - mirror::ArtMethod* invoke_method, uint32_t invoke_options, + ArtMethod* invoke_method, uint32_t invoke_options, uint64_t* args, uint32_t args_count) : receiver(invoke_receiver), klass(invoke_class), method(invoke_method), arg_count(args_count), arg_values(args), options(invoke_options), @@ -66,7 +66,7 @@ struct DebugInvokeReq { /* request */ GcRoot<mirror::Object> receiver; // not used for ClassType.InvokeMethod GcRoot<mirror::Class> klass; - GcRoot<mirror::ArtMethod> method; + ArtMethod* method; const uint32_t arg_count; uint64_t* const arg_values; // will be null if arg_count_ == 0 const uint32_t options; @@ -92,7 +92,7 @@ struct DebugInvokeReq { class SingleStepControl { public: SingleStepControl(JDWP::JdwpStepSize step_size, JDWP::JdwpStepDepth step_depth, - int stack_depth, mirror::ArtMethod* method) + int stack_depth, ArtMethod* method) : step_size_(step_size), step_depth_(step_depth), 
stack_depth_(stack_depth), method_(method) { } @@ -109,17 +109,14 @@ class SingleStepControl { return stack_depth_; } - mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return method_.Read(); + ArtMethod* GetMethod() const { + return method_; } const std::set<uint32_t>& GetDexPcs() const { return dex_pcs_; } - void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AddDexPc(uint32_t dex_pc); bool ContainsDexPc(uint32_t dex_pc) const; @@ -138,7 +135,8 @@ class SingleStepControl { // set of DEX pcs associated to the source line number where the suspension occurred. // This is used to support SD_INTO and SD_OVER single-step depths so we detect when a single-step // causes the execution of an instruction in a different method or at a different line number. - GcRoot<mirror::ArtMethod> method_; + ArtMethod* method_; + std::set<uint32_t> dex_pcs_; DISALLOW_COPY_AND_ASSIGN(SingleStepControl); @@ -166,9 +164,9 @@ class DeoptimizationRequest { SetMethod(other.Method()); } - mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Name 'Kind()' would collide with the above enum name. Kind GetKind() const { @@ -256,7 +254,7 @@ class Dbg { static bool IsJdwpConfigured(); // Returns true if a method has any breakpoints. 
- static bool MethodHasAnyBreakpoints(mirror::ArtMethod* method) + static bool MethodHasAnyBreakpoints(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::breakpoint_lock_); @@ -524,10 +522,10 @@ class Dbg { kMethodEntry = 0x04, kMethodExit = 0x08, }; - static void PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object, + static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object, ArtField* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc, + static void PostFieldModificationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object, ArtField* f, const JValue* field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -541,7 +539,7 @@ class Dbg { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void UpdateDebugger(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t new_dex_pc, + ArtMethod* method, uint32_t new_dex_pc, int event_flags, const JValue* return_value) LOCKS_EXCLUDED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -574,7 +572,7 @@ class Dbg { // Indicates whether we need to force the use of interpreter to invoke a method. // This allows to single-step or continue into the called method. - static bool IsForcedInterpreterNeededForCalling(Thread* thread, mirror::ArtMethod* m) + static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; @@ -585,7 +583,7 @@ class Dbg { // Indicates whether we need to force the use of interpreter entrypoint when calling a // method through the resolution trampoline. This allows to single-step or continue into // the called method. 
- static bool IsForcedInterpreterNeededForResolution(Thread* thread, mirror::ArtMethod* m) + static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; @@ -596,7 +594,7 @@ class Dbg { // Indicates whether we need to force the use of instrumentation entrypoint when calling // a method through the resolution trampoline. This allows to deoptimize the stack for // debugging when we returned from the called method. - static bool IsForcedInstrumentationNeededForResolution(Thread* thread, mirror::ArtMethod* m) + static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; @@ -607,7 +605,7 @@ class Dbg { // Indicates whether we need to force the use of interpreter when returning from the // interpreter into the runtime. This allows to deoptimize the stack and continue // execution with interpreter for debugging. 
- static bool IsForcedInterpreterNeededForUpcall(Thread* thread, mirror::ArtMethod* m) + static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; @@ -709,7 +707,7 @@ class Dbg { static JDWP::FieldId ToFieldId(const ArtField* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc) + static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static JDWP::JdwpState* GetJdwpState(); @@ -733,7 +731,7 @@ class Dbg { static void PostThreadStartOrStop(Thread*, uint32_t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void PostLocationEvent(mirror::ArtMethod* method, int pcOffset, + static void PostLocationEvent(ArtMethod* method, int pcOffset, mirror::Object* thisPtr, int eventFlags, const JValue* return_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -745,16 +743,16 @@ class Dbg { EXCLUSIVE_LOCKS_REQUIRED(Locks::deoptimization_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMethod* m) + static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) + static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) + static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m) + static bool 
IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(Locks::alloc_tracker_lock_); diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc index dfe5a04d8f..25d5ef429b 100644 --- a/runtime/dex_file.cc +++ b/runtime/dex_file.cc @@ -28,6 +28,7 @@ #include <sstream> #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/logging.h" #include "base/stringprintf.h" #include "class_linker.h" @@ -35,7 +36,6 @@ #include "dex_file_verifier.h" #include "globals.h" #include "leb128.h" -#include "mirror/art_method-inl.h" #include "mirror/string.h" #include "os.h" #include "safe_map.h" @@ -760,7 +760,7 @@ const Signature DexFile::CreateSignature(const StringPiece& signature) const { return Signature(this, *proto_id); } -int32_t DexFile::GetLineNumFromPC(mirror::ArtMethod* method, uint32_t rel_pc) const { +int32_t DexFile::GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const { // For native method, lineno should be -2 to indicate it is native. Note that // "line number == -2" is how libcore tells from StackTraceElement. if (method->GetCodeItemOffset() == 0) { diff --git a/runtime/dex_file.h b/runtime/dex_file.h index 84eaa4a73e..d017601565 100644 --- a/runtime/dex_file.h +++ b/runtime/dex_file.h @@ -37,11 +37,11 @@ namespace art { // TODO: remove dependencies on mirror classes, primarily by moving // EncodedStaticFieldValueIterator to its own file. namespace mirror { - class ArtMethod; class ClassLoader; class DexCache; } // namespace mirror class ArtField; +class ArtMethod; class ClassLinker; class MemMap; class OatDexFile; @@ -861,7 +861,7 @@ class DexFile { // Returns -2 for native methods (as expected in exception traces). // // This is used by runtime; therefore use art::Method not art::DexFile::Method. 
- int32_t GetLineNumFromPC(mirror::ArtMethod* method, uint32_t rel_pc) const + int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx, diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index 9292cff88e..a4dd55cebe 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -19,6 +19,7 @@ #include "entrypoint_utils.h" +#include "art_method.h" #include "class_linker-inl.h" #include "common_throws.h" #include "dex_file.h" @@ -27,7 +28,6 @@ #include "indirect_reference_table.h" #include "invoke_type.h" #include "jni_internal.h" -#include "mirror/art_method.h" #include "mirror/array.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" @@ -38,15 +38,15 @@ namespace art { -inline mirror::ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type) +inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - auto* refs_only_sp = self->GetManagedStack()->GetTopQuickFrame(); - DCHECK_EQ(refs_only_sp->AsMirrorPtr(), Runtime::Current()->GetCalleeSaveMethod(type)); + auto** refs_only_sp = self->GetManagedStack()->GetTopQuickFrame(); + DCHECK_EQ(*refs_only_sp, Runtime::Current()->GetCalleeSaveMethod(type)); const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type); - auto* caller_sp = reinterpret_cast<StackReference<mirror::ArtMethod>*>( - reinterpret_cast<uintptr_t>(refs_only_sp) + callee_frame_size); - auto* caller = caller_sp->AsMirrorPtr(); + auto** caller_sp = reinterpret_cast<ArtMethod**>( + reinterpret_cast<uintptr_t>(refs_only_sp) + callee_frame_size); + auto* caller = *caller_sp; if (kIsDebugBuild) { NthCallerVisitor visitor(self, 1, true); @@ -60,7 +60,7 @@ inline mirror::ArtMethod* 
GetCalleeSaveMethodCaller(Thread* self, Runtime::Calle template <const bool kAccessCheck> ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, - mirror::ArtMethod* method, + ArtMethod* method, Thread* self, bool* slow_path) { mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx); if (UNLIKELY(klass == nullptr)) { @@ -141,7 +141,7 @@ inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, - mirror::ArtMethod* method, + ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) { bool slow_path = false; @@ -193,7 +193,7 @@ template <bool kAccessCheck> ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, int32_t component_count, - mirror::ArtMethod* method, + ArtMethod* method, bool* slow_path) { if (UNLIKELY(component_count < 0)) { ThrowNegativeArraySizeException(component_count); @@ -229,7 +229,7 @@ template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, int32_t component_count, - mirror::ArtMethod* method, + ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) { bool slow_path = false; @@ -252,7 +252,7 @@ template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, int32_t component_count, - mirror::ArtMethod* method, + ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) { DCHECK(klass != nullptr); @@ -274,7 +274,7 @@ inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, } template<FindFieldType type, bool access_check> -inline ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer, +inline ArtField* FindFieldFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size) { bool is_primitive; bool is_set; @@ -347,8 +347,8 @@ inline 
ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referr #define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \ ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \ - mirror::ArtMethod* referrer, \ - Thread* self, size_t expected_size) \ + ArtMethod* referrer, \ + Thread* self, size_t expected_size) \ #define EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \ EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, false); \ @@ -367,17 +367,16 @@ EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveWrite); #undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL template<InvokeType type, bool access_check> -inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, - mirror::Object** this_object, - mirror::ArtMethod** referrer, Thread* self) { +inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_object, + ArtMethod** referrer, Thread* self) { ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); - mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer); + ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer); if (resolved_method == nullptr) { StackHandleScope<1> hs(self); mirror::Object* null_this = nullptr; HandleWrapper<mirror::Object> h_this( hs.NewHandleWrapper(type == kStatic ? &null_this : this_object)); - resolved_method = class_linker->ResolveMethod(self, method_idx, referrer, type); + resolved_method = class_linker->ResolveMethod(self, method_idx, *referrer, type); } if (UNLIKELY(resolved_method == nullptr)) { DCHECK(self->IsExceptionPending()); // Throw exception and unwind. @@ -420,7 +419,7 @@ inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, return nullptr; // Failure. 
} DCHECK(klass->HasVTable()) << PrettyClass(klass); - return klass->GetVTableEntry(vtable_index); + return klass->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize()); } case kSuper: { mirror::Class* super_class = (*referrer)->GetDeclaringClass()->GetSuperClass(); @@ -439,23 +438,25 @@ inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, DCHECK(super_class != nullptr); } DCHECK(super_class->HasVTable()); - return super_class->GetVTableEntry(vtable_index); + return super_class->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize()); } case kInterface: { uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize; - mirror::ArtMethod* imt_method = (*this_object)->GetClass()->GetEmbeddedImTableEntry(imt_index); + ArtMethod* imt_method = (*this_object)->GetClass()->GetEmbeddedImTableEntry( + imt_index, class_linker->GetImagePointerSize()); if (!imt_method->IsImtConflictMethod() && !imt_method->IsImtUnimplementedMethod()) { if (kIsDebugBuild) { mirror::Class* klass = (*this_object)->GetClass(); - mirror::ArtMethod* method = klass->FindVirtualMethodForInterface(resolved_method); + ArtMethod* method = klass->FindVirtualMethodForInterface( + resolved_method, class_linker->GetImagePointerSize()); CHECK_EQ(imt_method, method) << PrettyMethod(resolved_method) << " / " << PrettyMethod(imt_method) << " / " << PrettyMethod(method) << " / " << PrettyClass(klass); } return imt_method; } else { - mirror::ArtMethod* interface_method = - (*this_object)->GetClass()->FindVirtualMethodForInterface(resolved_method); + ArtMethod* interface_method = (*this_object)->GetClass()->FindVirtualMethodForInterface( + resolved_method, class_linker->GetImagePointerSize()); if (UNLIKELY(interface_method == nullptr)) { ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, *this_object, *referrer); @@ -473,10 +474,10 @@ inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, // Explicit template declarations of 
FindMethodFromCode for all invoke types. #define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \ - mirror::ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \ - mirror::Object** this_object, \ - mirror::ArtMethod** referrer, \ - Thread* self) + ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \ + mirror::Object** this_object, \ + ArtMethod** referrer, \ + Thread* self) #define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \ EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false); \ EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, true) @@ -491,9 +492,8 @@ EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kInterface); #undef EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL // Fast path field resolution that can't initialize classes or throw exceptions. -inline ArtField* FindFieldFast(uint32_t field_idx, - mirror::ArtMethod* referrer, - FindFieldType type, size_t expected_size) { +inline ArtField* FindFieldFast(uint32_t field_idx, ArtMethod* referrer, FindFieldType type, + size_t expected_size) { ArtField* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx, sizeof(void*)); if (UNLIKELY(resolved_field == nullptr)) { @@ -530,8 +530,7 @@ inline ArtField* FindFieldFast(uint32_t field_idx, } mirror::Class* referring_class = referrer->GetDeclaringClass(); if (UNLIKELY(!referring_class->CanAccess(fields_class) || - !referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()) || + !referring_class->CanAccessMember(fields_class, resolved_field->GetAccessFlags()) || (is_set && resolved_field->IsFinal() && (fields_class != referring_class)))) { // Illegal access. return nullptr; @@ -544,15 +543,13 @@ inline ArtField* FindFieldFast(uint32_t field_idx, } // Fast path method resolution that can't throw exceptions. 
-inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx, - mirror::Object* this_object, - mirror::ArtMethod* referrer, - bool access_check, InvokeType type) { +inline ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_object, + ArtMethod* referrer, bool access_check, InvokeType type) { if (UNLIKELY(this_object == nullptr && type != kStatic)) { return nullptr; } - mirror::ArtMethod* resolved_method = - referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx); + ArtMethod* resolved_method = + referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx, sizeof(void*)); if (UNLIKELY(resolved_method == nullptr)) { return nullptr; } @@ -572,22 +569,21 @@ inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx, } } if (type == kInterface) { // Most common form of slow path dispatch. - return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); + return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method, sizeof(void*)); } else if (type == kStatic || type == kDirect) { return resolved_method; } else if (type == kSuper) { - return referrer->GetDeclaringClass()->GetSuperClass() - ->GetVTableEntry(resolved_method->GetMethodIndex()); + return referrer->GetDeclaringClass()->GetSuperClass()->GetVTableEntry( + resolved_method->GetMethodIndex(), sizeof(void*)); } else { DCHECK(type == kVirtual); - return this_object->GetClass()->GetVTableEntry(resolved_method->GetMethodIndex()); + return this_object->GetClass()->GetVTableEntry( + resolved_method->GetMethodIndex(), sizeof(void*)); } } -inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, - mirror::ArtMethod* referrer, - Thread* self, bool can_run_clinit, - bool verify_access) { +inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, ArtMethod* referrer, Thread* self, + bool can_run_clinit, bool verify_access) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); mirror::Class* klass = 
class_linker->ResolveType(type_idx, referrer); if (UNLIKELY(klass == nullptr)) { @@ -620,8 +616,7 @@ inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, return h_class.Get(); } -inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer, - uint32_t string_idx) { +inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); return class_linker->ResolveString(string_idx, referrer); } diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index ce5673923f..fc7f8b782a 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -17,11 +17,11 @@ #include "entrypoints/entrypoint_utils.h" #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/mutex.h" #include "class_linker-inl.h" #include "dex_file-inl.h" #include "gc/accounting/card_table-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/method.h" #include "mirror/object-inl.h" @@ -35,7 +35,7 @@ namespace art { static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx, int32_t component_count, - mirror::ArtMethod* referrer, + ArtMethod* referrer, Thread* self, bool access_check) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -76,7 +76,7 @@ static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx, // Helper function to allocate array for FILLED_NEW_ARRAY. mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count, - mirror::ArtMethod* referrer, Thread* self, + ArtMethod* referrer, Thread* self, bool access_check, gc::AllocatorType /* allocator_type */) { mirror::Class* klass = CheckFilledNewArrayAlloc(type_idx, component_count, referrer, self, @@ -96,7 +96,7 @@ mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_c // Helper function to allocate array for FILLED_NEW_ARRAY. 
mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, int32_t component_count, - mirror::ArtMethod* referrer, + ArtMethod* referrer, Thread* self, bool access_check, gc::AllocatorType /* allocator_type */) { @@ -294,22 +294,19 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj); mirror::Class* proxy_class = rcvr->GetClass(); mirror::Method* interface_method = soa.Decode<mirror::Method*>(interface_method_jobj); - mirror::ArtMethod* proxy_method = - rcvr->GetClass()->FindVirtualMethodForInterface(interface_method->GetArtMethod()); - int throws_index = -1; - size_t num_virt_methods = proxy_class->NumVirtualMethods(); - for (size_t i = 0; i < num_virt_methods; i++) { - if (proxy_class->GetVirtualMethod(i) == proxy_method) { - throws_index = i; - break; - } - } - CHECK_NE(throws_index, -1); + ArtMethod* proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface( + interface_method->GetArtMethod(), sizeof(void*)); + auto* virtual_methods = proxy_class->GetVirtualMethodsPtr(); + size_t num_virtuals = proxy_class->NumVirtualMethods(); + size_t method_size = ArtMethod::ObjectSize(sizeof(void*)); + int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) - + reinterpret_cast<uintptr_t>(virtual_methods)) / method_size; + CHECK_LT(throws_index, static_cast<int>(num_virtuals)); mirror::ObjectArray<mirror::Class>* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); mirror::Class* exception_class = exception->GetClass(); bool declares_exception = false; - for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { + for (int32_t i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { mirror::Class* declared_exception = declared_exceptions->Get(i); declares_exception = declared_exception->IsAssignableFrom(exception_class); } diff --git a/runtime/entrypoints/entrypoint_utils.h 
b/runtime/entrypoints/entrypoint_utils.h index 8d419f8a7b..47865a2a80 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -31,19 +31,19 @@ namespace art { namespace mirror { class Array; - class ArtMethod; class Class; class Object; class String; } // namespace mirror class ArtField; +class ArtMethod; class ScopedObjectAccessAlreadyRunnable; class Thread; template <const bool kAccessCheck> ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, - mirror::ArtMethod* method, + ArtMethod* method, Thread* self, bool* slow_path) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -58,7 +58,7 @@ ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror:: // check. template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, - mirror::ArtMethod* method, + ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -81,7 +81,7 @@ ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Clas template <bool kAccessCheck> ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, int32_t component_count, - mirror::ArtMethod* method, + ArtMethod* method, bool* slow_path) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -92,7 +92,7 @@ ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, int32_t component_count, - mirror::ArtMethod* method, + ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -100,20 +100,20 @@ ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, int32_t component_count, - 
mirror::ArtMethod* method, + ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count, - mirror::ArtMethod* method, Thread* self, + ArtMethod* method, Thread* self, bool access_check, gc::AllocatorType allocator_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, int32_t component_count, - mirror::ArtMethod* method, + ArtMethod* method, Thread* self, bool access_check, gc::AllocatorType allocator_type) @@ -132,38 +132,33 @@ enum FindFieldType { }; template<FindFieldType type, bool access_check> -inline ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer, - Thread* self, size_t expected_size) +inline ArtField* FindFieldFromCode( + uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<InvokeType type, bool access_check> -inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, - mirror::Object** this_object, - mirror::ArtMethod** referrer, Thread* self) +inline ArtMethod* FindMethodFromCode( + uint32_t method_idx, mirror::Object** this_object, ArtMethod** referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Fast path field resolution that can't initialize classes or throw exceptions. -inline ArtField* FindFieldFast(uint32_t field_idx, - mirror::ArtMethod* referrer, - FindFieldType type, size_t expected_size) +inline ArtField* FindFieldFast( + uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Fast path method resolution that can't throw exceptions. 
-inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx, - mirror::Object* this_object, - mirror::ArtMethod* referrer, - bool access_check, InvokeType type) +inline ArtMethod* FindMethodFast( + uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check, + InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, - mirror::ArtMethod* referrer, - Thread* self, bool can_run_clinit, - bool verify_access) +inline mirror::Class* ResolveVerifyAndClinit( + uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer, uint32_t string_idx) +inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // TODO: annotalysis disabled as monitor semantics are maintained in Java code. diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc index d4844c2a95..72c2e0a5b0 100644 --- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc +++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc @@ -14,10 +14,10 @@ * limitations under the License. 
*/ +#include "art_method-inl.h" #include "class_linker.h" #include "dex_file-inl.h" #include "interpreter/interpreter.h" -#include "mirror/art_method-inl.h" #include "mirror/object-inl.h" #include "reflection.h" #include "runtime.h" @@ -27,7 +27,7 @@ namespace art { extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result) { - mirror::ArtMethod* method = shadow_frame->GetMethod(); + ArtMethod* method = shadow_frame->GetMethod(); // Ensure static methods are initialized. if (method->IsStatic()) { mirror::Class* declaringClass = method->GetDeclaringClass(); @@ -50,7 +50,7 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile:: uint16_t arg_offset = (code_item == nullptr) ? 0 : code_item->registers_size_ - code_item->ins_size_; method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset), (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t), - result, method->GetShorty()); + result, method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty()); } } // namespace art diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc index a68eeebff8..22226c1dfb 100644 --- a/runtime/entrypoints/jni/jni_entrypoints.cc +++ b/runtime/entrypoints/jni/jni_entrypoints.cc @@ -14,9 +14,9 @@ * limitations under the License. */ +#include "art_method-inl.h" #include "base/logging.h" #include "entrypoints/entrypoint_utils.h" -#include "mirror/art_method-inl.h" #include "mirror/object-inl.h" #include "scoped_thread_state_change.h" #include "thread.h" @@ -34,7 +34,7 @@ extern "C" void* artFindNativeMethod(Thread* self) { Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native. 
ScopedObjectAccess soa(self); - mirror::ArtMethod* method = self->GetCurrentMethod(nullptr); + ArtMethod* method = self->GetCurrentMethod(nullptr); DCHECK(method != nullptr); // Lookup symbol address for method, on failure we'll return null with an exception set, diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h index 8cd6ca6777..521c549193 100644 --- a/runtime/entrypoints/quick/callee_save_frame.h +++ b/runtime/entrypoints/quick/callee_save_frame.h @@ -32,9 +32,7 @@ #include "arch/x86_64/quick_method_frame_info_x86_64.h" namespace art { -namespace mirror { class ArtMethod; -} // namespace mirror class ScopedQuickEntrypointChecks { public: diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc index fa129afd39..f56b5e45b6 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc @@ -16,9 +16,9 @@ #include "entrypoints/quick/quick_alloc_entrypoints.h" +#include "art_method-inl.h" #include "callee_save_frame.h" #include "entrypoints/entrypoint_utils-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" @@ -29,7 +29,7 @@ static constexpr bool kUseTlabFastPath = true; #define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \ - uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \ + uint32_t type_idx, ArtMethod* method, Thread* self) \ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \ @@ -56,7 +56,7 @@ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \ return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, 
allocator_type); \ } \ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \ - mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \ + mirror::Class* klass, ArtMethod* method, Thread* self) \ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ UNUSED(method); \ ScopedQuickEntrypointChecks sqec(self); \ @@ -83,7 +83,7 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \ return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \ } \ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \ - mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \ + mirror::Class* klass, ArtMethod* method, Thread* self) \ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ UNUSED(method); \ ScopedQuickEntrypointChecks sqec(self); \ @@ -108,34 +108,34 @@ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \ return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \ } \ extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \ - uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \ + uint32_t type_idx, ArtMethod* method, Thread* self) \ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \ } \ extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \ - uint32_t type_idx, int32_t component_count, mirror::ArtMethod* method, Thread* self) \ + uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocArrayFromCode<false, instrumented_bool>(type_idx, component_count, method, self, \ allocator_type); \ } \ extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \ - mirror::Class* klass, int32_t component_count, 
mirror::ArtMethod* method, Thread* self) \ + mirror::Class* klass, int32_t component_count, ArtMethod* method, Thread* self) \ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, component_count, method, self, \ allocator_type); \ } \ extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \ - uint32_t type_idx, int32_t component_count, mirror::ArtMethod* method, Thread* self) \ + uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocArrayFromCode<true, instrumented_bool>(type_idx, component_count, method, self, \ allocator_type); \ } \ extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \ - uint32_t type_idx, int32_t component_count, mirror::ArtMethod* method, Thread* self) \ + uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ if (!instrumented_bool) { \ @@ -145,7 +145,7 @@ extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \ } \ } \ extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \ - uint32_t type_idx, int32_t component_count, mirror::ArtMethod* method, Thread* self) \ + uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ if (!instrumented_bool) { \ @@ -193,27 +193,27 @@ GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(Region, gc::kAllocatorTypeRegion) GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(RegionTLAB, gc::kAllocatorTypeRegionTLAB) #define GENERATE_ENTRYPOINTS(suffix) \ -extern "C" void* art_quick_alloc_array##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \ -extern "C" void* 
art_quick_alloc_array_resolved##suffix(mirror::Class* klass, int32_t, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \ +extern "C" void* art_quick_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_array_resolved##suffix(mirror::Class* klass, int32_t, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, ArtMethod* ref); \ +extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \ +extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_alloc_string_from_bytes##suffix(void*, int32_t, int32_t, int32_t); \ extern "C" void* art_quick_alloc_string_from_chars##suffix(int32_t, int32_t, void*); \ extern "C" void* 
art_quick_alloc_string_from_string##suffix(void*); \ -extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \ -extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \ +extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \ +extern "C" void* 
art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \ +extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_alloc_string_from_bytes##suffix##_instrumented(void*, int32_t, int32_t, int32_t); \ extern "C" void* art_quick_alloc_string_from_chars##suffix##_instrumented(int32_t, int32_t, void*); \ extern "C" void* art_quick_alloc_string_from_string##suffix##_instrumented(void*); \ diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h index 1fd8a949a9..c7aaa2021f 100644 --- a/runtime/entrypoints/quick/quick_default_externs.h +++ b/runtime/entrypoints/quick/quick_default_externs.h @@ -22,10 +22,10 @@ namespace art { namespace mirror { class Array; -class ArtMethod; class Class; class Object; } // namespace mirror +class ArtMethod; } // namespace art // These are extern declarations of assembly stubs with common names. @@ -97,9 +97,9 @@ extern "C" int32_t art_quick_string_compareto(void*, void*); extern "C" void* art_quick_memcpy(void*, const void*, size_t); // Invoke entrypoints. 
-extern "C" void art_quick_imt_conflict_trampoline(art::mirror::ArtMethod*); -extern "C" void art_quick_resolution_trampoline(art::mirror::ArtMethod*); -extern "C" void art_quick_to_interpreter_bridge(art::mirror::ArtMethod*); +extern "C" void art_quick_imt_conflict_trampoline(art::ArtMethod*); +extern "C" void art_quick_resolution_trampoline(art::ArtMethod*); +extern "C" void art_quick_to_interpreter_bridge(art::ArtMethod*); extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc index 6a8aaf2610..3eefeef84a 100644 --- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc @@ -17,7 +17,6 @@ #include "callee_save_frame.h" #include "dex_file-inl.h" #include "interpreter/interpreter.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc index 46629f5958..67649d4c64 100644 --- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc @@ -14,12 +14,12 @@ * limitations under the License. 
*/ +#include "art_method-inl.h" #include "callee_save_frame.h" #include "entrypoints/entrypoint_utils-inl.h" #include "class_linker-inl.h" #include "dex_file-inl.h" #include "gc/accounting/card_table-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h index b72ce34648..cef2510451 100644 --- a/runtime/entrypoints/quick/quick_entrypoints.h +++ b/runtime/entrypoints/quick/quick_entrypoints.h @@ -29,13 +29,13 @@ namespace art { namespace mirror { class Array; -class ArtMethod; class Class; class Object; template<class MirrorType> class CompressedReference; } // namespace mirror +class ArtMethod; class Thread; // Pointers to functions that are called by quick compiler generated code via thread-local storage. diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h index 0aca58fb16..60bbf4ac82 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_list.h +++ b/runtime/entrypoints/quick/quick_entrypoints_list.h @@ -20,15 +20,15 @@ // All quick entrypoints. Format is name, return type, argument types. 
#define QUICK_ENTRYPOINT_LIST(V) \ - V(AllocArray, void*, uint32_t, int32_t, mirror::ArtMethod*) \ - V(AllocArrayResolved, void*, mirror::Class*, int32_t, mirror::ArtMethod*) \ - V(AllocArrayWithAccessCheck, void*, uint32_t, int32_t, mirror::ArtMethod*) \ - V(AllocObject, void*, uint32_t, mirror::ArtMethod*) \ - V(AllocObjectResolved, void*, mirror::Class*, mirror::ArtMethod*) \ - V(AllocObjectInitialized, void*, mirror::Class*, mirror::ArtMethod*) \ - V(AllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*) \ - V(CheckAndAllocArray, void*, uint32_t, int32_t, mirror::ArtMethod*) \ - V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, int32_t, mirror::ArtMethod*) \ + V(AllocArray, void*, uint32_t, int32_t, ArtMethod*) \ + V(AllocArrayResolved, void*, mirror::Class*, int32_t, ArtMethod*) \ + V(AllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \ + V(AllocObject, void*, uint32_t, ArtMethod*) \ + V(AllocObjectResolved, void*, mirror::Class*, ArtMethod*) \ + V(AllocObjectInitialized, void*, mirror::Class*, ArtMethod*) \ + V(AllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*) \ + V(CheckAndAllocArray, void*, uint32_t, int32_t, ArtMethod*) \ + V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \ V(AllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t) \ V(AllocStringFromChars, void*, int32_t, int32_t, void*) \ V(AllocStringFromString, void*, void*) \ @@ -77,7 +77,7 @@ V(JniMethodEndSynchronized, void, uint32_t, jobject, Thread*) \ V(JniMethodEndWithReference, mirror::Object*, jobject, uint32_t, Thread*) \ V(JniMethodEndWithReferenceSynchronized, mirror::Object*, jobject, uint32_t, jobject, Thread*) \ - V(QuickGenericJniTrampoline, void, mirror::ArtMethod*) \ + V(QuickGenericJniTrampoline, void, ArtMethod*) \ \ V(LockObject, void, mirror::Object*) \ V(UnlockObject, void, mirror::Object*) \ @@ -106,9 +106,9 @@ V(StringCompareTo, int32_t, void*, void*) \ V(Memcpy, void*, void*, const void*, size_t) \ \ - 
V(QuickImtConflictTrampoline, void, mirror::ArtMethod*) \ - V(QuickResolutionTrampoline, void, mirror::ArtMethod*) \ - V(QuickToInterpreterBridge, void, mirror::ArtMethod*) \ + V(QuickImtConflictTrampoline, void, ArtMethod*) \ + V(QuickResolutionTrampoline, void, ArtMethod*) \ + V(QuickToInterpreterBridge, void, ArtMethod*) \ V(InvokeDirectTrampolineWithAccessCheck, void, uint32_t, void*) \ V(InvokeInterfaceTrampolineWithAccessCheck, void, uint32_t, void*) \ V(InvokeStaticTrampolineWithAccessCheck, void, uint32_t, void*) \ diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc index b5a7c09531..871cf3c256 100644 --- a/runtime/entrypoints/quick/quick_field_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc @@ -15,17 +15,17 @@ */ #include "art_field-inl.h" +#include "art_method-inl.h" #include "callee_save_frame.h" #include "dex_file-inl.h" #include "entrypoints/entrypoint_utils-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include <stdint.h> namespace art { -extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer, +extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); @@ -40,7 +40,7 @@ extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, mirror::ArtMethod return 0; // Will throw exception by checking with Thread::Current. } -extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer, +extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); @@ -55,7 +55,7 @@ extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, mirror::ArtMe return 0; // Will throw exception by checking with Thread::Current. 
} -extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer, +extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); @@ -71,7 +71,7 @@ extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, mirror::ArtMeth } extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx, - mirror::ArtMethod* referrer, + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); @@ -87,7 +87,7 @@ extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx, } extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, - mirror::ArtMethod* referrer, + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); @@ -103,7 +103,7 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, } extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, - mirror::ArtMethod* referrer, + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); @@ -119,7 +119,7 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, } extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, - mirror::ArtMethod* referrer, + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); @@ -137,7 +137,7 @@ extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, } extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t)); @@ -157,7 +157,7 @@ extern "C" int8_t 
artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* } extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t)); @@ -176,7 +176,7 @@ extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Obj return 0; // Will throw exception by checking with Thread::Current. } extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t)); @@ -196,7 +196,7 @@ extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Objec } extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t)); @@ -216,7 +216,7 @@ extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Objec } extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t)); @@ -236,7 +236,7 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* } extern "C" uint64_t artGet64InstanceFromCode(uint32_t 
field_idx, mirror::Object* obj, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t)); @@ -256,7 +256,7 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* } extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, - mirror::ArtMethod* referrer, + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); @@ -278,7 +278,7 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror: } extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t)); @@ -309,7 +309,7 @@ extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value, } extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t)); @@ -340,7 +340,7 @@ extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value, } extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t)); @@ -358,7 +358,7 @@ extern "C" int 
artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, return -1; // failure } -extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer, +extern "C" int artSet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer, uint64_t new_value, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); @@ -378,7 +378,7 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* ref } extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, @@ -401,7 +401,7 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_v } extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t)); @@ -440,7 +440,7 @@ extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, } extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t)); @@ -480,7 +480,7 @@ extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, } extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* 
referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t)); @@ -508,7 +508,7 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, } extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t)); @@ -533,7 +533,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, mirror::Object* new_value, - mirror::ArtMethod* referrer, Thread* self) + ArtMethod* referrer, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc index e3365431ce..d3991cdb78 100644 --- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc @@ -14,9 +14,9 @@ * limitations under the License. */ +#include "art_method-inl.h" #include "callee_save_frame.h" #include "mirror/array.h" -#include "mirror/art_method-inl.h" #include "entrypoints/entrypoint_utils.h" namespace art { @@ -25,7 +25,7 @@ namespace art { * Handle fill array data by copying appropriate part of dex file into array. 
*/ extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array, - mirror::ArtMethod* method, Thread* self) + ArtMethod* method, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); const uint16_t* const insns = method->GetCodeItem()->insns_; diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc index eb1b1056a4..7eb73c3b59 100644 --- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc @@ -14,17 +14,17 @@ * limitations under the License. */ +#include "art_method-inl.h" #include "callee_save_frame.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "instrumentation.h" -#include "mirror/art_method-inl.h" #include "mirror/object-inl.h" #include "runtime.h" #include "thread-inl.h" namespace art { -extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod* method, +extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method, mirror::Object* this_object, Thread* self, uintptr_t lr) @@ -45,8 +45,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod* return result; } -extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, - StackReference<mirror::ArtMethod>* sp, +extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp, uint64_t gpr_result, uint64_t fpr_result) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index 51817a249d..de225ad8e8 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -14,8 +14,8 @@ * limitations under the License. 
*/ +#include "art_method-inl.h" #include "entrypoints/entrypoint_utils-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/object-inl.h" #include "thread-inl.h" #include "verify_object-inl.h" @@ -35,7 +35,7 @@ extern uint32_t JniMethodStart(Thread* self) { DCHECK(env != nullptr); uint32_t saved_local_ref_cookie = env->local_ref_cookie; env->local_ref_cookie = env->locals.GetSegmentState(); - mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr(); + ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame(); if (!native_method->IsFastNative()) { // When not fast JNI we transition out of runnable. self->TransitionFromRunnableToSuspended(kNative); @@ -50,7 +50,7 @@ extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) { // TODO: NO_THREAD_SAFETY_ANALYSIS due to different control paths depending on fast JNI. static void GoToRunnable(Thread* self) NO_THREAD_SAFETY_ANALYSIS { - mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr(); + ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame(); bool is_fast = native_method->IsFastNative(); if (!is_fast) { self->TransitionFromSuspendedToRunnable(); diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 2e7e2dfd74..bc15cc79c9 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include "art_method-inl.h" #include "callee_save_frame.h" #include "common_throws.h" #include "dex_file-inl.h" @@ -23,7 +24,6 @@ #include "gc/accounting/card_table-inl.h" #include "interpreter/interpreter.h" #include "method_reference.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" #include "mirror/method.h" @@ -279,10 +279,10 @@ class QuickArgumentVisitor { // 'this' object is the 1st argument. They also have the same frame layout as the // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the // 1st GPR. - static mirror::Object* GetProxyThisObject(StackReference<mirror::ArtMethod>* sp) + static mirror::Object* GetProxyThisObject(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CHECK(sp->AsMirrorPtr()->IsProxyMethod()); - CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, sp->AsMirrorPtr()->GetFrameSizeInBytes()); + CHECK((*sp)->IsProxyMethod()); + CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes()); CHECK_GT(kNumQuickGprArgs, 0u); constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR. 
size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset + @@ -291,28 +291,28 @@ class QuickArgumentVisitor { return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr(); } - static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp) + static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod()); - uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; - return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr(); + DCHECK((*sp)->IsCalleeSaveMethod()); + uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; + return *reinterpret_cast<ArtMethod**>(previous_sp); } // For the given quick ref and args quick frame, return the caller's PC. - static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod()); + static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK((*sp)->IsCalleeSaveMethod()); uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset; return *reinterpret_cast<uintptr_t*>(lr); } - QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty, + QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len), gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset), fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset), stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize - + sizeof(StackReference<mirror::ArtMethod>)), // Skip 
StackReference<ArtMethod>. + + sizeof(ArtMethod*)), // Skip ArtMethod*. gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) { static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), @@ -323,6 +323,7 @@ class QuickArgumentVisitor { // next register is even. static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0, "Number of Quick FPR arguments not even"); + DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); } virtual ~QuickArgumentVisitor() {} @@ -354,7 +355,8 @@ class QuickArgumentVisitor { } bool IsSplitLongOrDouble() const { - if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) { + if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || + (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) { return is_split_long_or_double_; } else { return false; // An optimization for when GPR and FPRs are 64bit. @@ -539,7 +541,7 @@ class QuickArgumentVisitor { // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It // allows to use the QuickArgumentVisitor constants without moving all the code in its own module. -extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp) +extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return QuickArgumentVisitor::GetProxyThisObject(sp); } @@ -547,9 +549,8 @@ extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::Art // Visits arguments on the stack placing them into the shadow frame. 
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor { public: - BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, - const char* shorty, uint32_t shorty_len, ShadowFrame* sf, - size_t first_arg_reg) : + BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty, + uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; @@ -594,8 +595,7 @@ void BuildQuickShadowFrameVisitor::Visit() { ++cur_reg_; } -extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self, - StackReference<mirror::ArtMethod>* sp) +extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // Ensure we don't get thread suspension until the object arguments are safely in the shadow // frame. 
@@ -616,7 +616,8 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory)); size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_; uint32_t shorty_len = 0; - const char* shorty = method->GetShorty(&shorty_len); + auto* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*)); + const char* shorty = non_proxy_method->GetShorty(&shorty_len); BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len, shadow_frame, first_arg_reg); shadow_frame_builder.VisitArguments(); @@ -643,7 +644,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa self->PopManagedStackFragment(fragment); // Request a stack deoptimization if needed - mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); + ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) { self->SetException(Thread::GetDeoptimizationException()); self->SetDeoptimizationReturnValue(result); @@ -658,8 +659,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa // to jobjects. class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor { public: - BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, - const char* shorty, uint32_t shorty_len, + BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len, ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} @@ -722,9 +722,8 @@ void BuildQuickArgumentVisitor::FixupReferences() { // which is responsible for recording callee save registers. We explicitly place into jobjects the // incoming reference arguments (so they survive GC). 
We invoke the invocation handler, which is a // field within the proxy object, which will box the primitive arguments and deal with error cases. -extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method, - mirror::Object* receiver, - Thread* self, StackReference<mirror::ArtMethod>* sp) +extern "C" uint64_t artQuickProxyInvokeHandler( + ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method); DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method); @@ -732,7 +731,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method, const char* old_cause = self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); // Register the top of the managed stack, making stack crawlable. - DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method); + DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method); DCHECK_EQ(proxy_method->GetFrameSizeInBytes(), Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()) << PrettyMethod(proxy_method); @@ -745,12 +744,12 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method, jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver); // Placing arguments into args vector and remove the receiver. 
- mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(); + ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*)); CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " " << PrettyMethod(non_proxy_method); std::vector<jvalue> args; uint32_t shorty_len = 0; - const char* shorty = proxy_method->GetShorty(&shorty_len); + const char* shorty = non_proxy_method->GetShorty(&shorty_len); BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args); local_ref_visitor.VisitArguments(); @@ -758,7 +757,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method, args.erase(args.begin()); // Convert proxy method into expected interface method. - mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod(); + ArtMethod* interface_method = proxy_method->FindOverriddenMethod(sizeof(void*)); DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method); DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); self->EndAssertNoThreadSuspension(old_cause); @@ -777,9 +776,8 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method, // so they don't get garbage collected. class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor { public: - RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, - const char* shorty, uint32_t shorty_len, - ScopedObjectAccessUnchecked* soa) : + RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, + uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; @@ -813,10 +811,8 @@ void RememberForGcArgumentVisitor::FixupReferences() { } // Lazily resolve a method for quick. Called by stub code. 
-extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called, - mirror::Object* receiver, - Thread* self, - StackReference<mirror::ArtMethod>* sp) +extern "C" const void* artQuickResolutionTrampoline( + ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); // Start new JNI local reference state @@ -827,7 +823,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called, // Compute details about the called method (avoid GCs) ClassLinker* linker = Runtime::Current()->GetClassLinker(); - mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); + ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); InvokeType invoke_type; MethodReference called_method(nullptr, 0); const bool called_method_known_on_entry = !called->IsRuntimeMethod(); @@ -906,7 +902,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called, HandleWrapper<mirror::Object> h_receiver( hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy)); DCHECK_EQ(caller->GetDexFile(), called_method.dex_file); - called = linker->ResolveMethod(self, called_method.dex_method_index, &caller, invoke_type); + called = linker->ResolveMethod(self, called_method.dex_method_index, caller, invoke_type); } const void* code = nullptr; if (LIKELY(!self->IsExceptionPending())) { @@ -917,11 +913,11 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called, // Refine called method based on receiver. 
CHECK(receiver != nullptr) << invoke_type; - mirror::ArtMethod* orig_called = called; + ArtMethod* orig_called = called; if (invoke_type == kVirtual) { - called = receiver->GetClass()->FindVirtualMethodForVirtual(called); + called = receiver->GetClass()->FindVirtualMethodForVirtual(called, sizeof(void*)); } else { - called = receiver->GetClass()->FindVirtualMethodForInterface(called); + called = receiver->GetClass()->FindVirtualMethodForInterface(called, sizeof(void*)); } CHECK(called != nullptr) << PrettyMethod(orig_called) << " " @@ -947,8 +943,9 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called, caller_method_name_and_sig_index); } if ((update_dex_cache_method_index != DexFile::kDexNoIndex) && - (caller->GetDexCacheResolvedMethod(update_dex_cache_method_index) != called)) { - caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called); + (caller->GetDexCacheResolvedMethod( + update_dex_cache_method_index, sizeof(void*)) != called)) { + caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called, sizeof(void*)); } } else if (invoke_type == kStatic) { const auto called_dex_method_idx = called->GetDexMethodIndex(); @@ -958,7 +955,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called, // b/19175856 if (called->GetDexFile() == called_method.dex_file && called_method.dex_method_index != called_dex_method_idx) { - called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called); + called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called, sizeof(void*)); } } @@ -1007,7 +1004,8 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called, // Fixup any locally saved objects may have moved during a GC. visitor.FixupReferences(); // Place called method in callee-save frame to be placed as first argument to quick method. 
- sp->Assign(called); + *sp = called; + return code; } @@ -1487,10 +1485,11 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { // is at *m = sp. Will update to point to the bottom of the save frame. // // Note: assumes ComputeAll() has been run before. - void LayoutCalleeSaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp, - HandleScope** handle_scope) + void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = (*m)->AsMirrorPtr(); + ArtMethod* method = **m; + + DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp); @@ -1502,22 +1501,20 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { // Under the callee saves put handle scope and new method stack reference. size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_); - size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>); + size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*); sp8 -= scope_and_method; // Align by kStackAlignment. - sp8 = reinterpret_cast<uint8_t*>(RoundDown( - reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); + sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment)); - uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>); + uint8_t* sp8_table = sp8 + sizeof(ArtMethod*); *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(), num_handle_scope_references_); // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us. 
uint8_t* method_pointer = sp8; - StackReference<mirror::ArtMethod>* new_method_ref = - reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer); - new_method_ref->Assign(method); + auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer); + *new_method_ref = method; *m = new_method_ref; } @@ -1529,8 +1526,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. // Returns the new bottom. Note: this may be unaligned. - uint8_t* LayoutJNISaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp, - HandleScope** handle_scope) + uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { // First, fix up the layout of the callee-save frame. // We have to squeeze in the HandleScope, and relocate the method pointer. @@ -1546,9 +1542,9 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { } // WARNING: After this, *sp won't be pointing to the method anymore! 
- uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m, - const char* shorty, uint32_t shorty_len, HandleScope** handle_scope, - uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr) + uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, + HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, + uint32_t** start_fpr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Walk(shorty, shorty_len); @@ -1637,7 +1633,7 @@ class FillNativeCall { class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { public: BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len, - StackReference<mirror::ArtMethod>** sp) + ArtMethod*** sp) : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) { ComputeGenericJniFrameSize fsc; @@ -1655,7 +1651,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { sm_.AdvancePointer(self->GetJniEnv()); if (is_static) { - sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass()); + sm_.AdvanceHandleScope((**sp)->GetDeclaringClass()); } } @@ -1811,10 +1807,9 @@ void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) * 1) How many bytes of the alloca can be released, if the value is non-negative. * 2) An error, if the value is negative. 
*/ -extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, - StackReference<mirror::ArtMethod>* sp) +extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* called = sp->AsMirrorPtr(); + ArtMethod* called = *sp; DCHECK(called->IsNative()) << PrettyMethod(called, true); uint32_t shorty_len = 0; const char* shorty = called->GetShorty(&shorty_len); @@ -1887,15 +1882,15 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, */ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - StackReference<mirror::ArtMethod>* sp = self->GetManagedStack()->GetTopQuickFrame(); + ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame(); uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); - mirror::ArtMethod* called = sp->AsMirrorPtr(); + ArtMethod* called = *sp; uint32_t cookie = *(sp32 - 1); jobject lock = nullptr; if (called->IsSynchronized()) { HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) - + sizeof(StackReference<mirror::ArtMethod>)); + + sizeof(*sp)); lock = table->GetHandle(0).ToJObject(); } @@ -1947,17 +1942,14 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, template<InvokeType type, bool access_check> static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, - mirror::ArtMethod* caller_method, - Thread* self, StackReference<mirror::ArtMethod>* sp); + ArtMethod* caller_method, Thread* self, ArtMethod** sp); template<InvokeType type, bool access_check> static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, - mirror::ArtMethod* caller_method, - Thread* self, StackReference<mirror::ArtMethod>* sp) { + ArtMethod* caller_method, Thread* self, ArtMethod** sp) { ScopedQuickEntrypointChecks sqec(self); - DCHECK_EQ(sp->AsMirrorPtr(), 
Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)); - mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, - type); + DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)); + ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type); if (UNLIKELY(method == nullptr)) { const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile(); uint32_t shorty_len; @@ -1994,9 +1986,9 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_o template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx, \ mirror::Object* this_object, \ - mirror::ArtMethod* caller_method, \ + ArtMethod* caller_method, \ Thread* self, \ - StackReference<mirror::ArtMethod>* sp) \ + ArtMethod** sp) \ EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false); EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true); @@ -2013,8 +2005,7 @@ EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true); // See comments in runtime_support_asm.S extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, - mirror::ArtMethod* caller_method, Thread* self, - StackReference<mirror::ArtMethod>* sp) + ArtMethod* caller_method, Thread* self, ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp); @@ -2022,8 +2013,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, - mirror::ArtMethod* caller_method, Thread* self, - StackReference<mirror::ArtMethod>* sp) + ArtMethod* caller_method, Thread* self, ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon<kDirect, true>(method_idx, 
this_object, caller_method, self, sp); @@ -2031,8 +2021,7 @@ extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, - mirror::ArtMethod* caller_method, Thread* self, - StackReference<mirror::ArtMethod>* sp) + ArtMethod* caller_method, Thread* self, ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp); @@ -2040,8 +2029,7 @@ extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, - mirror::ArtMethod* caller_method, Thread* self, - StackReference<mirror::ArtMethod>* sp) + ArtMethod* caller_method, Thread* self, ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp); @@ -2049,31 +2037,31 @@ extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, - mirror::ArtMethod* caller_method, Thread* self, - StackReference<mirror::ArtMethod>* sp) + ArtMethod* caller_method, Thread* self, ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp); } // Determine target of interface dispatch. This object is known non-null. 
-extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method, +extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method, mirror::Object* this_object, - mirror::ArtMethod* caller_method, + ArtMethod* caller_method, Thread* self, - StackReference<mirror::ArtMethod>* sp) + ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); - mirror::ArtMethod* method; + ArtMethod* method; if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) { - method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method); + method = this_object->GetClass()->FindVirtualMethodForInterface( + interface_method, sizeof(void*)); if (UNLIKELY(method == nullptr)) { - ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object, - caller_method); + ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch( + interface_method, this_object, caller_method); return GetTwoWordFailureValue(); // Failure. } } else { - DCHECK(interface_method == Runtime::Current()->GetResolutionMethod()); + DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod()); // Find the caller PC. 
constexpr size_t pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsAndArgs); diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc index 01c17acdcc..5cdf9677ef 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc @@ -16,9 +16,9 @@ #include <stdint.h> +#include "art_method-inl.h" #include "callee_save_frame.h" #include "common_runtime_test.h" -#include "mirror/art_method-inl.h" #include "quick/quick_method_frame_info.h" namespace art { @@ -31,8 +31,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest { options->push_back(std::make_pair("imageinstructionset", "x86_64")); } - static mirror::ArtMethod* CreateCalleeSaveMethod(InstructionSet isa, - Runtime::CalleeSaveType type) + static ArtMethod* CreateCalleeSaveMethod(InstructionSet isa, Runtime::CalleeSaveType type) NO_THREAD_SAFETY_ANALYSIS { Runtime* r = Runtime::Current(); @@ -40,7 +39,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest { t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods. r->SetInstructionSet(isa); - mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(); + ArtMethod* save_method = r->CreateCalleeSaveMethod(); r->SetCalleeSaveMethod(save_method, type); t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down. 
@@ -50,7 +49,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest { static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size) NO_THREAD_SAFETY_ANALYSIS { - mirror::ArtMethod* save_method = CreateCalleeSaveMethod(isa, type); + ArtMethod* save_method = CreateCalleeSaveMethod(isa, type); QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo(); EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for " << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills=" @@ -59,7 +58,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest { static void CheckPCOffset(InstructionSet isa, Runtime::CalleeSaveType type, size_t pc_offset) NO_THREAD_SAFETY_ANALYSIS { - mirror::ArtMethod* save_method = CreateCalleeSaveMethod(isa, type); + ArtMethod* save_method = CreateCalleeSaveMethod(isa, type); QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo(); EXPECT_EQ(save_method->GetReturnPcOffset().SizeValue(), pc_offset) << "Expected and real pc offset differs for " << type diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h index bfe7ee8a34..8209dc808e 100644 --- a/runtime/entrypoints/runtime_asm_entrypoints.h +++ b/runtime/entrypoints/runtime_asm_entrypoints.h @@ -29,19 +29,19 @@ static inline const void* GetJniDlsymLookupStub() { } // Return the address of quick stub code for handling IMT conflicts. -extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*); +extern "C" void art_quick_imt_conflict_trampoline(ArtMethod*); static inline const void* GetQuickImtConflictStub() { return reinterpret_cast<const void*>(art_quick_imt_conflict_trampoline); } // Return the address of quick stub code for bridging from quick code to the interpreter. 
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*); +extern "C" void art_quick_to_interpreter_bridge(ArtMethod*); static inline const void* GetQuickToInterpreterBridge() { return reinterpret_cast<const void*>(art_quick_to_interpreter_bridge); } // Return the address of quick stub code for handling JNI calls. -extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*); +extern "C" void art_quick_generic_jni_trampoline(ArtMethod*); static inline const void* GetQuickGenericJniStub() { return reinterpret_cast<const void*>(art_quick_generic_jni_trampoline); } @@ -53,7 +53,7 @@ static inline const void* GetQuickProxyInvokeHandler() { } // Return the address of quick stub code for resolving a method at first call. -extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*); +extern "C" void art_quick_resolution_trampoline(ArtMethod*); static inline const void* GetQuickResolutionStub() { return reinterpret_cast<const void*>(art_quick_resolution_trampoline); } diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc index 6808000e5e..bc3ba216e9 100644 --- a/runtime/exception_test.cc +++ b/runtime/exception_test.cc @@ -96,11 +96,11 @@ class ExceptionTest : public CommonRuntimeTest { CHECK_EQ(mapping_table_offset & 1u, 0u); const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset]; - method_f_ = my_klass_->FindVirtualMethod("f", "()I"); + method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*)); ASSERT_TRUE(method_f_ != nullptr); method_f_->SetEntryPointFromQuickCompiledCode(code_ptr); - method_g_ = my_klass_->FindVirtualMethod("g", "(I)V"); + method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", sizeof(void*)); ASSERT_TRUE(method_g_ != nullptr); method_g_->SetEntryPointFromQuickCompiledCode(code_ptr); } @@ -113,8 +113,8 @@ class ExceptionTest : public CommonRuntimeTest { std::vector<uint8_t> fake_gc_map_; std::vector<uint8_t> fake_header_code_and_maps_; - mirror::ArtMethod* method_f_; - mirror::ArtMethod* 
method_g_; + ArtMethod* method_f_; + ArtMethod* method_g_; private: mirror::Class* my_klass_; @@ -167,7 +167,7 @@ TEST_F(ExceptionTest, StackTraceElement) { std::vector<uintptr_t> fake_stack; Runtime* r = Runtime::Current(); r->SetInstructionSet(kRuntimeISA); - mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(); + ArtMethod* save_method = r->CreateCalleeSaveMethod(); r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll); QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo(); @@ -209,14 +209,13 @@ TEST_F(ExceptionTest, StackTraceElement) { fake_stack.push_back(0); // Set up thread to appear as if we called out of method_g_ at pc dex 3 - thread->SetTopOfStack(reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0])); + thread->SetTopOfStack(reinterpret_cast<ArtMethod**>(&fake_stack[0])); jobject internal = thread->CreateInternalStackTrace<false>(soa); ASSERT_TRUE(internal != nullptr); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal); ASSERT_TRUE(ste_array != nullptr); - mirror::ObjectArray<mirror::StackTraceElement>* trace_array = - soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array); + auto* trace_array = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array); ASSERT_TRUE(trace_array != nullptr); ASSERT_TRUE(trace_array->Get(0) != nullptr); diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc index 83f3ae1c87..4a352ddf9a 100644 --- a/runtime/fault_handler.cc +++ b/runtime/fault_handler.cc @@ -19,8 +19,9 @@ #include <setjmp.h> #include <sys/mman.h> #include <sys/ucontext.h> + +#include "art_method-inl.h" #include "base/stl_util.h" -#include "mirror/art_method.h" #include "mirror/class.h" #include "sigchain.h" #include "thread-inl.h" @@ -321,7 +322,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che return false; } - mirror::ArtMethod* method_obj = 0; + ArtMethod* method_obj = 0; uintptr_t return_pc = 0; 
uintptr_t sp = 0; @@ -331,6 +332,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che // If we don't have a potential method, we're outta here. VLOG(signals) << "potential method: " << method_obj; + // TODO: Check linear alloc and image. if (method_obj == 0 || !IsAligned<kObjectAlignment>(method_obj)) { VLOG(signals) << "no method"; return false; @@ -341,7 +343,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che // Check that the class pointer inside the object is not null and is aligned. // TODO: Method might be not a heap address, and GetClass could fault. // No read barrier because method_obj may not be a real object. - mirror::Class* cls = method_obj->GetClass<kVerifyNone, kWithoutReadBarrier>(); + mirror::Class* cls = method_obj->GetDeclaringClassNoBarrier(); if (cls == nullptr) { VLOG(signals) << "not a class"; return false; @@ -357,12 +359,6 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che return false; } - // Now make sure the class is a mirror::ArtMethod. - if (!cls->IsArtMethodClass()) { - VLOG(signals) << "not a method"; - return false; - } - // We can be certain that this is a method now. Check if we have a GC map // at the return PC address. if (true || kIsDebugBuild) { @@ -418,16 +414,14 @@ bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) { #endif if (in_generated_code) { LOG(ERROR) << "Dumping java stack trace for crash in generated code"; - mirror::ArtMethod* method = nullptr; + ArtMethod* method = nullptr; uintptr_t return_pc = 0; uintptr_t sp = 0; Thread* self = Thread::Current(); manager_->GetMethodAndReturnPcAndSp(siginfo, context, &method, &return_pc, &sp); // Inside of generated code, sp[0] is the method, so sp is the frame. 
- StackReference<mirror::ArtMethod>* frame = - reinterpret_cast<StackReference<mirror::ArtMethod>*>(sp); - self->SetTopOfStack(frame); + self->SetTopOfStack(reinterpret_cast<ArtMethod**>(sp)); #ifdef TEST_NESTED_SIGNAL // To test the nested signal handler we raise a signal here. This will cause the // nested signal handler to be called and perform a longjmp back to the setjmp diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h index adac4c276a..3b03a1427d 100644 --- a/runtime/fault_handler.h +++ b/runtime/fault_handler.h @@ -27,10 +27,7 @@ namespace art { -namespace mirror { class ArtMethod; -} // namespace mirror - class FaultHandler; class FaultManager { @@ -58,7 +55,7 @@ class FaultManager { // The IsInGeneratedCode() function checks that the mutator lock is held before it // calls GetMethodAndReturnPCAndSP(). // TODO: think about adding lock assertions and fake lock and unlock functions. - void GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, mirror::ArtMethod** out_method, + void GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, ArtMethod** out_method, uintptr_t* out_return_pc, uintptr_t* out_sp) NO_THREAD_SAFETY_ANALYSIS; bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc) diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc index 043b5580c3..363b76afd5 100644 --- a/runtime/gc/accounting/mod_union_table_test.cc +++ b/runtime/gc/accounting/mod_union_table_test.cc @@ -198,12 +198,12 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) { obj1->Set(1, other_space_ref1); obj2->Set(3, other_space_ref2); table->ClearCards(); - std::set<mirror::Object*> visited; - table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited); + std::set<mirror::Object*> visited_before; + table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_before); // Check that we visited all the references in other spaces only. 
- ASSERT_GE(visited.size(), 2u); - ASSERT_TRUE(visited.find(other_space_ref1) != visited.end()); - ASSERT_TRUE(visited.find(other_space_ref2) != visited.end()); + ASSERT_GE(visited_before.size(), 2u); + ASSERT_TRUE(visited_before.find(other_space_ref1) != visited_before.end()); + ASSERT_TRUE(visited_before.find(other_space_ref2) != visited_before.end()); // Verify that all the other references were visited. // obj1, obj2 cards should still be in mod union table since they have references to other // spaces. @@ -229,12 +229,15 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) { ASSERT_TRUE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(ptr))); } // Visit again and make sure the cards got cleared back to their sane state. - visited.clear(); - table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited); - // Verify that the dump matches what we saw earlier. + std::set<mirror::Object*> visited_after; + table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_after); + // Check that we visited a superset after. + for (auto* obj : visited_before) { + ASSERT_TRUE(visited_after.find(obj) != visited_after.end()) << obj; + } + // Verify that the dump still works. std::ostringstream oss2; table->Dump(oss2); - ASSERT_EQ(oss.str(), oss2.str()); // Remove the space we added so it doesn't persist to the next test. 
heap->RemoveSpace(other_space.get()); } diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index 84dadea8ea..fe2b284fcb 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -21,7 +21,7 @@ #include "dex_file-inl.h" #include "mem_map.h" #include "mirror/object-inl.h" -#include "mirror/class.h" +#include "mirror/class-inl.h" #include "mirror/object_array.h" namespace art { diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 26f349a839..658390dd2d 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -22,6 +22,7 @@ #include "gc/space/image_space.h" #include "gc/space/space.h" #include "intern_table.h" +#include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "scoped_thread_state_change.h" #include "thread-inl.h" diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 2a9c03dcef..1c9c41204a 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -401,7 +401,8 @@ class MarkSweepMarkObjectSlowPath { << (field != nullptr ? field->GetTypeDescriptor() : "") << " first_ref_field_offset=" << (holder_->IsClass() - ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset() + ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset( + sizeof(void*)) : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset()) << " num_of_ref_fields=" << (holder_->IsClass() @@ -589,7 +590,8 @@ void MarkSweep::MarkNonThreadRoots() { void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); // Visit all runtime roots and clear dirty flags. 
- Runtime::Current()->VisitConcurrentRoots(this, flags); + Runtime::Current()->VisitConcurrentRoots( + this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving)); } class ScanObjectVisitor { diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h index eb0e9beea9..2d5433032d 100644 --- a/runtime/gc/heap-inl.h +++ b/runtime/gc/heap-inl.h @@ -30,7 +30,6 @@ #include "gc/space/rosalloc_space-inl.h" #include "runtime.h" #include "handle_scope-inl.h" -#include "thread.h" #include "thread-inl.h" #include "utils.h" #include "verify_object-inl.h" diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index fbde4947c0..59d0259f2b 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -1630,7 +1630,12 @@ size_t Heap::GetObjectsAllocated() const { } uint64_t Heap::GetObjectsAllocatedEver() const { - return GetObjectsFreedEver() + GetObjectsAllocated(); + uint64_t total = GetObjectsFreedEver(); + // If we are detached, we can't use GetObjectsAllocated since we can't change thread states. 
+ if (Thread::Current() != nullptr) { + total += GetObjectsAllocated(); + } + return total; } uint64_t Heap::GetBytesAllocatedEver() const { diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc index 5af2a53ab2..4d51d387c8 100644 --- a/runtime/gc/reference_processor.cc +++ b/runtime/gc/reference_processor.cc @@ -17,8 +17,8 @@ #include "reference_processor.h" #include "base/time_utils.h" +#include "mirror/class-inl.h" #include "mirror/object-inl.h" -#include "mirror/reference.h" #include "mirror/reference-inl.h" #include "reference_processor-inl.h" #include "reflection.h" diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index ade9cec031..437fd8c5c9 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -23,13 +23,13 @@ #include <random> +#include "art_method.h" #include "base/macros.h" #include "base/stl_util.h" #include "base/scoped_flock.h" #include "base/time_utils.h" #include "base/unix_file/fd_file.h" #include "gc/accounting/space_bitmap-inl.h" -#include "mirror/art_method.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "oat_file.h" @@ -687,7 +687,20 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat image_file_size, image_header.GetImageSize()); return nullptr; } - auto end_of_bitmap = image_header.GetImageBitmapOffset() + image_header.GetImageBitmapSize(); + + if (kIsDebugBuild) { + LOG(INFO) << "Dumping image sections"; + for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) { + const auto section_idx = static_cast<ImageHeader::ImageSections>(i); + auto& section = image_header.GetImageSection(section_idx); + LOG(INFO) << section_idx << " start=" + << reinterpret_cast<void*>(image_header.GetImageBegin() + section.Offset()) + << section; + } + } + + const auto& bitmap_section = image_header.GetImageSection(ImageHeader::kSectionImageBitmap); + auto end_of_bitmap = static_cast<size_t>(bitmap_section.End()); if 
(end_of_bitmap != image_file_size) { *error_msg = StringPrintf( "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.", image_file_size, @@ -697,7 +710,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat // Note: The image header is part of the image due to mmap page alignment required of offset. std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress( - image_header.GetImageBegin(), image_header.GetImageSize() + image_header.GetArtFieldsSize(), + image_header.GetImageBegin(), image_header.GetImageSize(), PROT_READ | PROT_WRITE, MAP_PRIVATE, file->Fd(), 0, false, image_filename, error_msg)); if (map.get() == nullptr) { DCHECK(!error_msg->empty()); @@ -706,13 +719,9 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat CHECK_EQ(image_header.GetImageBegin(), map->Begin()); DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader))); - std::unique_ptr<MemMap> image_map( - MemMap::MapFileAtAddress(nullptr, image_header.GetImageBitmapSize(), - PROT_READ, MAP_PRIVATE, - file->Fd(), image_header.GetImageBitmapOffset(), - false, - image_filename, - error_msg)); + std::unique_ptr<MemMap> image_map(MemMap::MapFileAtAddress( + nullptr, bitmap_section.Size(), PROT_READ, MAP_PRIVATE, file->Fd(), + bitmap_section.Offset(), false, image_filename, error_msg)); if (image_map.get() == nullptr) { *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str()); return nullptr; @@ -729,7 +738,9 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat return nullptr; } - uint8_t* const image_end = map->Begin() + image_header.GetImageSize(); + // We only want the mirror object, not the ArtFields and ArtMethods. 
+ uint8_t* const image_end = + map->Begin() + image_header.GetImageSection(ImageHeader::kSectionObjects).End(); std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename, image_location, map.release(), bitmap.release(), image_end)); @@ -753,25 +764,16 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat Runtime* runtime = Runtime::Current(); runtime->SetInstructionSet(space->oat_file_->GetOatHeader().GetInstructionSet()); - mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod); - runtime->SetResolutionMethod(down_cast<mirror::ArtMethod*>(resolution_method)); - mirror::Object* imt_conflict_method = image_header.GetImageRoot(ImageHeader::kImtConflictMethod); - runtime->SetImtConflictMethod(down_cast<mirror::ArtMethod*>(imt_conflict_method)); - mirror::Object* imt_unimplemented_method = - image_header.GetImageRoot(ImageHeader::kImtUnimplementedMethod); - runtime->SetImtUnimplementedMethod(down_cast<mirror::ArtMethod*>(imt_unimplemented_method)); - mirror::Object* default_imt = image_header.GetImageRoot(ImageHeader::kDefaultImt); - runtime->SetDefaultImt(down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(default_imt)); - - mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod); - runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), - Runtime::kSaveAll); - callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod); - runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), - Runtime::kRefsOnly); - callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod); - runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), - Runtime::kRefsAndArgs); + runtime->SetResolutionMethod(image_header.GetImageMethod(ImageHeader::kResolutionMethod)); + runtime->SetImtConflictMethod(image_header.GetImageMethod(ImageHeader::kImtConflictMethod)); + 
runtime->SetImtUnimplementedMethod( + image_header.GetImageMethod(ImageHeader::kImtUnimplementedMethod)); + runtime->SetCalleeSaveMethod( + image_header.GetImageMethod(ImageHeader::kCalleeSaveMethod), Runtime::kSaveAll); + runtime->SetCalleeSaveMethod( + image_header.GetImageMethod(ImageHeader::kRefsOnlySaveMethod), Runtime::kRefsOnly); + runtime->SetCalleeSaveMethod( + image_header.GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod), Runtime::kRefsAndArgs); if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { LOG(INFO) << "ImageSpace::Init exiting (" << PrettyDuration(NanoTime() - start_time) diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h index 3e9e9f7a49..6e0e0d24c7 100644 --- a/runtime/gc/space/space_test.h +++ b/runtime/gc/space/space_test.h @@ -23,6 +23,8 @@ #include "common_runtime_test.h" #include "globals.h" #include "mirror/array-inl.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" #include "mirror/object-inl.h" #include "scoped_thread_state_change.h" #include "zygote_space.h" diff --git a/runtime/globals.h b/runtime/globals.h index 4d7fd2e09c..fe699c6ff3 100644 --- a/runtime/globals.h +++ b/runtime/globals.h @@ -67,12 +67,8 @@ static constexpr bool kUseOptimizingCompiler = false; // Garbage collector constants. static constexpr bool kMovingCollector = true; static constexpr bool kMarkCompactSupport = false && kMovingCollector; -// True if we allow moving field arrays, this can cause complication with mark compact. -static constexpr bool kMoveFieldArrays = !kMarkCompactSupport; // True if we allow moving classes. static constexpr bool kMovingClasses = !kMarkCompactSupport; -// True if we allow moving methods. -static constexpr bool kMovingMethods = false; // If true, the quick compiler embeds class pointers in the compiled // code, if possible. 
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h index ac28c8ad6e..9a0e52efd3 100644 --- a/runtime/handle_scope.h +++ b/runtime/handle_scope.h @@ -159,6 +159,10 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope { ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Thread* Self() const { + return self_; + } + private: template<class T> ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { diff --git a/runtime/image.cc b/runtime/image.cc index d9bd2a8937..947c914de6 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -24,27 +24,21 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '5', '\0' }; +const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '6', '\0' }; ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, - uint32_t art_fields_offset, - uint32_t art_fields_size, - uint32_t image_bitmap_offset, - uint32_t image_bitmap_size, + ImageSection* sections, uint32_t image_roots, uint32_t oat_checksum, uint32_t oat_file_begin, uint32_t oat_data_begin, uint32_t oat_data_end, uint32_t oat_file_end, + uint32_t pointer_size, bool compile_pic) : image_begin_(image_begin), image_size_(image_size), - art_fields_offset_(art_fields_offset), - art_fields_size_(art_fields_size), - image_bitmap_offset_(image_bitmap_offset), - image_bitmap_size_(image_bitmap_size), oat_checksum_(oat_checksum), oat_file_begin_(oat_file_begin), oat_data_begin_(oat_data_begin), @@ -52,6 +46,7 @@ ImageHeader::ImageHeader(uint32_t image_begin, oat_file_end_(oat_file_end), patch_delta_(0), image_roots_(image_roots), + pointer_size_(pointer_size), compile_pic_(compile_pic) { CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize)); CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize)); @@ -61,8 +56,10 @@ ImageHeader::ImageHeader(uint32_t image_begin, 
CHECK_LE(oat_file_begin, oat_data_begin); CHECK_LT(oat_data_begin, oat_data_end); CHECK_LE(oat_data_end, oat_file_end); + CHECK(ValidPointerSize(pointer_size_)) << pointer_size_; memcpy(magic_, kImageMagic, sizeof(kImageMagic)); memcpy(version_, kImageVersion, sizeof(kImageVersion)); + std::copy_n(sections, kSectionCount, sections_); } void ImageHeader::RelocateImage(off_t delta) { @@ -74,6 +71,9 @@ void ImageHeader::RelocateImage(off_t delta) { oat_file_end_ += delta; image_roots_ += delta; patch_delta_ += delta; + for (size_t i = 0; i < kImageMethodsCount; ++i) { + image_methods_[i] += delta; + } } bool ImageHeader::IsValid() const { @@ -128,4 +128,23 @@ mirror::ObjectArray<mirror::Object>* ImageHeader::GetImageRoots() const { return result; } +ArtMethod* ImageHeader::GetImageMethod(ImageMethod index) const { + CHECK_LT(static_cast<size_t>(index), kImageMethodsCount); + return reinterpret_cast<ArtMethod*>(image_methods_[index]); +} + +void ImageHeader::SetImageMethod(ImageMethod index, ArtMethod* method) { + CHECK_LT(static_cast<size_t>(index), kImageMethodsCount); + image_methods_[index] = reinterpret_cast<uint64_t>(method); +} + +const ImageSection& ImageHeader::GetImageSection(ImageSections index) const { + CHECK_LT(static_cast<size_t>(index), kSectionCount); + return sections_[index]; +} + +std::ostream& operator<<(std::ostream& os, const ImageSection& section) { + return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End(); +} + } // namespace art diff --git a/runtime/image.h b/runtime/image.h index 52995edb52..c6be7ef3f7 100644 --- a/runtime/image.h +++ b/runtime/image.h @@ -24,6 +24,34 @@ namespace art { +class PACKED(4) ImageSection { + public: + ImageSection() : offset_(0), size_(0) { } + ImageSection(uint32_t offset, uint32_t size) : offset_(offset), size_(size) { } + ImageSection(const ImageSection& section) = default; + ImageSection& operator=(const ImageSection& section) = default; + + uint32_t Offset() const { + 
return offset_; + } + + uint32_t Size() const { + return size_; + } + + uint32_t End() const { + return Offset() + Size(); + } + + bool Contains(uint64_t offset) const { + return offset - offset_ < size_; + } + + private: + uint32_t offset_; + uint32_t size_; +}; + // header of image files written by ImageWriter, read and validated by Space. class PACKED(4) ImageHeader { public: @@ -31,16 +59,14 @@ class PACKED(4) ImageHeader { ImageHeader(uint32_t image_begin, uint32_t image_size_, - uint32_t art_fields_offset, - uint32_t art_fields_size, - uint32_t image_bitmap_offset, - uint32_t image_bitmap_size, + ImageSection* sections, uint32_t image_roots, uint32_t oat_checksum, uint32_t oat_file_begin, uint32_t oat_data_begin, uint32_t oat_data_end, uint32_t oat_file_end, + uint32_t pointer_size, bool compile_pic_); bool IsValid() const; @@ -54,22 +80,6 @@ class PACKED(4) ImageHeader { return static_cast<uint32_t>(image_size_); } - size_t GetArtFieldsOffset() const { - return art_fields_offset_; - } - - size_t GetArtFieldsSize() const { - return art_fields_size_; - } - - size_t GetImageBitmapOffset() const { - return image_bitmap_offset_; - } - - size_t GetImageBitmapSize() const { - return image_bitmap_size_; - } - uint32_t GetOatChecksum() const { return oat_checksum_; } @@ -94,6 +104,10 @@ class PACKED(4) ImageHeader { return reinterpret_cast<uint8_t*>(oat_file_end_); } + uint32_t GetPointerSize() const { + return pointer_size_; + } + off_t GetPatchDelta() const { return patch_delta_; } @@ -108,19 +122,38 @@ class PACKED(4) ImageHeader { return oat_filename; } - enum ImageRoot { + enum ImageMethod { kResolutionMethod, kImtConflictMethod, kImtUnimplementedMethod, - kDefaultImt, kCalleeSaveMethod, kRefsOnlySaveMethod, kRefsAndArgsSaveMethod, + kImageMethodsCount, // Number of elements in enum. 
+ }; + + enum ImageRoot { kDexCaches, kClassRoots, kImageRootsMax, }; + enum ImageSections { + kSectionObjects, + kSectionArtFields, + kSectionArtMethods, + kSectionImageBitmap, + kSectionCount, // Number of elements in enum. + }; + + ArtMethod* GetImageMethod(ImageMethod index) const; + void SetImageMethod(ImageMethod index, ArtMethod* method); + + const ImageSection& GetImageSection(ImageSections index) const; + const ImageSection& GetMethodsSection() const { + return GetImageSection(kSectionArtMethods); + } + mirror::Object* GetImageRoot(ImageRoot image_root) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ObjectArray<mirror::Object>* GetImageRoots() const @@ -145,18 +178,6 @@ class PACKED(4) ImageHeader { // Image size, not page aligned. uint32_t image_size_; - // ArtField array offset. - uint32_t art_fields_offset_; - - // ArtField size in bytes. - uint32_t art_fields_size_; - - // Image bitmap offset in the file. - uint32_t image_bitmap_offset_; - - // Size of the image bitmap. - uint32_t image_bitmap_size_; - // Checksum of the oat file we link to for load time sanity check. uint32_t oat_checksum_; @@ -179,12 +200,26 @@ class PACKED(4) ImageHeader { // Absolute address of an Object[] of objects needed to reinitialize from an image. uint32_t image_roots_; + // Pointer size, this affects the size of the ArtMethods. + uint32_t pointer_size_; + // Boolean (0 or 1) to denote if the image was compiled with --compile-pic option const uint32_t compile_pic_; + // Image sections + ImageSection sections_[kSectionCount]; + + // Image methods. 
+ uint64_t image_methods_[kImageMethodsCount]; + friend class ImageWriter; }; +std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageMethod& policy); +std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageRoot& policy); +std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageSections& section); +std::ostream& operator<<(std::ostream& os, const ImageSection& section); + } // namespace art #endif // ART_RUNTIME_IMAGE_H_ diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h index 39d850fc4d..f70503d62a 100644 --- a/runtime/indirect_reference_table-inl.h +++ b/runtime/indirect_reference_table-inl.h @@ -19,6 +19,7 @@ #include "indirect_reference_table.h" +#include "gc_root-inl.h" #include "runtime-inl.h" #include "verify_object-inl.h" diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index 98e6200bcb..4ced23d488 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -19,6 +19,7 @@ #include <sstream> #include "arch/context.h" +#include "art_method-inl.h" #include "atomic.h" #include "class_linker.h" #include "debugger.h" @@ -30,7 +31,6 @@ #include "interpreter/interpreter.h" #include "jit/jit.h" #include "jit/jit_code_cache.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" #include "mirror/object_array-inl.h" @@ -78,15 +78,15 @@ void Instrumentation::InstallStubsForClass(mirror::Class* klass) { // could not be initialized or linked with regards to class inheritance. 
} else { for (size_t i = 0, e = klass->NumDirectMethods(); i < e; i++) { - InstallStubsForMethod(klass->GetDirectMethod(i)); + InstallStubsForMethod(klass->GetDirectMethod(i, sizeof(void*))); } for (size_t i = 0, e = klass->NumVirtualMethods(); i < e; i++) { - InstallStubsForMethod(klass->GetVirtualMethod(i)); + InstallStubsForMethod(klass->GetVirtualMethod(i, sizeof(void*))); } } } -static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code) +static void UpdateEntrypoints(ArtMethod* method, const void* quick_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Runtime* const runtime = Runtime::Current(); jit::Jit* jit = runtime->GetJit(); @@ -114,7 +114,7 @@ static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code) } } -void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) { +void Instrumentation::InstallStubsForMethod(ArtMethod* method) { if (method->IsAbstract() || method->IsProxyMethod()) { // Do not change stubs for these methods. return; @@ -175,7 +175,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg) } bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m == nullptr) { if (kVerboseInstrumentation) { LOG(INFO) << " Skipping upcall. Frame " << GetFrameId(); @@ -319,7 +319,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg) if (instrumentation_stack_->size() == 0) { return false; // Stop. } - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (GetCurrentQuickFrame() == nullptr) { if (kVerboseInstrumentation) { LOG(INFO) << " Ignoring a shadow frame. 
Frame " << GetFrameId() @@ -656,7 +656,7 @@ void Instrumentation::ResetQuickAllocEntryPoints() { } } -void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code) { +void Instrumentation::UpdateMethodsCode(ArtMethod* method, const void* quick_code) { DCHECK(method->GetDeclaringClass()->IsResolved()); const void* new_quick_code; if (LIKELY(!instrumentation_stubs_installed_)) { @@ -679,67 +679,42 @@ void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* q UpdateEntrypoints(method, new_quick_code); } -bool Instrumentation::AddDeoptimizedMethod(mirror::ArtMethod* method) { - // Note that the insert() below isn't read barrier-aware. So, this - // FindDeoptimizedMethod() call is necessary or else we would end up - // storing the same method twice in the map (the from-space and the - // to-space ones). - if (FindDeoptimizedMethod(method)) { +bool Instrumentation::AddDeoptimizedMethod(ArtMethod* method) { + if (IsDeoptimizedMethod(method)) { // Already in the map. Return. return false; } // Not found. Add it. - static_assert(!kMovingMethods, "Not safe if methods can move"); - int32_t hash_code = method->IdentityHashCode(); - deoptimized_methods_.insert(std::make_pair(hash_code, GcRoot<mirror::ArtMethod>(method))); + deoptimized_methods_.insert(method); return true; } -bool Instrumentation::FindDeoptimizedMethod(mirror::ArtMethod* method) { - static_assert(!kMovingMethods, "Not safe if methods can move"); - int32_t hash_code = method->IdentityHashCode(); - auto range = deoptimized_methods_.equal_range(hash_code); - for (auto it = range.first; it != range.second; ++it) { - mirror::ArtMethod* m = it->second.Read(); - if (m == method) { - // Found. - return true; - } - } - // Not found. 
- return false; +bool Instrumentation::IsDeoptimizedMethod(ArtMethod* method) { + return deoptimized_methods_.find(method) != deoptimized_methods_.end(); } -mirror::ArtMethod* Instrumentation::BeginDeoptimizedMethod() { - auto it = deoptimized_methods_.begin(); - if (it == deoptimized_methods_.end()) { +ArtMethod* Instrumentation::BeginDeoptimizedMethod() { + if (deoptimized_methods_.empty()) { // Empty. return nullptr; } - return it->second.Read(); + return *deoptimized_methods_.begin(); } -bool Instrumentation::RemoveDeoptimizedMethod(mirror::ArtMethod* method) { - static_assert(!kMovingMethods, "Not safe if methods can move"); - int32_t hash_code = method->IdentityHashCode(); - auto range = deoptimized_methods_.equal_range(hash_code); - for (auto it = range.first; it != range.second; ++it) { - mirror::ArtMethod* m = it->second.Read(); - if (m == method) { - // Found. Erase and return. - deoptimized_methods_.erase(it); - return true; - } +bool Instrumentation::RemoveDeoptimizedMethod(ArtMethod* method) { + auto it = deoptimized_methods_.find(method); + if (it == deoptimized_methods_.end()) { + return false; } - // Not found. 
- return false; + deoptimized_methods_.erase(it); + return true; } bool Instrumentation::IsDeoptimizedMethodsEmpty() const { return deoptimized_methods_.empty(); } -void Instrumentation::Deoptimize(mirror::ArtMethod* method) { +void Instrumentation::Deoptimize(ArtMethod* method) { CHECK(!method->IsNative()); CHECK(!method->IsProxyMethod()); CHECK(!method->IsAbstract()); @@ -762,7 +737,7 @@ void Instrumentation::Deoptimize(mirror::ArtMethod* method) { } } -void Instrumentation::Undeoptimize(mirror::ArtMethod* method) { +void Instrumentation::Undeoptimize(ArtMethod* method) { CHECK(!method->IsNative()); CHECK(!method->IsProxyMethod()); CHECK(!method->IsAbstract()); @@ -798,10 +773,10 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) { } } -bool Instrumentation::IsDeoptimized(mirror::ArtMethod* method) { +bool Instrumentation::IsDeoptimized(ArtMethod* method) { DCHECK(method != nullptr); ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_); - return FindDeoptimizedMethod(method); + return IsDeoptimizedMethod(method); } void Instrumentation::EnableDeoptimization() { @@ -819,7 +794,7 @@ void Instrumentation::DisableDeoptimization(const char* key) { } // Undeoptimized selected methods. 
while (true) { - mirror::ArtMethod* method; + ArtMethod* method; { ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_); if (IsDeoptimizedMethodsEmpty()) { @@ -866,7 +841,7 @@ void Instrumentation::DisableMethodTracing(const char* key) { ConfigureStubs(key, InstrumentationLevel::kInstrumentNothing); } -const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method, size_t pointer_size) const { +const void* Instrumentation::GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const { Runtime* runtime = Runtime::Current(); if (LIKELY(!instrumentation_stubs_installed_)) { const void* code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); @@ -883,7 +858,7 @@ const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method, size_t p } void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, + ArtMethod* method, uint32_t dex_pc) const { auto it = method_entry_listeners_.begin(); bool is_end = (it == method_entry_listeners_.end()); @@ -897,7 +872,7 @@ void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_ } void Instrumentation::MethodExitEventImpl(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, + ArtMethod* method, uint32_t dex_pc, const JValue& return_value) const { auto it = method_exit_listeners_.begin(); bool is_end = (it == method_exit_listeners_.end()); @@ -911,7 +886,7 @@ void Instrumentation::MethodExitEventImpl(Thread* thread, mirror::Object* this_o } void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, + ArtMethod* method, uint32_t dex_pc) const { if (HasMethodUnwindListeners()) { for (InstrumentationListener* listener : method_unwind_listeners_) { @@ -921,7 +896,7 @@ void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_obj } void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object, - 
mirror::ArtMethod* method, + ArtMethod* method, uint32_t dex_pc) const { std::shared_ptr<std::list<InstrumentationListener*>> original(dex_pc_listeners_); for (InstrumentationListener* listener : *original.get()) { @@ -929,7 +904,7 @@ void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_o } } -void Instrumentation::BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method, +void Instrumentation::BackwardBranchImpl(Thread* thread, ArtMethod* method, int32_t offset) const { for (InstrumentationListener* listener : backward_branch_listeners_) { listener->BackwardBranch(thread, method, offset); @@ -937,7 +912,7 @@ void Instrumentation::BackwardBranchImpl(Thread* thread, mirror::ArtMethod* meth } void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, + ArtMethod* method, uint32_t dex_pc, ArtField* field) const { std::shared_ptr<std::list<InstrumentationListener*>> original(field_read_listeners_); for (InstrumentationListener* listener : *original.get()) { @@ -946,7 +921,7 @@ void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_ob } void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, + ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) const { std::shared_ptr<std::list<InstrumentationListener*>> original(field_write_listeners_); for (InstrumentationListener* listener : *original.get()) { @@ -980,7 +955,7 @@ static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instr } void Instrumentation::PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object, - mirror::ArtMethod* method, + ArtMethod* method, uintptr_t lr, bool interpreter_entry) { // We have a callee-save frame meaning this value is guaranteed to never be 0. 
size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk); @@ -1011,7 +986,7 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt CheckStackDepth(self, instrumentation_frame, 0); self->VerifyStack(); - mirror::ArtMethod* method = instrumentation_frame.method_; + ArtMethod* method = instrumentation_frame.method_; uint32_t length; char return_shorty = method->GetShorty(&length)[0]; JValue return_value; @@ -1064,7 +1039,7 @@ void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) c // TODO: bring back CheckStackDepth(self, instrumentation_frame, 2); stack->pop_front(); - mirror::ArtMethod* method = instrumentation_frame.method_; + ArtMethod* method = instrumentation_frame.method_; if (is_deoptimization) { if (kVerboseInstrumentation) { LOG(INFO) << "Popping for deoptimization " << PrettyMethod(method); @@ -1082,17 +1057,6 @@ void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) c } } -void Instrumentation::VisitRoots(RootVisitor* visitor) { - WriterMutexLock mu(Thread::Current(), deoptimized_methods_lock_); - if (IsDeoptimizedMethodsEmpty()) { - return; - } - BufferedRootVisitor<kDefaultBufferedRootCount> roots(visitor, RootInfo(kRootVMInternal)); - for (auto pair : deoptimized_methods_) { - roots.VisitRoot(pair.second); - } -} - std::string InstrumentationStackFrame::Dump() const { std::ostringstream os; os << "Frame " << frame_id_ << " " << PrettyMethod(method_) << ":" diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h index 7d70d211bb..db8e9c2508 100644 --- a/runtime/instrumentation.h +++ b/runtime/instrumentation.h @@ -19,7 +19,7 @@ #include <stdint.h> #include <list> -#include <map> +#include <unordered_set> #include "arch/instruction_set.h" #include "base/macros.h" @@ -29,12 +29,12 @@ namespace art { namespace mirror { - class ArtMethod; class Class; class Object; class Throwable; } // namespace mirror class ArtField; +class ArtMethod; union 
JValue; class Thread; @@ -62,32 +62,32 @@ struct InstrumentationListener { // Call-back for when a method is entered. virtual void MethodEntered(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, + ArtMethod* method, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; // Call-back for when a method is exited. virtual void MethodExited(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, + ArtMethod* method, uint32_t dex_pc, const JValue& return_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; // Call-back for when a method is popped due to an exception throw. A method will either cause a // MethodExited call-back or a MethodUnwind call-back when its activation is removed. virtual void MethodUnwind(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc) + ArtMethod* method, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; // Call-back for when the dex pc moves in a method. virtual void DexPcMoved(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t new_dex_pc) + ArtMethod* method, uint32_t new_dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; // Call-back for when we read from a field. - virtual void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, + virtual void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) = 0; // Call-back for when we write into a field. - virtual void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, + virtual void FieldWritten(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) = 0; // Call-back when an exception is caught. @@ -95,7 +95,7 @@ struct InstrumentationListener { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; // Call-back for when we get a backward branch. 
- virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset) + virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; }; @@ -162,19 +162,19 @@ class Instrumentation { // Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static // method (except a class initializer) set to the resolution trampoline will be deoptimized only // once its declaring class is initialized. - void Deoptimize(mirror::ArtMethod* method) + void Deoptimize(ArtMethod* method) LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); // Undeoptimze the method by restoring its entrypoints. Nevertheless, a static method // (except a class initializer) set to the resolution trampoline will be updated only once its // declaring class is initialized. - void Undeoptimize(mirror::ArtMethod* method) + void Undeoptimize(ArtMethod* method) LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); // Indicates whether the method has been deoptimized so it is executed with the interpreter. - bool IsDeoptimized(mirror::ArtMethod* method) + bool IsDeoptimized(ArtMethod* method) LOCKS_EXCLUDED(deoptimized_methods_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -205,13 +205,13 @@ class Instrumentation { void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_); // Update the code of a method respecting any installed stubs. - void UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code) + void UpdateMethodsCode(ArtMethod* method, const void* quick_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the quick code for the given method. More efficient than asking the class linker as it // will short-cut to GetCode if instrumentation and static method resolution stubs aren't // installed. 
- const void* GetQuickCodeFor(mirror::ArtMethod* method, size_t pointer_size) const + const void* GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ForceInterpretOnly() { @@ -273,7 +273,7 @@ class Instrumentation { // Inform listeners that a method has been entered. A dex PC is provided as we may install // listeners into executing code and get method enter events for methods already on the stack. void MethodEnterEvent(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc) const + ArtMethod* method, uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(HasMethodEntryListeners())) { MethodEnterEventImpl(thread, this_object, method, dex_pc); @@ -282,7 +282,7 @@ class Instrumentation { // Inform listeners that a method has been exited. void MethodExitEvent(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, + ArtMethod* method, uint32_t dex_pc, const JValue& return_value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(HasMethodExitListeners())) { @@ -292,12 +292,12 @@ class Instrumentation { // Inform listeners that a method has been exited due to an exception. void MethodUnwindEvent(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc) const + ArtMethod* method, uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Inform listeners that the dex pc has moved (only supported by the interpreter). void DexPcMovedEvent(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc) const + ArtMethod* method, uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(HasDexPcListeners())) { DexPcMovedEventImpl(thread, this_object, method, dex_pc); @@ -305,7 +305,7 @@ class Instrumentation { } // Inform listeners that a backward branch has been taken (only supported by the interpreter). 
- void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t offset) const + void BackwardBranch(Thread* thread, ArtMethod* method, int32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(HasBackwardBranchListeners())) { BackwardBranchImpl(thread, method, offset); @@ -314,7 +314,7 @@ class Instrumentation { // Inform listeners that we read a field (only supported by the interpreter). void FieldReadEvent(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, + ArtMethod* method, uint32_t dex_pc, ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(HasFieldReadListeners())) { @@ -324,7 +324,7 @@ class Instrumentation { // Inform listeners that we write a field (only supported by the interpreter). void FieldWriteEvent(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, + ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(HasFieldWriteListeners())) { @@ -339,7 +339,7 @@ class Instrumentation { // Called when an instrumented method is entered. The intended link register (lr) is saved so // that returning causes a branch to the method exit stub. Generates method enter events. void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object, - mirror::ArtMethod* method, uintptr_t lr, + ArtMethod* method, uintptr_t lr, bool interpreter_entry) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -356,12 +356,9 @@ class Instrumentation { // Call back for configure stubs. 
void InstallStubsForClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void InstallStubsForMethod(mirror::ArtMethod* method) + void InstallStubsForMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(deoptimized_methods_lock_); - private: InstrumentationLevel GetCurrentInstrumentationLevel() const; @@ -384,42 +381,39 @@ class Instrumentation { void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS; void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc) const + ArtMethod* method, uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void MethodExitEventImpl(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, + ArtMethod* method, uint32_t dex_pc, const JValue& return_value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc) const + ArtMethod* method, uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method, int32_t offset) const + void BackwardBranchImpl(Thread* thread, ArtMethod* method, int32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FieldReadEventImpl(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, + ArtMethod* method, uint32_t dex_pc, ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, + ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Read barrier-aware utility functions for accessing deoptimized_methods_ - bool 
AddDeoptimizedMethod(mirror::ArtMethod* method) + bool AddDeoptimizedMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_); - bool FindDeoptimizedMethod(mirror::ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_); - bool RemoveDeoptimizedMethod(mirror::ArtMethod* method) + bool IsDeoptimizedMethod(ArtMethod* method) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_); + bool RemoveDeoptimizedMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_); - mirror::ArtMethod* BeginDeoptimizedMethod() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_); + ArtMethod* BeginDeoptimizedMethod() + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_); bool IsDeoptimizedMethodsEmpty() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_); + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_); // Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code? bool instrumentation_stubs_installed_; @@ -488,8 +482,7 @@ class Instrumentation { // The set of methods being deoptimized (by the debugger) which must be executed with interpreter // only. mutable ReaderWriterMutex deoptimized_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - std::multimap<int32_t, GcRoot<mirror::ArtMethod>> deoptimized_methods_ - GUARDED_BY(deoptimized_methods_lock_); + std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_); bool deoptimization_enabled_; // Current interpreter handler table. 
This is updated each time the thread state flags are @@ -509,7 +502,7 @@ std::ostream& operator<<(std::ostream& os, const Instrumentation::Instrumentatio // An element in the instrumentation side stack maintained in art::Thread. struct InstrumentationStackFrame { - InstrumentationStackFrame(mirror::Object* this_object, mirror::ArtMethod* method, + InstrumentationStackFrame(mirror::Object* this_object, ArtMethod* method, uintptr_t return_pc, size_t frame_id, bool interpreter_entry) : this_object_(this_object), method_(method), return_pc_(return_pc), frame_id_(frame_id), interpreter_entry_(interpreter_entry) { @@ -518,7 +511,7 @@ struct InstrumentationStackFrame { std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Object* this_object_; - mirror::ArtMethod* method_; + ArtMethod* method_; uintptr_t return_pc_; size_t frame_id_; bool interpreter_entry_; diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc index 5afacb8feb..85bb8c4197 100644 --- a/runtime/instrumentation_test.cc +++ b/runtime/instrumentation_test.cc @@ -42,7 +42,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio void MethodEntered(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, + ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { received_method_enter_event = true; @@ -50,7 +50,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio void MethodExited(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, + ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED, const JValue& return_value ATTRIBUTE_UNUSED) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -59,7 +59,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio 
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, + ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { received_method_unwind_event = true; @@ -67,7 +67,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio void DexPcMoved(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, + ArtMethod* method ATTRIBUTE_UNUSED, uint32_t new_dex_pc ATTRIBUTE_UNUSED) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { received_dex_pc_moved_event = true; @@ -75,7 +75,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, + ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED, ArtField* field ATTRIBUTE_UNUSED) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -84,7 +84,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, + ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED, ArtField* field ATTRIBUTE_UNUSED, const JValue& field_value ATTRIBUTE_UNUSED) @@ -99,7 +99,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio } void BackwardBranch(Thread* thread ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, + ArtMethod* method ATTRIBUTE_UNUSED, int32_t dex_pc_offset ATTRIBUTE_UNUSED) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { received_backward_branch_event = true; @@ -170,7 +170,7 @@ class InstrumentationTest : public CommonRuntimeTest { 
soa.Self()->TransitionFromSuspendedToRunnable(); } - mirror::ArtMethod* const event_method = nullptr; + ArtMethod* const event_method = nullptr; mirror::Object* const event_obj = nullptr; const uint32_t event_dex_pc = 0; @@ -197,8 +197,7 @@ class InstrumentationTest : public CommonRuntimeTest { EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event)); } - void DeoptimizeMethod(Thread* self, Handle<mirror::ArtMethod> method, - bool enable_deoptimization) + void DeoptimizeMethod(Thread* self, ArtMethod* method, bool enable_deoptimization) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); @@ -207,19 +206,19 @@ class InstrumentationTest : public CommonRuntimeTest { if (enable_deoptimization) { instrumentation->EnableDeoptimization(); } - instrumentation->Deoptimize(method.Get()); + instrumentation->Deoptimize(method); runtime->GetThreadList()->ResumeAll(); self->TransitionFromSuspendedToRunnable(); } - void UndeoptimizeMethod(Thread* self, Handle<mirror::ArtMethod> method, + void UndeoptimizeMethod(Thread* self, ArtMethod* method, const char* key, bool disable_deoptimization) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); runtime->GetThreadList()->SuspendAll("Single method undeoptimization"); - instrumentation->Undeoptimize(method.Get()); + instrumentation->Undeoptimize(method); if (disable_deoptimization) { instrumentation->DisableDeoptimization(key); } @@ -304,7 +303,7 @@ class InstrumentationTest : public CommonRuntimeTest { } static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type, - Thread* self, mirror::ArtMethod* method, mirror::Object* obj, + Thread* self, ArtMethod* method, mirror::Object* obj, uint32_t dex_pc) 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { switch (event_type) { @@ -434,28 +433,28 @@ TEST_F(InstrumentationTest, DeoptimizeDirectMethod) { Runtime* const runtime = Runtime::Current(); instrumentation::Instrumentation* instr = runtime->GetInstrumentation(); ClassLinker* class_linker = runtime->GetClassLinker(); - StackHandleScope<2> hs(soa.Self()); + StackHandleScope<1> hs(soa.Self()); Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader))); mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader); ASSERT_TRUE(klass != nullptr); - Handle<mirror::ArtMethod> method_to_deoptimize( - hs.NewHandle(klass->FindDeclaredDirectMethod("instanceMethod", "()V"))); - ASSERT_TRUE(method_to_deoptimize.Get() != nullptr); + ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V", + sizeof(void*)); + ASSERT_TRUE(method_to_deoptimize != nullptr); EXPECT_FALSE(instr->AreAllMethodsDeoptimized()); - EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get())); + EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize)); DeoptimizeMethod(soa.Self(), method_to_deoptimize, true); EXPECT_FALSE(instr->AreAllMethodsDeoptimized()); EXPECT_TRUE(instr->AreExitStubsInstalled()); - EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get())); + EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize)); constexpr const char* instrumentation_key = "DeoptimizeDirectMethod"; UndeoptimizeMethod(soa.Self(), method_to_deoptimize, instrumentation_key, true); EXPECT_FALSE(instr->AreAllMethodsDeoptimized()); - EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get())); + EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize)); } TEST_F(InstrumentationTest, FullDeoptimization) { @@ -481,16 +480,16 @@ TEST_F(InstrumentationTest, MixedDeoptimization) { Runtime* const runtime = Runtime::Current(); instrumentation::Instrumentation* instr = runtime->GetInstrumentation(); ClassLinker* class_linker = 
runtime->GetClassLinker(); - StackHandleScope<2> hs(soa.Self()); + StackHandleScope<1> hs(soa.Self()); Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader))); mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader); ASSERT_TRUE(klass != nullptr); - Handle<mirror::ArtMethod> method_to_deoptimize( - hs.NewHandle(klass->FindDeclaredDirectMethod("instanceMethod", "()V"))); - ASSERT_TRUE(method_to_deoptimize.Get() != nullptr); + ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V", + sizeof(void*)); + ASSERT_TRUE(method_to_deoptimize != nullptr); EXPECT_FALSE(instr->AreAllMethodsDeoptimized()); - EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get())); + EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize)); DeoptimizeMethod(soa.Self(), method_to_deoptimize, true); // Deoptimizing a method does not change instrumentation level. @@ -498,7 +497,7 @@ TEST_F(InstrumentationTest, MixedDeoptimization) { GetCurrentInstrumentationLevel()); EXPECT_FALSE(instr->AreAllMethodsDeoptimized()); EXPECT_TRUE(instr->AreExitStubsInstalled()); - EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get())); + EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize)); constexpr const char* instrumentation_key = "MixedDeoptimization"; DeoptimizeEverything(soa.Self(), instrumentation_key, false); @@ -506,20 +505,20 @@ TEST_F(InstrumentationTest, MixedDeoptimization) { GetCurrentInstrumentationLevel()); EXPECT_TRUE(instr->AreAllMethodsDeoptimized()); EXPECT_TRUE(instr->AreExitStubsInstalled()); - EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get())); + EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize)); UndeoptimizeEverything(soa.Self(), instrumentation_key, false); EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentNothing, GetCurrentInstrumentationLevel()); EXPECT_FALSE(instr->AreAllMethodsDeoptimized()); EXPECT_TRUE(instr->AreExitStubsInstalled()); - 
EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get())); + EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize)); UndeoptimizeMethod(soa.Self(), method_to_deoptimize, instrumentation_key, true); EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentNothing, GetCurrentInstrumentationLevel()); EXPECT_FALSE(instr->AreAllMethodsDeoptimized()); - EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get())); + EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize)); } TEST_F(InstrumentationTest, MethodTracing_Interpreter) { diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc index a85d10fafb..9abbca8460 100644 --- a/runtime/intern_table.cc +++ b/runtime/intern_table.cc @@ -18,6 +18,7 @@ #include <memory> +#include "gc_root-inl.h" #include "gc/space/image_space.h" #include "mirror/dex_cache.h" #include "mirror/object_array-inl.h" diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h index 7d634b3d25..446c5bb4a5 100644 --- a/runtime/interpreter/interpreter.h +++ b/runtime/interpreter/interpreter.h @@ -22,10 +22,10 @@ namespace art { namespace mirror { -class ArtMethod; class Object; } // namespace mirror +class ArtMethod; union JValue; class ShadowFrame; class Thread; @@ -33,7 +33,7 @@ class Thread; namespace interpreter { // Called by ArtMethod::Invoke, shadow frames arguments are taken from the args array. 
-extern void EnterInterpreterFromInvoke(Thread* self, mirror::ArtMethod* method, +extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, mirror::Object* receiver, uint32_t* args, JValue* result) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index 363c65afc1..1ed1a649b8 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -413,25 +413,19 @@ EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimNot) // iput-objec #undef EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL #undef EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL -uint32_t FindNextInstructionFollowingException(Thread* self, - ShadowFrame& shadow_frame, - uint32_t dex_pc, - const instrumentation::Instrumentation* instrumentation) { +uint32_t FindNextInstructionFollowingException( + Thread* self, ShadowFrame& shadow_frame, uint32_t dex_pc, + const instrumentation::Instrumentation* instrumentation) { self->VerifyStack(); - StackHandleScope<3> hs(self); + StackHandleScope<2> hs(self); Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException())); if (instrumentation->HasExceptionCaughtListeners() && self->IsExceptionThrownByCurrentMethod(exception.Get())) { instrumentation->ExceptionCaughtEvent(self, exception.Get()); } bool clear_exception = false; - uint32_t found_dex_pc; - { - Handle<mirror::Class> exception_class(hs.NewHandle(exception->GetClass())); - Handle<mirror::ArtMethod> h_method(hs.NewHandle(shadow_frame.GetMethod())); - found_dex_pc = mirror::ArtMethod::FindCatchBlock(h_method, exception_class, dex_pc, - &clear_exception); - } + uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock( + hs.NewHandle(exception->GetClass()), dex_pc, &clear_exception); if (found_dex_pc == DexFile::kDexNoIndex) { // Exception is not caught by the current method. We will unwind to the // caller. Notify any instrumentation listener. 
@@ -651,7 +645,7 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame, UNREACHABLE(); } // Force the use of interpreter when it is required by the debugger. - mirror::EntryPointFromInterpreter* entry; + EntryPointFromInterpreter* entry; if (UNLIKELY(Dbg::IsForcedInterpreterNeededForCalling(self, new_shadow_frame->GetMethod()))) { entry = &art::artInterpreterToInterpreterBridge; } else { @@ -668,7 +662,7 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame, shadow_frame.SetVRegReference(vregC, result->GetL()); // Overwrite all potential copies of the original result of the new-instance of string with the // new result of the StringFactory. Use the verifier to find this set of registers. - mirror::ArtMethod* method = shadow_frame.GetMethod(); + ArtMethod* method = shadow_frame.GetMethod(); MethodReference method_ref = method->ToMethodReference(); SafeMap<uint32_t, std::set<uint32_t>> string_init_map; SafeMap<uint32_t, std::set<uint32_t>>* string_init_map_ptr; @@ -788,13 +782,17 @@ void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count) RecordArrayElementsInTransactionImpl(array->AsShortArray(), count); break; case Primitive::kPrimInt: - case Primitive::kPrimFloat: RecordArrayElementsInTransactionImpl(array->AsIntArray(), count); break; + case Primitive::kPrimFloat: + RecordArrayElementsInTransactionImpl(array->AsFloatArray(), count); + break; case Primitive::kPrimLong: - case Primitive::kPrimDouble: RecordArrayElementsInTransactionImpl(array->AsLongArray(), count); break; + case Primitive::kPrimDouble: + RecordArrayElementsInTransactionImpl(array->AsDoubleArray(), count); + break; default: LOG(FATAL) << "Unsupported primitive type " << primitive_component_type << " in fill-array-data"; diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index 6acc72e4a9..6fafcd1611 100644 --- a/runtime/interpreter/interpreter_common.h +++ 
b/runtime/interpreter/interpreter_common.h @@ -25,6 +25,7 @@ #include <sstream> #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/logging.h" #include "base/macros.h" #include "class_linker-inl.h" @@ -33,7 +34,6 @@ #include "dex_instruction-inl.h" #include "entrypoints/entrypoint_utils-inl.h" #include "handle_scope-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" @@ -41,7 +41,7 @@ #include "thread.h" #include "well_known_classes.h" -using ::art::mirror::ArtMethod; +using ::art::ArtMethod; using ::art::mirror::Array; using ::art::mirror::BooleanArray; using ::art::mirror::ByteArray; @@ -105,7 +105,7 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c(); const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c(); Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC); - mirror::ArtMethod* sf_method = shadow_frame.GetMethod(); + ArtMethod* sf_method = shadow_frame.GetMethod(); ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>( method_idx, &receiver, &sf_method, self); // The shadow frame should already be pushed, so we don't need to update it. @@ -139,7 +139,8 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, } const uint32_t vtable_idx = (is_range) ? 
inst->VRegB_3rc() : inst->VRegB_35c(); CHECK(receiver->GetClass()->ShouldHaveEmbeddedImtAndVTable()); - ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(vtable_idx); + ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry( + vtable_idx, sizeof(void*)); if (UNLIKELY(called_method == nullptr)) { CHECK(self->IsExceptionPending()); result->SetJ(0); @@ -184,7 +185,6 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1 // java.lang.String class is initialized. static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CHECK(!kMovingMethods); Class* java_lang_string_class = String::GetJavaLangString(); if (UNLIKELY(!java_lang_string_class->IsInitialized())) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); @@ -195,7 +195,7 @@ static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uin return nullptr; } } - mirror::ArtMethod* method = shadow_frame.GetMethod(); + ArtMethod* method = shadow_frame.GetMethod(); mirror::Class* declaring_class = method->GetDeclaringClass(); mirror::String* s = declaring_class->GetDexCacheStrings()->Get(string_idx); if (UNLIKELY(s == nullptr)) { diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc index dd1f55e6b2..86027c542f 100644 --- a/runtime/interpreter/interpreter_goto_table_impl.cc +++ b/runtime/interpreter/interpreter_goto_table_impl.cc @@ -1042,7 +1042,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF HANDLE_PENDING_EXCEPTION(); } else { int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); - IntArray* array = a->AsIntArray(); + DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a); + auto* array = down_cast<IntArray*>(a); if (LIKELY(array->CheckIsValidIndex(index))) { shadow_frame.SetVReg(inst->VRegA_23x(inst_data), 
array->GetWithoutChecks(index)); ADVANCE(2); @@ -1060,7 +1061,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF HANDLE_PENDING_EXCEPTION(); } else { int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); - LongArray* array = a->AsLongArray(); + DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a); + auto* array = down_cast<LongArray*>(a); if (LIKELY(array->CheckIsValidIndex(index))) { shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); ADVANCE(2); @@ -1173,7 +1175,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF } else { int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); - IntArray* array = a->AsIntArray(); + DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a); + auto* array = down_cast<IntArray*>(a); if (LIKELY(array->CheckIsValidIndex(index))) { array->SetWithoutChecks<transaction_active>(index, val); ADVANCE(2); @@ -1192,7 +1195,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF } else { int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); - LongArray* array = a->AsLongArray(); + DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a); + auto* array = down_cast<LongArray*>(a); if (LIKELY(array->CheckIsValidIndex(index))) { array->SetWithoutChecks<transaction_active>(index, val); ADVANCE(2); diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index 0e3420ffb5..dd7aa40368 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -888,7 +888,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item, break; } int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); - IntArray* array = a->AsIntArray(); + 
DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a); + auto* array = down_cast<IntArray*>(a); if (array->CheckIsValidIndex(index)) { shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); inst = inst->Next_2xx(); @@ -906,7 +907,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item, break; } int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); - LongArray* array = a->AsLongArray(); + DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a); + auto* array = down_cast<LongArray*>(a); if (array->CheckIsValidIndex(index)) { shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); inst = inst->Next_2xx(); @@ -1019,7 +1021,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item, } int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); - IntArray* array = a->AsIntArray(); + DCHECK(a->IsIntArray() || a->IsFloatArray()) << PrettyTypeOf(a); + auto* array = down_cast<IntArray*>(a); if (array->CheckIsValidIndex(index)) { array->SetWithoutChecks<transaction_active>(index, val); inst = inst->Next_2xx(); @@ -1038,7 +1041,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item, } int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); - LongArray* array = a->AsLongArray(); + DCHECK(a->IsLongArray() || a->IsDoubleArray()) << PrettyTypeOf(a); + LongArray* array = down_cast<LongArray*>(a); if (array->CheckIsValidIndex(index)) { array->SetWithoutChecks<transaction_active>(index, val); inst = inst->Next_2xx(); diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc index 738e52beac..43e24faed3 100644 --- a/runtime/interpreter/unstarted_runtime.cc +++ b/runtime/interpreter/unstarted_runtime.cc @@ -21,6 +21,7 @@ #include "ScopedLocalRef.h" +#include 
"art_method-inl.h" #include "base/logging.h" #include "base/macros.h" #include "class_linker.h" @@ -29,7 +30,6 @@ #include "handle_scope-inl.h" #include "interpreter/interpreter_common.h" #include "mirror/array-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class.h" #include "mirror/field-inl.h" #include "mirror/object-inl.h" @@ -121,8 +121,7 @@ static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, siz } void UnstartedRuntime::UnstartedClassForName( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset); if (class_name == nullptr) { return; @@ -135,8 +134,7 @@ void UnstartedRuntime::UnstartedClassForName( } void UnstartedRuntime::UnstartedClassForNameLong( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset); if (class_name == nullptr) { return; @@ -153,8 +151,7 @@ void UnstartedRuntime::UnstartedClassForNameLong( } void UnstartedRuntime::UnstartedClassClassForName( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset); if (class_name == nullptr) { return; @@ -171,9 +168,8 @@ void UnstartedRuntime::UnstartedClassClassForName( } void UnstartedRuntime::UnstartedClassNewInstance( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - StackHandleScope<3> hs(self); // Class, constructor, object. 
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { + StackHandleScope<2> hs(self); // Class, constructor, object. mirror::Object* param = shadow_frame->GetVRegReference(arg_offset); if (param == nullptr) { AbortTransactionOrFail(self, "Null-pointer in Class.newInstance."); @@ -202,13 +198,13 @@ void UnstartedRuntime::UnstartedClassNewInstance( // 2) If we can't find the default constructor. We'll postpone the exception to runtime. // Note that 2) could likely be handled here, but for safety abort the transaction. bool ok = false; - if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) { - Handle<mirror::ArtMethod> h_cons(hs.NewHandle( - h_klass->FindDeclaredDirectMethod("<init>", "()V"))); - if (h_cons.Get() != nullptr) { + auto* cl = Runtime::Current()->GetClassLinker(); + if (cl->EnsureInitialized(self, h_klass, true, true)) { + auto* cons = h_klass->FindDeclaredDirectMethod("<init>", "()V", cl->GetImagePointerSize()); + if (cons != nullptr) { Handle<mirror::Object> h_obj(hs.NewHandle(klass->AllocObject(self))); CHECK(h_obj.Get() != nullptr); // We don't expect OOM at compile-time. - EnterInterpreterFromInvoke(self, h_cons.Get(), h_obj.Get(), nullptr, nullptr); + EnterInterpreterFromInvoke(self, cons, h_obj.Get(), nullptr, nullptr); if (!self->IsExceptionPending()) { result->SetL(h_obj.Get()); ok = true; @@ -227,8 +223,7 @@ void UnstartedRuntime::UnstartedClassNewInstance( } void UnstartedRuntime::UnstartedClassGetDeclaredField( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { // Special managed code cut-out to allow field lookup in a un-started runtime that'd fail // going the reflective Dex way. 
mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass(); @@ -266,8 +261,7 @@ void UnstartedRuntime::UnstartedClassGetDeclaredField( } void UnstartedRuntime::UnstartedVmClassLoaderFindLoadedClass( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString(); mirror::ClassLoader* class_loader = down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset)); @@ -286,11 +280,9 @@ void UnstartedRuntime::UnstartedVmClassLoaderFindLoadedClass( } } -void UnstartedRuntime::UnstartedVoidLookupType(Thread* self ATTRIBUTE_UNUSED, - ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, - JValue* result, - size_t arg_offset ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedVoidLookupType( + Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, JValue* result, + size_t arg_offset ATTRIBUTE_UNUSED) { result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V')); } @@ -324,8 +316,7 @@ static void PrimitiveArrayCopy(Thread* self, } void UnstartedRuntime::UnstartedSystemArraycopy( - Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) { // Special case array copying without initializing System. 
jint src_pos = shadow_frame->GetVReg(arg_offset + 1); jint dst_pos = shadow_frame->GetVReg(arg_offset + 3); @@ -410,22 +401,19 @@ void UnstartedRuntime::UnstartedSystemArraycopy( } void UnstartedRuntime::UnstartedSystemArraycopyChar( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { // Just forward. UnstartedRuntime::UnstartedSystemArraycopy(self, shadow_frame, result, arg_offset); } void UnstartedRuntime::UnstartedSystemArraycopyInt( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { // Just forward. UnstartedRuntime::UnstartedSystemArraycopy(self, shadow_frame, result, arg_offset); } void UnstartedRuntime::UnstartedThreadLocalGet( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) { std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod())); bool ok = false; if (caller == "java.lang.String java.lang.IntegralToString.convertInt" @@ -450,8 +438,9 @@ void UnstartedRuntime::UnstartedThreadLocalGet( Handle<mirror::Object> h_real_to_string_obj(hs.NewHandle( h_real_to_string_class->AllocObject(self))); if (h_real_to_string_obj.Get() != nullptr) { - mirror::ArtMethod* init_method = - h_real_to_string_class->FindDirectMethod("<init>", "()V"); + auto* cl = Runtime::Current()->GetClassLinker(); + ArtMethod* init_method = h_real_to_string_class->FindDirectMethod( + "<init>", "()V", cl->GetImagePointerSize()); if (init_method == nullptr) { h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail); } else { @@ -488,16 +477,8 @@ void 
UnstartedRuntime::UnstartedMathCeil( result->SetD(out); } -void UnstartedRuntime::UnstartedArtMethodGetMethodName( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = shadow_frame->GetVRegReference(arg_offset)->AsArtMethod(); - result->SetL(method->GetNameAsString(self)); -} - void UnstartedRuntime::UnstartedObjectHashCode( - Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset); result->SetI(obj->IdentityHashCode()); } @@ -537,8 +518,7 @@ static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_ca } void UnstartedRuntime::UnstartedDexCacheGetDexNative( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { // We will create the Dex object, but the image writer will release it before creating the // art file. 
mirror::Object* src = shadow_frame->GetVRegReference(arg_offset); @@ -600,26 +580,22 @@ static void UnstartedMemoryPeek( } void UnstartedRuntime::UnstartedMemoryPeekByte( - Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { UnstartedMemoryPeek(Primitive::kPrimByte, shadow_frame, result, arg_offset); } void UnstartedRuntime::UnstartedMemoryPeekShort( - Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { UnstartedMemoryPeek(Primitive::kPrimShort, shadow_frame, result, arg_offset); } void UnstartedRuntime::UnstartedMemoryPeekInt( - Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { UnstartedMemoryPeek(Primitive::kPrimInt, shadow_frame, result, arg_offset); } void UnstartedRuntime::UnstartedMemoryPeekLong( - Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { UnstartedMemoryPeek(Primitive::kPrimLong, shadow_frame, result, arg_offset); } @@ -673,18 +649,14 @@ static void UnstartedMemoryPeekArray( } void UnstartedRuntime::UnstartedMemoryPeekByteArray( - Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) { UnstartedMemoryPeekArray(Primitive::kPrimByte, self, 
shadow_frame, arg_offset); } // This allows reading security.properties in an unstarted runtime and initialize Security. void UnstartedRuntime::UnstartedSecurityGetSecurityPropertiesReader( - Thread* self, - ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, - JValue* result, - size_t arg_offset ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, JValue* result, + size_t arg_offset ATTRIBUTE_UNUSED) { Runtime* runtime = Runtime::Current(); const std::vector<const DexFile*>& path = runtime->GetClassLinker()->GetBootClassPath(); std::string canonical(DexFile::GetDexCanonicalLocation(path[0]->GetLocation().c_str())); @@ -753,8 +725,9 @@ void UnstartedRuntime::UnstartedSecurityGetSecurityPropertiesReader( return; } - mirror::ArtMethod* constructor = h_class->FindDeclaredDirectMethod("<init>", - "(Ljava/lang/String;)V"); + auto* cl = Runtime::Current()->GetClassLinker(); + ArtMethod* constructor = h_class->FindDeclaredDirectMethod( + "<init>", "(Ljava/lang/String;)V", cl->GetImagePointerSize()); if (constructor == nullptr) { AbortTransactionOrFail(self, "Could not find StringReader constructor"); return; @@ -774,8 +747,7 @@ void UnstartedRuntime::UnstartedSecurityGetSecurityPropertiesReader( // This allows reading the new style of String objects during compilation. 
void UnstartedRuntime::UnstartedStringGetCharsNoCheck( - Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) { jint start = shadow_frame->GetVReg(arg_offset + 1); jint end = shadow_frame->GetVReg(arg_offset + 2); jint index = shadow_frame->GetVReg(arg_offset + 4); @@ -787,7 +759,8 @@ void UnstartedRuntime::UnstartedStringGetCharsNoCheck( DCHECK_GE(start, 0); DCHECK_GE(end, string->GetLength()); StackHandleScope<1> hs(self); - Handle<mirror::CharArray> h_char_array(hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 3)->AsCharArray())); + Handle<mirror::CharArray> h_char_array( + hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 3)->AsCharArray())); DCHECK_LE(index, h_char_array->GetLength()); DCHECK_LE(end - start, h_char_array->GetLength() - index); string->GetChars(start, end, h_char_array, index); @@ -795,8 +768,7 @@ void UnstartedRuntime::UnstartedStringGetCharsNoCheck( // This allows reading chars from the new style of String objects during compilation. void UnstartedRuntime::UnstartedStringCharAt( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { jint index = shadow_frame->GetVReg(arg_offset + 1); mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString(); if (string == nullptr) { @@ -808,8 +780,7 @@ void UnstartedRuntime::UnstartedStringCharAt( // This allows setting chars from the new style of String objects during compilation. 
void UnstartedRuntime::UnstartedStringSetCharAt( - Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) { jint index = shadow_frame->GetVReg(arg_offset + 1); jchar c = shadow_frame->GetVReg(arg_offset + 2); mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString(); @@ -822,13 +793,13 @@ void UnstartedRuntime::UnstartedStringSetCharAt( // This allows creating the new style of String objects during compilation. void UnstartedRuntime::UnstartedStringFactoryNewStringFromChars( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { jint offset = shadow_frame->GetVReg(arg_offset); jint char_count = shadow_frame->GetVReg(arg_offset + 1); DCHECK_GE(char_count, 0); StackHandleScope<1> hs(self); - Handle<mirror::CharArray> h_char_array(hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray())); + Handle<mirror::CharArray> h_char_array( + hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray())); Runtime* runtime = Runtime::Current(); gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator(); result->SetL(mirror::String::AllocFromCharArray<true>(self, char_count, h_char_array, offset, allocator)); @@ -836,8 +807,7 @@ void UnstartedRuntime::UnstartedStringFactoryNewStringFromChars( // This allows creating the new style of String objects during compilation. 
void UnstartedRuntime::UnstartedStringFactoryNewStringFromString( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { mirror::String* to_copy = shadow_frame->GetVRegReference(arg_offset)->AsString(); if (to_copy == nullptr) { AbortTransactionOrFail(self, "StringFactory.newStringFromString with null object"); @@ -852,14 +822,14 @@ void UnstartedRuntime::UnstartedStringFactoryNewStringFromString( } void UnstartedRuntime::UnstartedStringFastSubstring( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { jint start = shadow_frame->GetVReg(arg_offset + 1); jint length = shadow_frame->GetVReg(arg_offset + 2); DCHECK_GE(start, 0); DCHECK_GE(length, 0); StackHandleScope<1> hs(self); - Handle<mirror::String> h_string(hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsString())); + Handle<mirror::String> h_string( + hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsString())); DCHECK_LE(start, h_string->GetLength()); DCHECK_LE(start + length, h_string->GetLength()); Runtime* runtime = Runtime::Current(); @@ -879,12 +849,9 @@ void UnstartedRuntime::UnstartedStringToCharArray( result->SetL(string->ToCharArray(self)); } -void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray( + Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED, + uint32_t* args, JValue* result) { int32_t length = args[1]; DCHECK_GE(length, 0); mirror::Class* element_class = 
reinterpret_cast<mirror::Object*>(args[0])->AsClass(); @@ -896,20 +863,15 @@ void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self, array_class->GetComponentSizeShift(), allocator)); } -void UnstartedRuntime::UnstartedJNIVMStackGetCallingClassLoader(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args ATTRIBUTE_UNUSED, - JValue* result) { +void UnstartedRuntime::UnstartedJNIVMStackGetCallingClassLoader( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) { result->SetL(nullptr); } -void UnstartedRuntime::UnstartedJNIVMStackGetStackClass2(Thread* self, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args ATTRIBUTE_UNUSED, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIVMStackGetStackClass2( + Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED, + uint32_t* args ATTRIBUTE_UNUSED, JValue* result) { NthCallerVisitor visitor(self, 3); visitor.WalkStack(); if (visitor.caller != nullptr) { @@ -917,76 +879,56 @@ void UnstartedRuntime::UnstartedJNIVMStackGetStackClass2(Thread* self, } } -void UnstartedRuntime::UnstartedJNIMathLog(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) { +void UnstartedRuntime::UnstartedJNIMathLog( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) { JValue value; value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]); result->SetD(log(value.GetD())); } -void UnstartedRuntime::UnstartedJNIMathExp(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - 
mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) { +void UnstartedRuntime::UnstartedJNIMathExp( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) { JValue value; value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]); result->SetD(exp(value.GetD())); } -void UnstartedRuntime::UnstartedJNIClassGetNameNative(Thread* self, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver, - uint32_t* args ATTRIBUTE_UNUSED, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIClassGetNameNative( + Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver, + uint32_t* args ATTRIBUTE_UNUSED, JValue* result) { StackHandleScope<1> hs(self); result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass()))); } -void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) { +void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) { result->SetI(args[0]); } -void UnstartedRuntime::UnstartedJNIFloatIntBitsToFloat(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) { +void UnstartedRuntime::UnstartedJNIFloatIntBitsToFloat( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) { result->SetI(args[0]); } -void UnstartedRuntime::UnstartedJNIObjectInternalClone(Thread* self, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver, - uint32_t* args 
ATTRIBUTE_UNUSED, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIObjectInternalClone( + Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver, + uint32_t* args ATTRIBUTE_UNUSED, JValue* result) { result->SetL(receiver->Clone(self)); } -void UnstartedRuntime::UnstartedJNIObjectNotifyAll(Thread* self, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver, - uint32_t* args ATTRIBUTE_UNUSED, - JValue* result ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIObjectNotifyAll( + Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver, + uint32_t* args ATTRIBUTE_UNUSED, JValue* result ATTRIBUTE_UNUSED) { receiver->NotifyAll(self); } -void UnstartedRuntime::UnstartedJNIStringCompareTo(Thread* self, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver, - uint32_t* args, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIStringCompareTo( + Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver, uint32_t* args, + JValue* result) { mirror::String* rhs = reinterpret_cast<mirror::Object*>(args[0])->AsString(); if (rhs == nullptr) { AbortTransactionOrFail(self, "String.compareTo with null object"); @@ -994,42 +936,30 @@ void UnstartedRuntime::UnstartedJNIStringCompareTo(Thread* self, result->SetI(receiver->AsString()->CompareTo(rhs)); } -void UnstartedRuntime::UnstartedJNIStringIntern(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver, - uint32_t* args ATTRIBUTE_UNUSED, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIStringIntern( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver, + uint32_t* args ATTRIBUTE_UNUSED, JValue* result) { result->SetL(receiver->AsString()->Intern()); } 
-void UnstartedRuntime::UnstartedJNIStringFastIndexOf(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver, - uint32_t* args, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIStringFastIndexOf( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver, + uint32_t* args, JValue* result) { result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1])); } -void UnstartedRuntime::UnstartedJNIArrayCreateMultiArray(Thread* self, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIArrayCreateMultiArray( + Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED, + uint32_t* args, JValue* result) { StackHandleScope<2> hs(self); auto h_class(hs.NewHandle(reinterpret_cast<mirror::Class*>(args[0])->AsClass())); auto h_dimensions(hs.NewHandle(reinterpret_cast<mirror::IntArray*>(args[1])->AsIntArray())); result->SetL(mirror::Array::CreateMultiArray(self, h_class, h_dimensions)); } -void UnstartedRuntime::UnstartedJNIArrayCreateObjectArray(Thread* self, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIArrayCreateObjectArray( + Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED, + uint32_t* args, JValue* result) { int32_t length = static_cast<int32_t>(args[1]); if (length < 0) { ThrowNegativeArraySizeException(length); @@ -1049,12 +979,9 @@ void UnstartedRuntime::UnstartedJNIArrayCreateObjectArray(Thread* self, result->SetL(new_array); } -void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace(Thread* self, - mirror::ArtMethod* method 
ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args ATTRIBUTE_UNUSED, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace( + Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED, + uint32_t* args ATTRIBUTE_UNUSED, JValue* result) { ScopedObjectAccessUnchecked soa(self); if (Runtime::Current()->IsActiveTransaction()) { result->SetL(soa.Decode<mirror::Object*>(self->CreateInternalStackTrace<true>(soa))); @@ -1063,30 +990,22 @@ void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace(Thread* self, } } -void UnstartedRuntime::UnstartedJNISystemIdentityHashCode(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNISystemIdentityHashCode( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) { mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]); result->SetI((obj != nullptr) ? 
obj->IdentityHashCode() : 0); } -void UnstartedRuntime::UnstartedJNIByteOrderIsLittleEndian(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args ATTRIBUTE_UNUSED, - JValue* result) { +void UnstartedRuntime::UnstartedJNIByteOrderIsLittleEndian( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) { result->SetZ(JNI_TRUE); } -void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) { mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]); jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1]; jint expectedValue = args[3]; @@ -1102,12 +1021,9 @@ void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt(Thread* self ATTRIBUT result->SetZ(success ? 
JNI_TRUE : JNI_FALSE); } -void UnstartedRuntime::UnstartedJNIUnsafePutObject(Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void UnstartedRuntime::UnstartedJNIUnsafePutObject( + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result ATTRIBUTE_UNUSED) { mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]); jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1]; mirror::Object* newValue = reinterpret_cast<mirror::Object*>(args[3]); @@ -1119,24 +1035,16 @@ void UnstartedRuntime::UnstartedJNIUnsafePutObject(Thread* self ATTRIBUTE_UNUSED } void UnstartedRuntime::UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType( - Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) { mirror::Class* component = reinterpret_cast<mirror::Object*>(args[0])->AsClass(); Primitive::Type primitive_type = component->GetPrimitiveType(); result->SetI(mirror::Array::DataOffset(Primitive::ComponentSize(primitive_type)).Int32Value()); } void UnstartedRuntime::UnstartedJNIUnsafeGetArrayIndexScaleForComponentType( - Thread* self ATTRIBUTE_UNUSED, - mirror::ArtMethod* method ATTRIBUTE_UNUSED, - mirror::Object* receiver ATTRIBUTE_UNUSED, - uint32_t* args, - JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, + mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) { mirror::Class* component = 
reinterpret_cast<mirror::Object*>(args[0])->AsClass(); Primitive::Type primitive_type = component->GetPrimitiveType(); result->SetI(Primitive::ComponentSize(primitive_type)); @@ -1145,7 +1053,7 @@ void UnstartedRuntime::UnstartedJNIUnsafeGetArrayIndexScaleForComponentType( typedef void (*InvokeHandler)(Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_size); -typedef void (*JNIHandler)(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver, +typedef void (*JNIHandler)(Thread* self, ArtMethod* method, mirror::Object* receiver, uint32_t* args, JValue* result); static bool tables_initialized_ = false; @@ -1200,7 +1108,7 @@ void UnstartedRuntime::Invoke(Thread* self, const DexFile::CodeItem* code_item, } // Hand select a number of methods to be run in a not yet started runtime without using JNI. -void UnstartedRuntime::Jni(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver, +void UnstartedRuntime::Jni(Thread* self, ArtMethod* method, mirror::Object* receiver, uint32_t* args, JValue* result) { std::string name(PrettyMethod(method)); const auto& iter = jni_handlers_.find(name); diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h index a361af0542..a357d5fa18 100644 --- a/runtime/interpreter/unstarted_runtime.h +++ b/runtime/interpreter/unstarted_runtime.h @@ -24,14 +24,12 @@ namespace art { +class ArtMethod; class Thread; class ShadowFrame; namespace mirror { - -class ArtMethod; class Object; - } // namespace mirror namespace interpreter { @@ -57,7 +55,7 @@ class UnstartedRuntime { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void Jni(Thread* self, - mirror::ArtMethod* method, + ArtMethod* method, mirror::Object* receiver, uint32_t* args, JValue* result) @@ -80,7 +78,7 @@ class UnstartedRuntime { // Methods that are native. 
#define UNSTARTED_JNI(ShortName, SigIgnored) \ static void UnstartedJNI ## ShortName(Thread* self, \ - mirror::ArtMethod* method, \ + ArtMethod* method, \ mirror::Object* receiver, \ uint32_t* args, \ JValue* result) \ diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h index 8f6014c2b9..047e906614 100644 --- a/runtime/interpreter/unstarted_runtime_list.h +++ b/runtime/interpreter/unstarted_runtime_list.h @@ -31,7 +31,6 @@ V(SystemArraycopyInt, "void java.lang.System.arraycopy(int[], int, int[], int, int)") \ V(ThreadLocalGet, "java.lang.Object java.lang.ThreadLocal.get()") \ V(MathCeil, "double java.lang.Math.ceil(double)") \ - V(ArtMethodGetMethodName, "java.lang.String java.lang.reflect.ArtMethod.getMethodName(java.lang.reflect.ArtMethod)") \ V(ObjectHashCode, "int java.lang.Object.hashCode()") \ V(DoubleDoubleToRawLongBits, "long java.lang.Double.doubleToRawLongBits(double)") \ V(DexCacheGetDexNative, "com.android.dex.Dex java.lang.DexCache.getDexNative()") \ diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc index 9179d17bf9..4b672e06f4 100644 --- a/runtime/interpreter/unstarted_runtime_test.cc +++ b/runtime/interpreter/unstarted_runtime_test.cc @@ -52,9 +52,9 @@ class UnstartedRuntimeTest : public CommonRuntimeTest { #undef UNSTARTED_DIRECT // Methods that are native. 
-#define UNSTARTED_JNI(Name, SigIgnored) \ +#define UNSTARTED_JNI(Name, SigIgnored) \ static void UnstartedJNI ## Name(Thread* self, \ - mirror::ArtMethod* method, \ + ArtMethod* method, \ mirror::Object* receiver, \ uint32_t* args, \ JValue* result) \ @@ -253,7 +253,8 @@ TEST_F(UnstartedRuntimeTest, StringInit) { Thread* self = Thread::Current(); ScopedObjectAccess soa(self); mirror::Class* klass = mirror::String::GetJavaLangString(); - mirror::ArtMethod* method = klass->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V"); + ArtMethod* method = klass->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V", + sizeof(void*)); // create instruction data for invoke-direct {v0, v1} of method with fake index uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 }; diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc index 3142089862..2d3d19ce3e 100644 --- a/runtime/java_vm_ext.cc +++ b/runtime/java_vm_ext.cc @@ -18,6 +18,7 @@ #include <dlfcn.h> +#include "art_method.h" #include "base/dumpable.h" #include "base/mutex.h" #include "base/stl_util.h" @@ -25,7 +26,6 @@ #include "dex_file-inl.h" #include "fault_handler.h" #include "indirect_reference_table-inl.h" -#include "mirror/art_method.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "nativebridge/native_bridge.h" @@ -205,7 +205,7 @@ class Libraries { } // See section 11.3 "Linking Native Methods" of the JNI spec. 
- void* FindNativeMethod(mirror::ArtMethod* m, std::string& detail) + void* FindNativeMethod(ArtMethod* m, std::string& detail) EXCLUSIVE_LOCKS_REQUIRED(Locks::jni_libraries_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { std::string jni_short_name(JniShortName(m)); @@ -386,7 +386,7 @@ JavaVMExt::~JavaVMExt() { void JavaVMExt::JniAbort(const char* jni_function_name, const char* msg) { Thread* self = Thread::Current(); ScopedObjectAccess soa(self); - mirror::ArtMethod* current_method = self->GetCurrentMethod(nullptr); + ArtMethod* current_method = self->GetCurrentMethod(nullptr); std::ostringstream os; os << "JNI DETECTED ERROR IN APPLICATION: " << msg; @@ -424,7 +424,7 @@ void JavaVMExt::JniAbortF(const char* jni_function_name, const char* fmt, ...) { va_end(args); } -bool JavaVMExt::ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool JavaVMExt::ShouldTrace(ArtMethod* method) { // Fast where no tracing is enabled. if (trace_.empty() && !VLOG_IS_ON(third_party_jni)) { return false; @@ -737,7 +737,7 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject return was_successful; } -void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) { +void* JavaVMExt::FindCodeForNativeMethod(ArtMethod* m) { CHECK(m->IsNative()); mirror::Class* c = m->GetDeclaringClass(); // If this is a static method, it could be called before the class has been initialized. diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h index 594027c3c9..4fdf45a03e 100644 --- a/runtime/java_vm_ext.h +++ b/runtime/java_vm_ext.h @@ -27,10 +27,10 @@ namespace art { namespace mirror { - class ArtMethod; class Array; } // namespace mirror +class ArtMethod; class Libraries; class ParsedOptions; class Runtime; @@ -77,7 +77,7 @@ class JavaVMExt : public JavaVM { // such as NewByteArray. // If -verbose:third-party-jni is on, we want to log any JNI function calls // made by a third-party native method. 
- bool ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ShouldTrace(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /** * Loads the given shared library. 'path' is an absolute pathname. @@ -92,7 +92,7 @@ class JavaVMExt : public JavaVM { * Returns a pointer to the code for the native method 'm', found * using dlsym(3) on every native library that's been loaded so far. */ - void* FindCodeForNativeMethod(mirror::ArtMethod* m) + void* FindCodeForNativeMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h index 55441c9f39..e18d10fa0a 100644 --- a/runtime/jdwp/jdwp.h +++ b/runtime/jdwp/jdwp.h @@ -34,11 +34,11 @@ struct iovec; namespace art { class ArtField; +class ArtMethod; union JValue; class Thread; namespace mirror { - class ArtMethod; class Class; class Object; class Throwable; @@ -74,7 +74,7 @@ static inline void expandBufAddRefTypeId(ExpandBuf* pReply, RefTypeId id) { expa static inline void expandBufAddFrameId(ExpandBuf* pReply, FrameId id) { expandBufAdd8BE(pReply, id); } struct EventLocation { - mirror::ArtMethod* method; + ArtMethod* method; uint32_t dex_pc; }; diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc index ff75268daa..612af8bc99 100644 --- a/runtime/jdwp/jdwp_event.cc +++ b/runtime/jdwp/jdwp_event.cc @@ -22,6 +22,7 @@ #include <unistd.h> #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/logging.h" #include "base/stringprintf.h" #include "debugger.h" diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index 5dc739edb2..bc9545b5a8 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -18,11 +18,11 @@ #include <dlfcn.h> +#include "art_method-inl.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "interpreter/interpreter.h" #include "jit_code_cache.h" #include "jit_instrumentation.h" -#include "mirror/art_method-inl.h" #include 
"runtime.h" #include "runtime_options.h" #include "thread_list.h" @@ -100,7 +100,7 @@ bool Jit::LoadCompiler(std::string* error_msg) { *error_msg = "JIT couldn't find jit_unload entry point"; return false; } - jit_compile_method_ = reinterpret_cast<bool (*)(void*, mirror::ArtMethod*, Thread*)>( + jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*)>( dlsym(jit_library_handle_, "jit_compile_method")); if (jit_compile_method_ == nullptr) { dlclose(jit_library_handle_); @@ -126,7 +126,7 @@ bool Jit::LoadCompiler(std::string* error_msg) { return true; } -bool Jit::CompileMethod(mirror::ArtMethod* method, Thread* self) { +bool Jit::CompileMethod(ArtMethod* method, Thread* self) { DCHECK(!method->IsRuntimeMethod()); if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) { VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to breakpoint"; diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index 8f92453866..dbd8977d91 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -30,13 +30,10 @@ namespace art { +class ArtMethod; class CompilerCallbacks; struct RuntimeArgumentMap; -namespace mirror { -class ArtMethod; -} // namespace mirror - namespace jit { class JitCodeCache; @@ -50,7 +47,7 @@ class Jit { virtual ~Jit(); static Jit* Create(JitOptions* options, std::string* error_msg); - bool CompileMethod(mirror::ArtMethod* method, Thread* self) + bool CompileMethod(ArtMethod* method, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateInstrumentationCache(size_t compile_threshold); void CreateThreadPool(); @@ -79,7 +76,7 @@ class Jit { void* jit_compiler_handle_; void* (*jit_load_)(CompilerCallbacks**); void (*jit_unload_)(void*); - bool (*jit_compile_method_)(void*, mirror::ArtMethod*, Thread*); + bool (*jit_compile_method_)(void*, ArtMethod*, Thread*); // Performance monitoring. 
bool dump_info_on_shutdown_; diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index 4d367e01eb..cd5f4cb529 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -18,8 +18,8 @@ #include <sstream> +#include "art_method-inl.h" #include "mem_map.h" -#include "mirror/art_method-inl.h" #include "oat_file-inl.h" namespace art { @@ -58,7 +58,7 @@ JitCodeCache::JitCodeCache(MemMap* mem_map) code_cache_end_ = mem_map->End(); } -bool JitCodeCache::ContainsMethod(mirror::ArtMethod* method) const { +bool JitCodeCache::ContainsMethod(ArtMethod* method) const { return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode()); } @@ -93,7 +93,7 @@ uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const ui return data_cache_ptr_ - size; } -const void* JitCodeCache::GetCodeFor(mirror::ArtMethod* method) { +const void* JitCodeCache::GetCodeFor(ArtMethod* method) { const void* code = method->GetEntryPointFromQuickCompiledCode(); if (ContainsCodePtr(code)) { return code; @@ -106,7 +106,7 @@ const void* JitCodeCache::GetCodeFor(mirror::ArtMethod* method) { return nullptr; } -void JitCodeCache::SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr) { +void JitCodeCache::SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) { DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr); DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr=" << old_code_ptr; diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h index 8b76647161..c1ea921834 100644 --- a/runtime/jit/jit_code_cache.h +++ b/runtime/jit/jit_code_cache.h @@ -31,13 +31,10 @@ namespace art { +class ArtMethod; class CompiledMethod; class CompilerCallbacks; -namespace mirror { -class ArtMethod; -} // namespcae mirror - namespace jit { class JitInstrumentationCache; @@ -80,7 +77,7 @@ class JitCodeCache { } // Return true if the code cache contains the code pointer which si the 
entrypoint of the method. - bool ContainsMethod(mirror::ArtMethod* method) const + bool ContainsMethod(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Return true if the code cache contains a code ptr. @@ -95,12 +92,12 @@ class JitCodeCache { LOCKS_EXCLUDED(lock_); // Get code for a method, returns null if it is not in the jit cache. - const void* GetCodeFor(mirror::ArtMethod* method) + const void* GetCodeFor(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); // Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the // entrypoint isn't within the cache. - void SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr) + void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); private: @@ -125,10 +122,9 @@ class JitCodeCache { const uint8_t* data_cache_begin_; const uint8_t* data_cache_end_; size_t num_methods_; - // TODO: This relies on methods not moving. // This map holds code for methods if they were deoptimized by the instrumentation stubs. This is // required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks. 
- SafeMap<mirror::ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_); + SafeMap<ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_); DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache); }; diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc index afa5a3e7ee..cd123b97d9 100644 --- a/runtime/jit/jit_code_cache_test.cc +++ b/runtime/jit/jit_code_cache_test.cc @@ -16,9 +16,9 @@ #include "common_runtime_test.h" +#include "art_method-inl.h" #include "class_linker.h" #include "jit_code_cache.h" -#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" #include "thread-inl.h" @@ -50,15 +50,15 @@ TEST_F(JitCodeCacheTest, TestCoverage) { ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code)); ASSERT_EQ(code_cache->NumMethods(), 1u); ClassLinker* const cl = Runtime::Current()->GetClassLinker(); - auto h_method = hs.NewHandle(cl->AllocArtMethod(soa.Self())); - ASSERT_FALSE(code_cache->ContainsMethod(h_method.Get())); - h_method->SetEntryPointFromQuickCompiledCode(reserved_code); - ASSERT_TRUE(code_cache->ContainsMethod(h_method.Get())); - ASSERT_EQ(code_cache->GetCodeFor(h_method.Get()), reserved_code); + auto* method = cl->AllocArtMethodArray(soa.Self(), 1); + ASSERT_FALSE(code_cache->ContainsMethod(method)); + method->SetEntryPointFromQuickCompiledCode(reserved_code); + ASSERT_TRUE(code_cache->ContainsMethod(method)); + ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code); // Save the code and then change it. 
- code_cache->SaveCompiledCode(h_method.Get(), reserved_code); - h_method->SetEntryPointFromQuickCompiledCode(nullptr); - ASSERT_EQ(code_cache->GetCodeFor(h_method.Get()), reserved_code); + code_cache->SaveCompiledCode(method, reserved_code); + method->SetEntryPointFromQuickCompiledCode(nullptr); + ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code); const uint8_t data_arr[] = {1, 2, 3, 4, 5}; uint8_t* data_ptr = code_cache->AddDataArray(soa.Self(), data_arr, data_arr + sizeof(data_arr)); ASSERT_TRUE(data_ptr != nullptr); @@ -76,7 +76,8 @@ TEST_F(JitCodeCacheTest, TestOverflow) { size_t data_bytes = 0; constexpr size_t kCodeArrSize = 4 * KB; constexpr size_t kDataArrSize = 4 * KB; - uint8_t data_arr[kDataArrSize] = {53}; + uint8_t data_arr[kDataArrSize]; + std::fill_n(data_arr, arraysize(data_arr), 53); // Add code and data until we are full. uint8_t* code_ptr = nullptr; uint8_t* data_ptr = nullptr; diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc index 32326740c6..1e56cdca30 100644 --- a/runtime/jit/jit_instrumentation.cc +++ b/runtime/jit/jit_instrumentation.cc @@ -16,9 +16,9 @@ #include "jit_instrumentation.h" +#include "art_method-inl.h" #include "jit.h" #include "jit_code_cache.h" -#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" namespace art { @@ -26,7 +26,7 @@ namespace jit { class JitCompileTask : public Task { public: - explicit JitCompileTask(mirror::ArtMethod* method, JitInstrumentationCache* cache) + explicit JitCompileTask(ArtMethod* method, JitInstrumentationCache* cache) : method_(method), cache_(cache) { } @@ -45,7 +45,7 @@ class JitCompileTask : public Task { } private: - mirror::ArtMethod* const method_; + ArtMethod* const method_; JitInstrumentationCache* const cache_; DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask); @@ -63,7 +63,7 @@ void JitInstrumentationCache::DeleteThreadPool() { thread_pool_.reset(); } -void JitInstrumentationCache::SignalCompiled(Thread* self, 
mirror::ArtMethod* method) { +void JitInstrumentationCache::SignalCompiled(Thread* self, ArtMethod* method) { ScopedObjectAccessUnchecked soa(self); jmethodID method_id = soa.EncodeMethod(method); MutexLock mu(self, lock_); @@ -73,7 +73,7 @@ void JitInstrumentationCache::SignalCompiled(Thread* self, mirror::ArtMethod* me } } -void JitInstrumentationCache::AddSamples(Thread* self, mirror::ArtMethod* method, size_t count) { +void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t count) { ScopedObjectAccessUnchecked soa(self); // Since we don't have on-stack replacement, some methods can remain in the interpreter longer // than we want resulting in samples even after the method is compiled. @@ -101,11 +101,13 @@ void JitInstrumentationCache::AddSamples(Thread* self, mirror::ArtMethod* method } if (is_hot) { if (thread_pool_.get() != nullptr) { - thread_pool_->AddTask(self, new JitCompileTask(method->GetInterfaceMethodIfProxy(), this)); + thread_pool_->AddTask(self, new JitCompileTask( + method->GetInterfaceMethodIfProxy(sizeof(void*)), this)); thread_pool_->StartWorkers(self); } else { VLOG(jit) << "Compiling hot method " << PrettyMethod(method); - Runtime::Current()->GetJit()->CompileMethod(method->GetInterfaceMethodIfProxy(), self); + Runtime::Current()->GetJit()->CompileMethod( + method->GetInterfaceMethodIfProxy(sizeof(void*)), self); } } } diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h index 72acaef2a3..27894eb6c2 100644 --- a/runtime/jit/jit_instrumentation.h +++ b/runtime/jit/jit_instrumentation.h @@ -31,12 +31,12 @@ namespace art { namespace mirror { - class ArtMethod; class Class; class Object; class Throwable; } // namespace mirror class ArtField; +class ArtMethod; union JValue; class Thread; @@ -46,9 +46,9 @@ namespace jit { class JitInstrumentationCache { public: explicit JitInstrumentationCache(size_t hot_method_threshold); - void AddSamples(Thread* self, mirror::ArtMethod* method, size_t 
samples) + void AddSamples(Thread* self, ArtMethod* method, size_t samples) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SignalCompiled(Thread* self, mirror::ArtMethod* method) + void SignalCompiled(Thread* self, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateThreadPool(); void DeleteThreadPool(); @@ -67,31 +67,31 @@ class JitInstrumentationListener : public instrumentation::InstrumentationListen explicit JitInstrumentationListener(JitInstrumentationCache* cache); virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/, - mirror::ArtMethod* method, uint32_t /*dex_pc*/) + ArtMethod* method, uint32_t /*dex_pc*/) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { instrumentation_cache_->AddSamples(thread, method, 1); } virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/, - mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/, + ArtMethod* /*method*/, uint32_t /*dex_pc*/, const JValue& /*return_value*/) OVERRIDE { } virtual void MethodUnwind(Thread* /*thread*/, mirror::Object* /*this_object*/, - mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { } + ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { } virtual void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/, - mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/, + ArtMethod* /*method*/, uint32_t /*dex_pc*/, ArtField* /*field*/) OVERRIDE { } virtual void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/, - mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/, + ArtMethod* /*method*/, uint32_t /*dex_pc*/, ArtField* /*field*/, const JValue& /*field_value*/) OVERRIDE { } virtual void ExceptionCaught(Thread* /*thread*/, mirror::Throwable* /*exception_object*/) OVERRIDE { } virtual void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/, - mirror::ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { } + ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { } // We only care 
about how many dex instructions were executed in the Jit. - virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset) + virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { CHECK_LE(dex_pc_offset, 0); instrumentation_cache_->AddSamples(thread, method, 1); diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index f435467e95..6ab44551d0 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -24,6 +24,7 @@ #include <vector> #include "art_field-inl.h" +#include "art_method-inl.h" #include "atomic.h" #include "base/allocator.h" #include "base/logging.h" @@ -38,7 +39,6 @@ #include "interpreter/interpreter.h" #include "jni_env_ext.h" #include "java_vm_ext.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/field-inl.h" @@ -126,17 +126,18 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, if (c == nullptr) { return nullptr; } - mirror::ArtMethod* method = nullptr; + ArtMethod* method = nullptr; + auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); if (is_static) { - method = c->FindDirectMethod(name, sig); + method = c->FindDirectMethod(name, sig, pointer_size); } else if (c->IsInterface()) { - method = c->FindInterfaceMethod(name, sig); + method = c->FindInterfaceMethod(name, sig, pointer_size); } else { - method = c->FindVirtualMethod(name, sig); + method = c->FindVirtualMethod(name, sig, pointer_size); if (method == nullptr) { // No virtual method matching the signature. Search declared // private methods and constructors. 
- method = c->FindDeclaredDirectMethod(name, sig); + method = c->FindDeclaredDirectMethod(name, sig, pointer_size); } } if (method == nullptr || method->IsStatic() != is_static) { @@ -148,7 +149,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr); + ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr); // If we are running Runtime.nativeLoad, use the overriding ClassLoader it set. if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) { return soa.Decode<mirror::ClassLoader*>(soa.Self()->GetClassLoaderOverride()); @@ -312,26 +313,19 @@ static JavaVMExt* JavaVmExtFromEnv(JNIEnv* env) { } template <bool kNative> -static mirror::ArtMethod* FindMethod(mirror::Class* c, - const StringPiece& name, - const StringPiece& sig) +static ArtMethod* FindMethod(mirror::Class* c, const StringPiece& name, const StringPiece& sig) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - for (size_t i = 0; i < c->NumDirectMethods(); ++i) { - mirror::ArtMethod* method = c->GetDirectMethod(i); - if (kNative == method->IsNative() && - name == method->GetName() && method->GetSignature() == sig) { - return method; + auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + for (auto& method : c->GetDirectMethods(pointer_size)) { + if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) { + return &method; } } - - for (size_t i = 0; i < c->NumVirtualMethods(); ++i) { - mirror::ArtMethod* method = c->GetVirtualMethod(i); - if (kNative == method->IsNative() && - name == method->GetName() && method->GetSignature() == sig) { - return method; + for (auto& method : c->GetVirtualMethods(pointer_size)) { + if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) { + 
return &method; } } - return nullptr; } @@ -366,7 +360,7 @@ class JNI { static jmethodID FromReflectedMethod(JNIEnv* env, jobject jlr_method) { CHECK_NON_NULL_ARGUMENT(jlr_method); ScopedObjectAccess soa(env); - return soa.EncodeMethod(mirror::ArtMethod::FromReflectedMethod(soa, jlr_method)); + return soa.EncodeMethod(ArtMethod::FromReflectedMethod(soa, jlr_method)); } static jfieldID FromReflectedField(JNIEnv* env, jobject jlr_field) { @@ -384,8 +378,7 @@ class JNI { static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) { CHECK_NON_NULL_ARGUMENT(mid); ScopedObjectAccess soa(env); - mirror::ArtMethod* m = soa.DecodeMethod(mid); - CHECK(!kMovingMethods); + ArtMethod* m = soa.DecodeMethod(mid); mirror::AbstractMethod* method; if (m->IsConstructor()) { method = mirror::Constructor::CreateFromArtMethod(soa.Self(), m); @@ -2151,7 +2144,7 @@ class JNI { // Note: the right order is to try to find the method locally // first, either as a direct or a virtual method. Then move to // the parent. 
- mirror::ArtMethod* m = nullptr; + ArtMethod* m = nullptr; bool warn_on_going_to_parent = down_cast<JNIEnvExt*>(env)->vm->IsCheckJniEnabled(); for (mirror::Class* current_class = c; current_class != nullptr; @@ -2207,17 +2200,16 @@ class JNI { VLOG(jni) << "[Unregistering JNI native methods for " << PrettyClass(c) << "]"; size_t unregistered_count = 0; - for (size_t i = 0; i < c->NumDirectMethods(); ++i) { - mirror::ArtMethod* m = c->GetDirectMethod(i); - if (m->IsNative()) { - m->UnregisterNative(); + auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + for (auto& m : c->GetDirectMethods(pointer_size)) { + if (m.IsNative()) { + m.UnregisterNative(); unregistered_count++; } } - for (size_t i = 0; i < c->NumVirtualMethods(); ++i) { - mirror::ArtMethod* m = c->GetVirtualMethod(i); - if (m->IsNative()) { - m->UnregisterNative(); + for (auto& m : c->GetVirtualMethods(pointer_size)) { + if (m.IsNative()) { + m.UnregisterNative(); unregistered_count++; } } diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc index 581ef0e61a..99eb365094 100644 --- a/runtime/jni_internal_test.cc +++ b/runtime/jni_internal_test.cc @@ -16,9 +16,9 @@ #include "jni_internal.h" +#include "art_method-inl.h" #include "common_compiler_test.h" #include "java_vm_ext.h" -#include "mirror/art_method-inl.h" #include "mirror/string-inl.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" diff --git a/runtime/linear_alloc.cc b/runtime/linear_alloc.cc index fe6bee6d02..43e81d9d94 100644 --- a/runtime/linear_alloc.cc +++ b/runtime/linear_alloc.cc @@ -23,6 +23,11 @@ namespace art { LinearAlloc::LinearAlloc(ArenaPool* pool) : lock_("linear alloc"), allocator_(pool) { } +void* LinearAlloc::Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) { + MutexLock mu(self, lock_); + return allocator_.Realloc(ptr, old_size, new_size); +} + void* LinearAlloc::Alloc(Thread* self, size_t size) { MutexLock mu(self, lock_); return 
allocator_.Alloc(size); @@ -33,4 +38,14 @@ size_t LinearAlloc::GetUsedMemory() const { return allocator_.BytesUsed(); } +ArenaPool* LinearAlloc::GetArenaPool() { + MutexLock mu(Thread::Current(), lock_); + return allocator_.GetArenaPool(); +} + +bool LinearAlloc::Contains(void* ptr) const { + MutexLock mu(Thread::Current(), lock_); + return allocator_.Contains(ptr); +} + } // namespace art diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h index fcabcc803e..c10ddfda9f 100644 --- a/runtime/linear_alloc.h +++ b/runtime/linear_alloc.h @@ -28,7 +28,10 @@ class LinearAlloc { public: explicit LinearAlloc(ArenaPool* pool); - void* Alloc(Thread* self, size_t size); + void* Alloc(Thread* self, size_t size) LOCKS_EXCLUDED(lock_); + + // Realloc never frees the input pointer, it is the caller's job to do this if necessary. + void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) LOCKS_EXCLUDED(lock_); // Allocate and construct an array of structs of type T. template<class T> @@ -37,7 +40,12 @@ class LinearAlloc { } // Return the number of bytes used in the allocator. - size_t GetUsedMemory() const; + size_t GetUsedMemory() const LOCKS_EXCLUDED(lock_); + + ArenaPool* GetArenaPool() LOCKS_EXCLUDED(lock_); + + // Return true if the linear alloc contains an address. 
+ bool Contains(void* ptr) const; private: mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc index 81c656b6fa..91a98707c4 100644 --- a/runtime/mirror/abstract_method.cc +++ b/runtime/mirror/abstract_method.cc @@ -16,14 +16,14 @@ #include "abstract_method.h" -#include "mirror/art_method-inl.h" +#include "art_method-inl.h" namespace art { namespace mirror { -bool AbstractMethod::CreateFromArtMethod(mirror::ArtMethod* method) { - auto* interface_method = method->GetInterfaceMethodIfProxy(); - SetFieldObject<false>(ArtMethodOffset(), method); +bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) { + auto* interface_method = method->GetInterfaceMethodIfProxy(sizeof(void*)); + SetArtMethod(method); SetFieldObject<false>(DeclaringClassOffset(), method->GetDeclaringClass()); SetFieldObject<false>( DeclaringClassOfOverriddenMethodOffset(), interface_method->GetDeclaringClass()); @@ -32,8 +32,12 @@ bool AbstractMethod::CreateFromArtMethod(mirror::ArtMethod* method) { return true; } -mirror::ArtMethod* AbstractMethod::GetArtMethod() { - return GetFieldObject<mirror::ArtMethod>(ArtMethodOffset()); +ArtMethod* AbstractMethod::GetArtMethod() { + return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset())); +} + +void AbstractMethod::SetArtMethod(ArtMethod* method) { + SetField64<false>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method)); } mirror::Class* AbstractMethod::GetDeclaringClass() { diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h index ef51d7f4ae..99d697a6d3 100644 --- a/runtime/mirror/abstract_method.h +++ b/runtime/mirror/abstract_method.h @@ -26,18 +26,19 @@ namespace art { struct AbstractMethodOffsets; +class ArtMethod; namespace mirror { -class ArtMethod; - // C++ mirror of java.lang.reflect.AbstractMethod. 
class MANAGED AbstractMethod : public AccessibleObject { public: // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod. - bool CreateFromArtMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool CreateFromArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Only used by the image writer. + void SetArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: @@ -57,9 +58,10 @@ class MANAGED AbstractMethod : public AccessibleObject { return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, dex_method_index_)); } - HeapReference<mirror::ArtMethod> art_method_; HeapReference<mirror::Class> declaring_class_; HeapReference<mirror::Class> declaring_class_of_overridden_method_; + uint32_t padding_; + uint64_t art_method_; uint32_t access_flags_; uint32_t dex_method_index_; diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h index e93717e6f3..d343292ca3 100644 --- a/runtime/mirror/array-inl.h +++ b/runtime/mirror/array-inl.h @@ -20,19 +20,19 @@ #include "array.h" #include "base/bit_utils.h" +#include "base/casts.h" #include "base/logging.h" #include "base/stringprintf.h" -#include "base/casts.h" -#include "class.h" +#include "class-inl.h" #include "gc/heap-inl.h" #include "thread.h" namespace art { namespace mirror { -inline uint32_t Array::ClassSize() { +inline uint32_t Array::ClassSize(size_t pointer_size) { uint32_t vtable_entries = Object::kVTableLength; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0); + return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size); } template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> @@ -371,6 +371,30 @@ inline void 
PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, i } } +template<typename T> +inline T PointerArray::GetElementPtrSize(uint32_t idx, size_t ptr_size) { + // C style casts here since we sometimes have T be a pointer, or sometimes an integer + // (for stack traces). + if (ptr_size == 8) { + return (T)static_cast<uintptr_t>(AsLongArray()->GetWithoutChecks(idx)); + } + DCHECK_EQ(ptr_size, 4u); + return (T)static_cast<uintptr_t>(AsIntArray()->GetWithoutChecks(idx)); +} + +template<bool kTransactionActive, bool kUnchecked, typename T> +inline void PointerArray::SetElementPtrSize(uint32_t idx, T element, size_t ptr_size) { + if (ptr_size == 8) { + (kUnchecked ? down_cast<LongArray*>(static_cast<Object*>(this)) : AsLongArray())-> + SetWithoutChecks<kTransactionActive>(idx, (uint64_t)(element)); + } else { + DCHECK_EQ(ptr_size, 4u); + DCHECK_LE((uintptr_t)element, 0xFFFFFFFFu); + (kUnchecked ? down_cast<IntArray*>(static_cast<Object*>(this)) : AsIntArray()) + ->SetWithoutChecks<kTransactionActive>(idx, static_cast<uint32_t>((uintptr_t)element)); + } +} + } // namespace mirror } // namespace art diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc index b92f01797a..d72c03ff86 100644 --- a/runtime/mirror/array.cc +++ b/runtime/mirror/array.cc @@ -125,6 +125,26 @@ void Array::ThrowArrayStoreException(Object* object) { art::ThrowArrayStoreException(object->GetClass(), this->GetClass()); } +Array* Array::CopyOf(Thread* self, int32_t new_length) { + CHECK(GetClass()->GetComponentType()->IsPrimitive()) << "Will miss write barriers"; + DCHECK_GE(new_length, 0); + // We may get copied by a compacting GC. + StackHandleScope<1> hs(self); + auto h_this(hs.NewHandle(this)); + auto* heap = Runtime::Current()->GetHeap(); + gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? 
heap->GetCurrentAllocator() : + heap->GetCurrentNonMovingAllocator(); + const auto component_size = GetClass()->GetComponentSize(); + const auto component_shift = GetClass()->GetComponentSizeShift(); + Array* new_array = Alloc<true>(self, GetClass(), new_length, component_shift, allocator_type); + if (LIKELY(new_array != nullptr)) { + memcpy(new_array->GetRawData(component_size, 0), h_this->GetRawData(component_size, 0), + std::min(h_this->GetLength(), new_length) << component_shift); + } + return new_array; +} + + template <typename T> GcRoot<Class> PrimitiveArray<T>::array_class_; // Explicitly instantiate all the primitive array types. diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h index 832ad68dcd..c4f6c84e83 100644 --- a/runtime/mirror/array.h +++ b/runtime/mirror/array.h @@ -31,7 +31,7 @@ namespace mirror { class MANAGED Array : public Object { public: // The size of a java.lang.Class representing an array. - static uint32_t ClassSize(); + static uint32_t ClassSize(size_t pointer_size); // Allocates an array with the given properties, if kFillUsable is true the array will be of at // least component_count size, however, if there's usable space at the end of the allocation the @@ -84,6 +84,8 @@ class MANAGED Array : public Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Array* CopyOf(Thread* self, int32_t new_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + protected: void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -174,6 +176,18 @@ class MANAGED PrimitiveArray : public Array { DISALLOW_IMPLICIT_CONSTRUCTORS(PrimitiveArray); }; +// Either an IntArray or a LongArray. 
+class PointerArray : public Array { + public: + template<typename T> + T GetElementPtrSize(uint32_t idx, size_t ptr_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<bool kTransactionActive = false, bool kUnchecked = false, typename T> + void SetElementPtrSize(uint32_t idx, T element, size_t ptr_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +}; + } // namespace mirror } // namespace art diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 5752a15b8b..835b94ade4 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -20,6 +20,7 @@ #include "class.h" #include "art_field-inl.h" +#include "art_method.h" #include "art_method-inl.h" #include "class_loader.h" #include "common_throws.h" @@ -60,130 +61,157 @@ inline DexCache* Class::GetDexCache() { return GetFieldObject<DexCache, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_)); } -inline ObjectArray<ArtMethod>* Class::GetDirectMethods() { +inline ArtMethod* Class::GetDirectMethodsPtr() { DCHECK(IsLoaded() || IsErroneous()); - return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)); + return GetDirectMethodsPtrUnchecked(); } -inline void Class::SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(nullptr == GetFieldObject<ObjectArray<ArtMethod>>( - OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_))); - DCHECK_NE(0, new_direct_methods->GetLength()); - SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), new_direct_methods); +inline ArtMethod* Class::GetDirectMethodsPtrUnchecked() { + return reinterpret_cast<ArtMethod*>(GetField64(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_))); } -inline ArtMethod* Class::GetDirectMethod(int32_t i) { - return GetDirectMethods()->Get(i); +inline ArtMethod* Class::GetVirtualMethodsPtrUnchecked() { + return reinterpret_cast<ArtMethod*>(GetField64(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_))); } 
-inline void Class::SetDirectMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectArray<ArtMethod>* direct_methods = - GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)); - direct_methods->Set<false>(i, f); +inline void Class::SetDirectMethodsPtr(ArtMethod* new_direct_methods) { + DCHECK(GetDirectMethodsPtrUnchecked() == nullptr); + SetDirectMethodsPtrUnchecked(new_direct_methods); } -// Returns the number of static, private, and constructor methods. -inline uint32_t Class::NumDirectMethods() { - return (GetDirectMethods() != nullptr) ? GetDirectMethods()->GetLength() : 0; +inline void Class::SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods) { + SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), + reinterpret_cast<uint64_t>(new_direct_methods)); +} + +inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, size_t pointer_size) { + CheckPointerSize(pointer_size); + auto* methods = GetDirectMethodsPtrUnchecked(); + DCHECK(methods != nullptr); + return reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(methods) + + ArtMethod::ObjectSize(pointer_size) * i); +} + +inline ArtMethod* Class::GetDirectMethod(size_t i, size_t pointer_size) { + CheckPointerSize(pointer_size); + auto* methods = GetDirectMethodsPtr(); + DCHECK(methods != nullptr); + return reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(methods) + + ArtMethod::ObjectSize(pointer_size) * i); } template<VerifyObjectFlags kVerifyFlags> -inline ObjectArray<ArtMethod>* Class::GetVirtualMethods() { - DCHECK(IsLoaded() || IsErroneous()); - return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_)); +inline ArtMethod* Class::GetVirtualMethodsPtr() { + DCHECK(IsLoaded<kVerifyFlags>() || IsErroneous<kVerifyFlags>()); + return GetVirtualMethodsPtrUnchecked(); } -inline void Class::SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods) { +inline 
void Class::SetVirtualMethodsPtr(ArtMethod* new_virtual_methods) { // TODO: we reassign virtual methods to grow the table for miranda // methods.. they should really just be assigned once. - DCHECK_NE(0, new_virtual_methods->GetLength()); - SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), new_virtual_methods); -} - -inline uint32_t Class::NumVirtualMethods() { - return (GetVirtualMethods() != nullptr) ? GetVirtualMethods()->GetLength() : 0; + SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), + reinterpret_cast<uint64_t>(new_virtual_methods)); } template<VerifyObjectFlags kVerifyFlags> -inline ArtMethod* Class::GetVirtualMethod(uint32_t i) { +inline ArtMethod* Class::GetVirtualMethod(size_t i, size_t pointer_size) { + CheckPointerSize(pointer_size); DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>()) << PrettyClass(this) << " status=" << GetStatus(); - return GetVirtualMethods()->GetWithoutChecks(i); + return GetVirtualMethodUnchecked(i, pointer_size); } -inline ArtMethod* Class::GetVirtualMethodDuringLinking(uint32_t i) { +inline ArtMethod* Class::GetVirtualMethodDuringLinking(size_t i, size_t pointer_size) { + CheckPointerSize(pointer_size); DCHECK(IsLoaded() || IsErroneous()); - return GetVirtualMethods()->GetWithoutChecks(i); + return GetVirtualMethodUnchecked(i, pointer_size); } -inline void Class::SetVirtualMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectArray<ArtMethod>* virtual_methods = - GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_)); - virtual_methods->SetWithoutChecks<false>(i, f); +inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, size_t pointer_size) { + CheckPointerSize(pointer_size); + auto* methods = GetVirtualMethodsPtrUnchecked(); + DCHECK(methods != nullptr); + return reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(methods) + + ArtMethod::ObjectSize(pointer_size) * i); } 
-inline ObjectArray<ArtMethod>* Class::GetVTable() { +inline PointerArray* Class::GetVTable() { DCHECK(IsResolved() || IsErroneous()); - return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_)); + return GetFieldObject<PointerArray>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_)); } -inline ObjectArray<ArtMethod>* Class::GetVTableDuringLinking() { +inline PointerArray* Class::GetVTableDuringLinking() { DCHECK(IsLoaded() || IsErroneous()); - return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_)); + return GetFieldObject<PointerArray>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_)); } -inline void Class::SetVTable(ObjectArray<ArtMethod>* new_vtable) { +inline void Class::SetVTable(PointerArray* new_vtable) { SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable); } -inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i) { - uint32_t offset = EmbeddedImTableOffset().Uint32Value() + i * sizeof(ImTableEntry); - return GetFieldObject<mirror::ArtMethod>(MemberOffset(offset)); +inline MemberOffset Class::EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size) { + DCHECK_LT(i, kImtSize); + return MemberOffset( + EmbeddedImTableOffset(pointer_size).Uint32Value() + i * ImTableEntrySize(pointer_size)); +} + +inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size) { + DCHECK(ShouldHaveEmbeddedImtAndVTable()); + return GetFieldPtrWithSize<ArtMethod*>( + EmbeddedImTableEntryOffset(i, pointer_size), pointer_size); } -inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method) { - uint32_t offset = EmbeddedImTableOffset().Uint32Value() + i * sizeof(ImTableEntry); - SetFieldObject<false>(MemberOffset(offset), method); +inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) { + DCHECK(ShouldHaveEmbeddedImtAndVTable()); + SetFieldPtrWithSize<false>(EmbeddedImTableEntryOffset(i, pointer_size), method, pointer_size); } inline bool 
Class::HasVTable() { - return (GetVTable() != nullptr) || ShouldHaveEmbeddedImtAndVTable(); + return GetVTable() != nullptr || ShouldHaveEmbeddedImtAndVTable(); } inline int32_t Class::GetVTableLength() { if (ShouldHaveEmbeddedImtAndVTable()) { return GetEmbeddedVTableLength(); } - return (GetVTable() != nullptr) ? GetVTable()->GetLength() : 0; + return GetVTable() != nullptr ? GetVTable()->GetLength() : 0; } -inline ArtMethod* Class::GetVTableEntry(uint32_t i) { +inline ArtMethod* Class::GetVTableEntry(uint32_t i, size_t pointer_size) { if (ShouldHaveEmbeddedImtAndVTable()) { - return GetEmbeddedVTableEntry(i); + return GetEmbeddedVTableEntry(i, pointer_size); } - return (GetVTable() != nullptr) ? GetVTable()->Get(i) : nullptr; + auto* vtable = GetVTable(); + DCHECK(vtable != nullptr); + return vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size); } inline int32_t Class::GetEmbeddedVTableLength() { - return GetField32(EmbeddedVTableLengthOffset()); + return GetField32(MemberOffset(EmbeddedVTableLengthOffset())); } inline void Class::SetEmbeddedVTableLength(int32_t len) { - SetField32<false>(EmbeddedVTableLengthOffset(), len); + SetField32<false>(MemberOffset(EmbeddedVTableLengthOffset()), len); } -inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i) { - uint32_t offset = EmbeddedVTableOffset().Uint32Value() + i * sizeof(VTableEntry); - return GetFieldObject<mirror::ArtMethod>(MemberOffset(offset)); +inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size) { + return MemberOffset( + EmbeddedVTableOffset(pointer_size).Uint32Value() + i * VTableEntrySize(pointer_size)); } -inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) { - uint32_t offset = EmbeddedVTableOffset().Uint32Value() + i * sizeof(VTableEntry); - SetFieldObject<false>(MemberOffset(offset), method); - CHECK(method == GetVTableDuringLinking()->Get(i)); +inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size) { + return 
GetFieldPtrWithSize<ArtMethod*>(EmbeddedVTableEntryOffset(i, pointer_size), pointer_size); +} + +inline void Class::SetEmbeddedVTableEntryUnchecked( + uint32_t i, ArtMethod* method, size_t pointer_size) { + SetFieldPtrWithSize<false>(EmbeddedVTableEntryOffset(i, pointer_size), method, pointer_size); +} + +inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) { + auto* vtable = GetVTableDuringLinking(); + CHECK_EQ(method, vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size)); + SetEmbeddedVTableEntryUnchecked(i, method, pointer_size); } inline bool Class::Implements(Class* klass) { @@ -340,41 +368,43 @@ inline bool Class::IsSubClass(Class* klass) { return false; } -inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) { +inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size) { Class* declaring_class = method->GetDeclaringClass(); DCHECK(declaring_class != nullptr) << PrettyClass(this); DCHECK(declaring_class->IsInterface()) << PrettyMethod(method); // TODO cache to improve lookup speed - int32_t iftable_count = GetIfTableCount(); + const int32_t iftable_count = GetIfTableCount(); IfTable* iftable = GetIfTable(); for (int32_t i = 0; i < iftable_count; i++) { if (iftable->GetInterface(i) == declaring_class) { - return iftable->GetMethodArray(i)->Get(method->GetMethodIndex()); + return iftable->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>( + method->GetMethodIndex(), pointer_size); } } return nullptr; } -inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method) { +inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size) { DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsMiranda()); // The argument method may from a super class. // Use the index to a potentially overridden one for this instance's class. 
- return GetVTableEntry(method->GetMethodIndex()); + return GetVTableEntry(method->GetMethodIndex(), pointer_size); } -inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method) { +inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size) { DCHECK(!method->GetDeclaringClass()->IsInterface()); - return GetSuperClass()->GetVTableEntry(method->GetMethodIndex()); + return GetSuperClass()->GetVTableEntry(method->GetMethodIndex(), pointer_size); } -inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method) { +inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method, + size_t pointer_size) { if (method->IsDirect()) { return method; } if (method->GetDeclaringClass()->IsInterface() && !method->IsMiranda()) { - return FindVirtualMethodForInterface(method); + return FindVirtualMethodForInterface(method, pointer_size); } - return FindVirtualMethodForVirtual(method); + return FindVirtualMethodForVirtual(method, pointer_size); } inline IfTable* Class::GetIfTable() { @@ -406,24 +436,24 @@ inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() { : ClassOffset(); } -inline MemberOffset Class::GetFirstReferenceStaticFieldOffset() { +inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_size) { DCHECK(IsResolved()); uint32_t base = sizeof(mirror::Class); // Static fields come after the class. if (ShouldHaveEmbeddedImtAndVTable()) { // Static fields come after the embedded tables. 
- base = mirror::Class::ComputeClassSize(true, GetEmbeddedVTableLength(), - 0, 0, 0, 0, 0); + base = mirror::Class::ComputeClassSize( + true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size); } return MemberOffset(base); } -inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking() { +inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) { DCHECK(IsLoaded()); uint32_t base = sizeof(mirror::Class); // Static fields come after the class. if (ShouldHaveEmbeddedImtAndVTable()) { // Static fields come after the embedded tables. base = mirror::Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(), - 0, 0, 0, 0, 0); + 0, 0, 0, 0, 0, pointer_size); } return MemberOffset(base); } @@ -499,14 +529,12 @@ inline uint32_t Class::GetAccessFlags() { // circularity issue during loading the names of its members DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() || IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() || - this == String::GetJavaLangString() || - this == ArtMethod::GetJavaLangReflectArtMethod()) + this == String::GetJavaLangString()) << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>() << " IsRetired=" << IsRetired<kVerifyFlags>() << " IsErroneous=" << IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() << " IsString=" << (this == String::GetJavaLangString()) - << " IsArtMethod=" << (this == ArtMethod::GetJavaLangReflectArtMethod()) << " descriptor=" << PrettyDescriptor(this); return GetField32<kVerifyFlags>(AccessFlagsOffset()); } @@ -594,20 +622,20 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables, uint32_t num_16bit_static_fields, uint32_t num_32bit_static_fields, uint32_t num_64bit_static_fields, - uint32_t num_ref_static_fields) { + uint32_t num_ref_static_fields, + size_t pointer_size) { // Space used by java.lang.Class and its instance fields. uint32_t size = sizeof(Class); // Space used by embedded tables. 
if (has_embedded_tables) { - uint32_t embedded_imt_size = kImtSize * sizeof(ImTableEntry); - uint32_t embedded_vtable_size = num_vtable_entries * sizeof(VTableEntry); - size += embedded_imt_size + - sizeof(int32_t) /* vtable len */ + - embedded_vtable_size; + const uint32_t embedded_imt_size = kImtSize * ImTableEntrySize(pointer_size); + const uint32_t embedded_vtable_size = num_vtable_entries * VTableEntrySize(pointer_size); + size = RoundUp(size + sizeof(uint32_t) /* embedded vtable len */, pointer_size) + + embedded_imt_size + embedded_vtable_size; } // Space used by reference statics. - size += num_ref_static_fields * sizeof(HeapReference<Object>); + size += num_ref_static_fields * sizeof(HeapReference<Object>); if (!IsAligned<8>(size) && num_64bit_static_fields > 0) { uint32_t gap = 8 - (size & 0x7); size += gap; // will be padded @@ -629,10 +657,8 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables, } // Guaranteed to be at least 4 byte aligned. No need for further alignments. // Space used for primitive static fields. - size += (num_8bit_static_fields * sizeof(uint8_t)) + - (num_16bit_static_fields * sizeof(uint16_t)) + - (num_32bit_static_fields * sizeof(uint32_t)) + - (num_64bit_static_fields * sizeof(uint64_t)); + size += num_8bit_static_fields * sizeof(uint8_t) + num_16bit_static_fields * sizeof(uint16_t) + + num_32bit_static_fields * sizeof(uint32_t) + num_64bit_static_fields * sizeof(uint64_t); return size; } @@ -651,40 +677,10 @@ inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) // allocated with the right size for those. Also, unresolved classes don't have fields // linked yet. 
VisitStaticFieldsReferences<kVisitClass>(this, visitor); - if (ShouldHaveEmbeddedImtAndVTable()) { - VisitEmbeddedImtAndVTable(visitor); - } - } -} - -template<typename Visitor> -inline void Class::VisitEmbeddedImtAndVTable(const Visitor& visitor) { - uint32_t pos = sizeof(mirror::Class); - - size_t count = kImtSize; - for (size_t i = 0; i < count; ++i) { - MemberOffset offset = MemberOffset(pos); - visitor(this, offset, true); - pos += sizeof(ImTableEntry); - } - - // Skip vtable length. - pos += sizeof(int32_t); - - count = GetEmbeddedVTableLength(); - for (size_t i = 0; i < count; ++i) { - MemberOffset offset = MemberOffset(pos); - visitor(this, offset, true); - pos += sizeof(VTableEntry); } } template<ReadBarrierOption kReadBarrierOption> -inline bool Class::IsArtMethodClass() const { - return this == ArtMethod::GetJavaLangReflectArtMethod<kReadBarrierOption>(); -} - -template<ReadBarrierOption kReadBarrierOption> inline bool Class::IsReferenceClass() const { return this == Reference::GetJavaLangRefReference<kReadBarrierOption>(); } @@ -812,27 +808,92 @@ inline ObjectArray<String>* Class::GetDexCacheStrings() { } template<class Visitor> -void mirror::Class::VisitFieldRoots(Visitor& visitor) { +void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) { ArtField* const sfields = GetSFieldsUnchecked(); // Since we visit class roots while we may be writing these fields, check against null. - // TODO: Is this safe for concurrent compaction? 
if (sfields != nullptr) { for (size_t i = 0, count = NumStaticFields(); i < count; ++i) { + auto* f = &sfields[i]; if (kIsDebugBuild && IsResolved()) { - CHECK_EQ(sfields[i].GetDeclaringClass(), this) << GetStatus(); + CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus(); } - visitor.VisitRoot(sfields[i].DeclaringClassRoot().AddressWithoutBarrier()); + f->VisitRoots(visitor); } } ArtField* const ifields = GetIFieldsUnchecked(); if (ifields != nullptr) { for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) { + auto* f = &ifields[i]; if (kIsDebugBuild && IsResolved()) { - CHECK_EQ(ifields[i].GetDeclaringClass(), this) << GetStatus(); + CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus(); } - visitor.VisitRoot(ifields[i].DeclaringClassRoot().AddressWithoutBarrier()); + f->VisitRoots(visitor); } } + for (auto& m : GetDirectMethods(pointer_size)) { + m.VisitRoots(visitor); + } + for (auto& m : GetVirtualMethods(pointer_size)) { + m.VisitRoots(visitor); + } +} + +inline StrideIterator<ArtMethod> Class::DirectMethodsBegin(size_t pointer_size) { + CheckPointerSize(pointer_size); + auto* methods = GetDirectMethodsPtrUnchecked(); + auto stride = ArtMethod::ObjectSize(pointer_size); + return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods), stride); +} + +inline StrideIterator<ArtMethod> Class::DirectMethodsEnd(size_t pointer_size) { + CheckPointerSize(pointer_size); + auto* methods = GetDirectMethodsPtrUnchecked(); + auto stride = ArtMethod::ObjectSize(pointer_size); + auto count = NumDirectMethods(); + return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods) + stride * count, stride); +} + +inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) { + CheckPointerSize(pointer_size); + return MakeIterationRange(DirectMethodsBegin(pointer_size), DirectMethodsEnd(pointer_size)); +} + +inline StrideIterator<ArtMethod> Class::VirtualMethodsBegin(size_t pointer_size) { + CheckPointerSize(pointer_size); + 
auto* methods = GetVirtualMethodsPtrUnchecked(); + auto stride = ArtMethod::ObjectSize(pointer_size); + return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods), stride); +} + +inline StrideIterator<ArtMethod> Class::VirtualMethodsEnd(size_t pointer_size) { + CheckPointerSize(pointer_size); + auto* methods = GetVirtualMethodsPtrUnchecked(); + auto stride = ArtMethod::ObjectSize(pointer_size); + auto count = NumVirtualMethods(); + return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods) + stride * count, stride); +} + +inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) { + return MakeIterationRange(VirtualMethodsBegin(pointer_size), VirtualMethodsEnd(pointer_size)); +} + +inline MemberOffset Class::EmbeddedImTableOffset(size_t pointer_size) { + CheckPointerSize(pointer_size); + // Round up since we want the embedded imt and vtable to be pointer size aligned in case 64 bits. + // Add 32 bits for embedded vtable length. + return MemberOffset( + RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size)); +} + +inline MemberOffset Class::EmbeddedVTableOffset(size_t pointer_size) { + CheckPointerSize(pointer_size); + return MemberOffset(EmbeddedImTableOffset(pointer_size).Uint32Value() + + kImtSize * ImTableEntrySize(pointer_size)); +} + +inline void Class::CheckPointerSize(size_t pointer_size) { + DCHECK(ValidPointerSize(pointer_size)) << pointer_size; + DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); } } // namespace mirror diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index 56c586a94d..f0b7bfddea 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -145,9 +145,10 @@ void Class::SetDexCache(DexCache* new_dex_cache) { } void Class::SetClassSize(uint32_t new_class_size) { - if (kIsDebugBuild && (new_class_size < GetClassSize())) { - DumpClass(LOG(ERROR), kDumpClassFullDetail); - CHECK_GE(new_class_size, 
GetClassSize()) << " class=" << PrettyTypeOf(this); + if (kIsDebugBuild && new_class_size < GetClassSize()) { + DumpClass(LOG(INTERNAL_FATAL), kDumpClassFullDetail); + LOG(INTERNAL_FATAL) << new_class_size << " vs " << GetClassSize(); + LOG(FATAL) << " class=" << PrettyTypeOf(this); } // Not called within a transaction. SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size); @@ -205,10 +206,11 @@ void Class::DumpClass(std::ostream& os, int flags) { return; } - Thread* self = Thread::Current(); + Thread* const self = Thread::Current(); StackHandleScope<2> hs(self); Handle<mirror::Class> h_this(hs.NewHandle(this)); Handle<mirror::Class> h_super(hs.NewHandle(GetSuperClass())); + auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); std::string temp; os << "----- " << (IsInterface() ? "interface" : "class") << " " @@ -244,12 +246,13 @@ void Class::DumpClass(std::ostream& os, int flags) { os << " vtable (" << h_this->NumVirtualMethods() << " entries, " << (h_super.Get() != nullptr ? 
h_super->NumVirtualMethods() : 0) << " in super):\n"; for (size_t i = 0; i < NumVirtualMethods(); ++i) { - os << StringPrintf(" %2zd: %s\n", i, - PrettyMethod(h_this->GetVirtualMethodDuringLinking(i)).c_str()); + os << StringPrintf(" %2zd: %s\n", i, PrettyMethod( + h_this->GetVirtualMethodDuringLinking(i, image_pointer_size)).c_str()); } os << " direct methods (" << h_this->NumDirectMethods() << " entries):\n"; for (size_t i = 0; i < h_this->NumDirectMethods(); ++i) { - os << StringPrintf(" %2zd: %s\n", i, PrettyMethod(h_this->GetDirectMethod(i)).c_str()); + os << StringPrintf(" %2zd: %s\n", i, PrettyMethod( + h_this->GetDirectMethod(i, image_pointer_size)).c_str()); } if (h_this->NumStaticFields() > 0) { os << " static fields (" << h_this->NumStaticFields() << " entries):\n"; @@ -275,7 +278,7 @@ void Class::DumpClass(std::ostream& os, int flags) { } void Class::SetReferenceInstanceOffsets(uint32_t new_reference_offsets) { - if (kIsDebugBuild && (new_reference_offsets != kClassWalkSuper)) { + if (kIsDebugBuild && new_reference_offsets != kClassWalkSuper) { // Sanity check that the number of bits set in the reference offset bitmap // agrees with the number of references uint32_t count = 0; @@ -342,9 +345,10 @@ void Class::SetClassLoader(ClassLoader* new_class_loader) { } } -ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature) { +ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature, + size_t pointer_size) { // Check the current class before checking the interfaces. 
- ArtMethod* method = FindDeclaredVirtualMethod(name, signature); + ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size); if (method != nullptr) { return method; } @@ -352,7 +356,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece int32_t iftable_count = GetIfTableCount(); IfTable* iftable = GetIfTable(); for (int32_t i = 0; i < iftable_count; ++i) { - method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature); + method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size); if (method != nullptr) { return method; } @@ -360,9 +364,10 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece return nullptr; } -ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature) { +ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature, + size_t pointer_size) { // Check the current class before checking the interfaces. 
- ArtMethod* method = FindDeclaredVirtualMethod(name, signature); + ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size); if (method != nullptr) { return method; } @@ -370,7 +375,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& int32_t iftable_count = GetIfTableCount(); IfTable* iftable = GetIfTable(); for (int32_t i = 0; i < iftable_count; ++i) { - method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature); + method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size); if (method != nullptr) { return method; } @@ -378,9 +383,10 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& return nullptr; } -ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) { +ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx, + size_t pointer_size) { // Check the current class before checking the interfaces. 
- ArtMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx); + ArtMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size); if (method != nullptr) { return method; } @@ -388,7 +394,8 @@ ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_me int32_t iftable_count = GetIfTableCount(); IfTable* iftable = GetIfTable(); for (int32_t i = 0; i < iftable_count; ++i) { - method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(dex_cache, dex_method_idx); + method = iftable->GetInterface(i)->FindDeclaredVirtualMethod( + dex_cache, dex_method_idx, pointer_size); if (method != nullptr) { return method; } @@ -396,41 +403,42 @@ ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_me return nullptr; } -ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) { - for (size_t i = 0; i < NumDirectMethods(); ++i) { - ArtMethod* method = GetDirectMethod(i); - if (name == method->GetName() && method->GetSignature() == signature) { - return method; +ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature, + size_t pointer_size) { + for (auto& method : GetDirectMethods(pointer_size)) { + if (name == method.GetName() && method.GetSignature() == signature) { + return &method; } } return nullptr; } -ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature) { - for (size_t i = 0; i < NumDirectMethods(); ++i) { - ArtMethod* method = GetDirectMethod(i); - if (name == method->GetName() && signature == method->GetSignature()) { - return method; +ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature, + size_t pointer_size) { + for (auto& method : GetDirectMethods(pointer_size)) { + if (name == method.GetName() && signature == method.GetSignature()) { + return &method; } } return nullptr; } -ArtMethod* Class::FindDeclaredDirectMethod(const 
DexCache* dex_cache, uint32_t dex_method_idx) { +ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx, + size_t pointer_size) { if (GetDexCache() == dex_cache) { - for (size_t i = 0; i < NumDirectMethods(); ++i) { - ArtMethod* method = GetDirectMethod(i); - if (method->GetDexMethodIndex() == dex_method_idx) { - return method; + for (auto& method : GetDirectMethods(pointer_size)) { + if (method.GetDexMethodIndex() == dex_method_idx) { + return &method; } } } return nullptr; } -ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature) { +ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature, + size_t pointer_size) { for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) { - ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature); + ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size); if (method != nullptr) { return method; } @@ -438,9 +446,10 @@ ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& s return nullptr; } -ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature) { +ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature, + size_t pointer_size) { for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) { - ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature); + ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size); if (method != nullptr) { return method; } @@ -448,9 +457,10 @@ ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& sig return nullptr; } -ArtMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) { +ArtMethod* Class::FindDirectMethod( + const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) { for (Class* klass = this; klass != nullptr; klass = 
klass->GetSuperClass()) { - ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx); + ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx, pointer_size); if (method != nullptr) { return method; } @@ -458,44 +468,44 @@ ArtMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_metho return nullptr; } -ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) { - for (size_t i = 0; i < NumVirtualMethods(); ++i) { - ArtMethod* method = GetVirtualMethod(i); - if (name == method->GetName() && method->GetSignature() == signature) { - return method; +ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature, + size_t pointer_size) { + for (auto& method : GetVirtualMethods(pointer_size)) { + if (name == method.GetName() && method.GetSignature() == signature) { + return &method; } } return nullptr; } -ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature) { - for (size_t i = 0; i < NumVirtualMethods(); ++i) { - ArtMethod* method = GetVirtualMethod(i); - if (name == method->GetName() && signature == method->GetSignature()) { - return method; +ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature, + size_t pointer_size) { + for (auto& method : GetVirtualMethods(pointer_size)) { + if (name == method.GetName() && signature == method.GetSignature()) { + return &method; } } return nullptr; } -ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) { +ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx, + size_t pointer_size) { if (GetDexCache() == dex_cache) { - for (size_t i = 0; i < NumVirtualMethods(); ++i) { - ArtMethod* method = GetVirtualMethod(i); - if (method->GetDexMethodIndex() == dex_method_idx && - // A miranda method may have a different DexCache and is 
always created by linking, - // never *declared* in the class. - !method->IsMiranda()) { - return method; + for (auto& method : GetVirtualMethods(pointer_size)) { + // A miranda method may have a different DexCache and is always created by linking, + // never *declared* in the class. + if (method.GetDexMethodIndex() == dex_method_idx && !method.IsMiranda()) { + return &method; } } } return nullptr; } -ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const StringPiece& signature) { +ArtMethod* Class::FindVirtualMethod( + const StringPiece& name, const StringPiece& signature, size_t pointer_size) { for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) { - ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature); + ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size); if (method != nullptr) { return method; } @@ -503,9 +513,10 @@ ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const StringPiece& return nullptr; } -ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const Signature& signature) { +ArtMethod* Class::FindVirtualMethod( + const StringPiece& name, const Signature& signature, size_t pointer_size) { for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) { - ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature); + ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size); if (method != nullptr) { return method; } @@ -513,9 +524,10 @@ ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const Signature& si return nullptr; } -ArtMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) { +ArtMethod* Class::FindVirtualMethod( + const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) { for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) { - ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx); + 
ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size); if (method != nullptr) { return method; } @@ -523,13 +535,12 @@ ArtMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_meth return nullptr; } -ArtMethod* Class::FindClassInitializer() { - for (size_t i = 0; i < NumDirectMethods(); ++i) { - ArtMethod* method = GetDirectMethod(i); - if (method->IsClassInitializer()) { - DCHECK_STREQ(method->GetName(), "<clinit>"); - DCHECK_STREQ(method->GetSignature().ToString().c_str(), "()V"); - return method; +ArtMethod* Class::FindClassInitializer(size_t pointer_size) { + for (ArtMethod& method : GetDirectMethods(pointer_size)) { + if (method.IsClassInitializer()) { + DCHECK_STREQ(method.GetName(), "<clinit>"); + DCHECK_STREQ(method.GetSignature().ToString().c_str(), "()V"); + return &method; } } return nullptr; @@ -684,23 +695,18 @@ ArtField* Class::FindField(Thread* self, Handle<Class> klass, const StringPiece& return nullptr; } -static void SetPreverifiedFlagOnMethods(mirror::ObjectArray<mirror::ArtMethod>* methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (methods != nullptr) { - for (int32_t index = 0, end = methods->GetLength(); index < end; ++index) { - mirror::ArtMethod* method = methods->GetWithoutChecks(index); - DCHECK(method != nullptr); - if (!method->IsNative() && !method->IsAbstract()) { - method->SetPreverified(); - } +void Class::SetPreverifiedFlagOnAllMethods(size_t pointer_size) { + DCHECK(IsVerified()); + for (auto& m : GetDirectMethods(pointer_size)) { + if (!m.IsNative() && !m.IsAbstract()) { + m.SetPreverified(); + } + } + for (auto& m : GetVirtualMethods(pointer_size)) { + if (!m.IsNative() && !m.IsAbstract()) { + m.SetPreverified(); } } -} - -void Class::SetPreverifiedFlagOnAllMethods() { - DCHECK(IsVerified()); - SetPreverifiedFlagOnMethods(GetDirectMethods()); - SetPreverifiedFlagOnMethods(GetVirtualMethods()); } const char* Class::GetDescriptor(std::string* storage) { @@ 
-795,21 +801,20 @@ const DexFile::TypeList* Class::GetInterfaceTypeList() { return GetDexFile().GetInterfacesList(*class_def); } -void Class::PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_scope) { - for (uint32_t i = 0; i < kImtSize; i++) { - // Replace null with conflict. - mirror::Object* obj = imt_handle_scope->GetReference(i); - DCHECK(obj != nullptr); - SetEmbeddedImTableEntry(i, obj->AsArtMethod()); +void Class::PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], + size_t pointer_size) { + for (size_t i = 0; i < kImtSize; i++) { + auto method = methods[i]; + DCHECK(method != nullptr); + SetEmbeddedImTableEntry(i, method, pointer_size); } - - ObjectArray<ArtMethod>* table = GetVTableDuringLinking(); + PointerArray* table = GetVTableDuringLinking(); CHECK(table != nullptr) << PrettyClass(this); - SetEmbeddedVTableLength(table->GetLength()); - for (int32_t i = 0; i < table->GetLength(); i++) { - SetEmbeddedVTableEntry(i, table->GetWithoutChecks(i)); + const size_t table_length = table->GetLength(); + SetEmbeddedVTableLength(table_length); + for (size_t i = 0; i < table_length; i++) { + SetEmbeddedVTableEntry(i, table->GetElementPtrSize<ArtMethod*>(i, pointer_size), pointer_size); } - // Keep java.lang.Object class's vtable around for since it's easier // to be reused by array classes during their linking. if (!IsObjectClass()) { @@ -820,21 +825,20 @@ void Class::PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_ // The pre-fence visitor for Class::CopyOf(). 
class CopyClassVisitor { public: - explicit CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, - size_t new_length, size_t copy_bytes, - StackHandleScope<mirror::Class::kImtSize>* imt_handle_scope) + explicit CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length, + size_t copy_bytes, ArtMethod* const (&imt)[mirror::Class::kImtSize], + size_t pointer_size) : self_(self), orig_(orig), new_length_(new_length), - copy_bytes_(copy_bytes), imt_handle_scope_(imt_handle_scope) { + copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) { } - void operator()(Object* obj, size_t usable_size) const + void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - UNUSED(usable_size); StackHandleScope<1> hs(self_); Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass())); mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_); mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_); - h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_handle_scope_); + h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_, pointer_size_); h_new_class_obj->SetClassSize(new_length_); } @@ -843,12 +847,13 @@ class CopyClassVisitor { Handle<mirror::Class>* const orig_; const size_t new_length_; const size_t copy_bytes_; - StackHandleScope<mirror::Class::kImtSize>* const imt_handle_scope_; + ArtMethod* const (&imt_)[mirror::Class::kImtSize]; + const size_t pointer_size_; DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor); }; Class* Class::CopyOf(Thread* self, int32_t new_length, - StackHandleScope<kImtSize>* imt_handle_scope) { + ArtMethod* const (&imt)[mirror::Class::kImtSize], size_t pointer_size) { DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class))); // We may get copied by a compacting GC. 
StackHandleScope<1> hs(self); @@ -856,13 +861,12 @@ Class* Class::CopyOf(Thread* self, int32_t new_length, gc::Heap* heap = Runtime::Current()->GetHeap(); // The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf() // to skip copying the tail part that we will overwrite here. - CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt_handle_scope); - mirror::Object* new_class = - kMovingClasses - ? heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor) - : heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor); + CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt, pointer_size); + mirror::Object* new_class = kMovingClasses ? + heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor) : + heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor); if (UNLIKELY(new_class == nullptr)) { - CHECK(self->IsExceptionPending()); // Expect an OOME. + self->AssertPendingOOMException(); return nullptr; } return new_class->AsClass(); @@ -873,26 +877,32 @@ bool Class::ProxyDescriptorEquals(const char* match) { return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match; } -mirror::ArtMethod* Class::GetDeclaredConstructor( +// TODO: Move this to java_lang_Class.cc? +ArtMethod* Class::GetDeclaredConstructor( Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args) { - auto* direct_methods = GetDirectMethods(); - size_t count = direct_methods != nullptr ? direct_methods->GetLength() : 0u; - for (size_t i = 0; i < count; ++i) { - auto* m = direct_methods->GetWithoutChecks(i); + for (auto& m : GetDirectMethods(sizeof(void*))) { // Skip <clinit> which is a static constructor, as well as non constructors. - if (m->IsStatic() || !m->IsConstructor()) { + if (m.IsStatic() || !m.IsConstructor()) { continue; } // May cause thread suspension and exceptions. 
- if (m->EqualParameters(args)) { - return m; + if (m.GetInterfaceMethodIfProxy(sizeof(void*))->EqualParameters(args)) { + return &m; } - if (self->IsExceptionPending()) { + if (UNLIKELY(self->IsExceptionPending())) { return nullptr; } } return nullptr; } +uint32_t Class::Depth() { + uint32_t depth = 0; + for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) { + depth++; + } + return depth; +} + } // namespace mirror } // namespace art diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index b99fc68933..ba8a693bdb 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -17,6 +17,7 @@ #ifndef ART_RUNTIME_MIRROR_CLASS_H_ #define ART_RUNTIME_MIRROR_CLASS_H_ +#include "base/iteration_range.h" #include "dex_file.h" #include "gc_root.h" #include "gc/allocator_type.h" @@ -27,6 +28,8 @@ #include "object_callbacks.h" #include "primitive.h" #include "read_barrier_option.h" +#include "stride_iterator.h" +#include "utils.h" #ifndef IMT_SIZE #error IMT_SIZE not defined @@ -35,6 +38,7 @@ namespace art { class ArtField; +class ArtMethod; struct ClassOffsets; template<class T> class Handle; template<class T> class Handle; @@ -44,7 +48,6 @@ template<size_t kNumReferences> class PACKED(4) StackHandleScope; namespace mirror { -class ArtMethod; class ClassLoader; class Constructor; class DexCache; @@ -64,16 +67,6 @@ class MANAGED Class FINAL : public Object { // (non-marker) interfaces. static constexpr size_t kImtSize = IMT_SIZE; - // imtable entry embedded in class object. - struct MANAGED ImTableEntry { - HeapReference<ArtMethod> method; - }; - - // vtable entry embedded in class object. 
- struct MANAGED VTableEntry { - HeapReference<ArtMethod> method; - }; - // Class Status // // kStatusRetired: Class that's temporarily used till class linking time @@ -406,13 +399,7 @@ class MANAGED Class FINAL : public Object { } // Depth of class from java.lang.Object - uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - uint32_t depth = 0; - for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) { - depth++; - } - return depth; - } + uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> @@ -427,9 +414,6 @@ class MANAGED Class FINAL : public Object { bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsArtMethodClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> bool IsReferenceClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static MemberOffset ComponentTypeOffset() { @@ -469,12 +453,27 @@ class MANAGED Class FINAL : public Object { bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return (!IsPrimitive() && !IsInterface() && !IsAbstract()) || - ((IsAbstract()) && IsArrayClass()); + (IsAbstract() && IsArrayClass()); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool IsObjectArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetComponentType<kVerifyFlags>() != nullptr && !GetComponentType<kVerifyFlags>()->IsPrimitive(); + return GetComponentType<kVerifyFlags>() != nullptr && + !GetComponentType<kVerifyFlags>()->IsPrimitive(); + } + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool IsIntArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); + 
auto* component_type = GetComponentType<kVerifyFlags>(); + return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>(); + } + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool IsLongArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); + auto* component_type = GetComponentType<kVerifyFlags>(); + return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>(); } // Creates a raw object instance but does not invoke the default constructor. @@ -517,18 +516,19 @@ class MANAGED Class FINAL : public Object { uint32_t num_16bit_static_fields, uint32_t num_32bit_static_fields, uint32_t num_64bit_static_fields, - uint32_t num_ref_static_fields); + uint32_t num_ref_static_fields, + size_t pointer_size); // The size of java.lang.Class.class. - static uint32_t ClassClassSize() { + static uint32_t ClassClassSize(size_t pointer_size) { // The number of vtable entries in java.lang.Class. - uint32_t vtable_entries = Object::kVTableLength + 66; - return ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 0); + uint32_t vtable_entries = Object::kVTableLength + 65; + return ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 0, pointer_size); } // The size of a java.lang.Class representing a primitive such as int.class. - static uint32_t PrimitiveClassSize() { - return ComputeClassSize(false, 0, 0, 0, 0, 0, 0); + static uint32_t PrimitiveClassSize(size_t pointer_size) { + return ComputeClassSize(false, 0, 0, 0, 0, 0, 0, pointer_size); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, @@ -673,60 +673,82 @@ class MANAGED Class FINAL : public Object { // Also updates the dex_cache_strings_ variable from new_dex_cache. 
void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE ObjectArray<ArtMethod>* GetDirectMethods() + ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsBegin(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsEnd(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods) + ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE ArtMethod* GetDirectMethod(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetDirectMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);\ + + void SetDirectMethodsPtr(ArtMethod* new_direct_methods) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Used by image writer. + void SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetDirectMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t + ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Use only when we are allocating populating the method arrays. + ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Returns the number of static, private, and constructor methods. 
- uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_)); + } + void SetNumDirectMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_), num); + } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE ObjectArray<ArtMethod>* GetVirtualMethods() + ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsBegin(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsEnd(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE void SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods) + ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetVirtualMethodsPtr(ArtMethod* new_virtual_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the number of non-inherited virtual methods. 
- ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_)); + } + void SetNumVirtualMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_), num); + } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ArtMethod* GetVirtualMethod(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ArtMethod* GetVirtualMethodDuringLinking(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetVirtualMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t + ArtMethod* GetVirtualMethodDuringLinking(size_t i, size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE ObjectArray<ArtMethod>* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE PointerArray* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE ObjectArray<ArtMethod>* GetVTableDuringLinking() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetVTable(ObjectArray<ArtMethod>* new_vtable) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetVTable(PointerArray* new_vtable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static MemberOffset VTableOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, vtable_); } - static MemberOffset EmbeddedImTableOffset() { - return MemberOffset(sizeof(Class)); - } - static MemberOffset EmbeddedVTableLengthOffset() { - return MemberOffset(sizeof(Class) + kImtSize * sizeof(mirror::Class::ImTableEntry)); - } - - static MemberOffset EmbeddedVTableOffset() { - return MemberOffset(sizeof(Class) + kImtSize * 
sizeof(ImTableEntry) + sizeof(int32_t)); + return MemberOffset(sizeof(Class)); } bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -735,90 +757,117 @@ class MANAGED Class FINAL : public Object { bool HasVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* GetEmbeddedImTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size); - void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size); + + ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* GetVTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetEmbeddedVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetEmbeddedVTableLength(int32_t len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* GetEmbeddedVTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_scope) + inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size) + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Given a method implemented by this class but potentially from a super class, return the // specific implementation method for this class. - ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method) + ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Given a method implemented by this class' super class, return the specific implementation // method for this class. - ArtMethod* FindVirtualMethodForSuper(ArtMethod* method) + ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Given a method implemented by this class, but potentially from a // super class or interface, return the specific implementation // method for this class. - ArtMethod* FindVirtualMethodForInterface(ArtMethod* method) + ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE; - ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method) + ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature) + ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature) + ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) + ArtMethod* FindInterfaceMethod(const 
DexCache* dex_cache, uint32_t dex_method_idx, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) + ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature) + ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) + ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature) + ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature) + ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) + ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) + ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature) + ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& 
signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) + ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature) + ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature) + ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) + ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* FindClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ALWAYS_INLINE int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -867,7 +916,8 @@ class MANAGED Class FINAL : public Object { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the offset of the first reference instance field. Other reference instance fields follow. - MemberOffset GetFirstReferenceInstanceFieldOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MemberOffset GetFirstReferenceInstanceFieldOffset() + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the number of static fields containing reference types. uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -886,10 +936,11 @@ class MANAGED Class FINAL : public Object { } // Get the offset of the first reference static field. 
Other reference static fields follow. - MemberOffset GetFirstReferenceStaticFieldOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the offset of the first reference static field. Other reference static fields follow. - MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking() + MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Gets the static fields of the class. @@ -989,22 +1040,20 @@ class MANAGED Class FINAL : public Object { static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Visit native roots visits roots which are keyed off the native pointers such as ArtFields and + // ArtMethods. template<class Visitor> - // Visit field roots. - void VisitFieldRoots(Visitor& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitNativeRoots(Visitor& visitor, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // When class is verified, set the kAccPreverified flag on each method. - void SetPreverifiedFlagOnAllMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetPreverifiedFlagOnAllMethods(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <bool kVisitClass, typename Visitor> void VisitReferences(mirror::Class* klass, const Visitor& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Visit references within the embedded tables of the class. - // TODO: remove NO_THREAD_SAFETY_ANALYSIS when annotalysis handles visitors better. - template<typename Visitor> - void VisitEmbeddedImtAndVTable(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS; - // Get the descriptor of the class. In a few cases a std::string is required, rather than // always create one the storage argument is populated and its internal c_str() returned. 
We do // this to avoid memory allocation in the common case. @@ -1014,7 +1063,6 @@ class MANAGED Class FINAL : public Object { bool DescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -1037,8 +1085,8 @@ class MANAGED Class FINAL : public Object { void AssertInitializedOrInitializingInThread(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - Class* CopyOf(Thread* self, int32_t new_length, StackHandleScope<kImtSize>* imt_handle_scope) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize], + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // For proxy class only. ObjectArray<Class>* GetInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -1060,7 +1108,7 @@ class MANAGED Class FINAL : public Object { } // May cause thread suspension due to EqualParameters. 
- mirror::ArtMethod* GetDeclaredConstructor( + ArtMethod* GetDeclaredConstructor( Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -1085,6 +1133,20 @@ class MANAGED Class FINAL : public Object { return GetClassLoader() == nullptr; } + static size_t ImTableEntrySize(size_t pointer_size) { + return pointer_size; + } + + static size_t VTableEntrySize(size_t pointer_size) { + return pointer_size; + } + + ALWAYS_INLINE ArtMethod* GetDirectMethodsPtrUnchecked() + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtrUnchecked() + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + private: void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -1109,6 +1171,12 @@ class MANAGED Class FINAL : public Object { bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Check that the pointer size mathces the one in the class linker. + ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size); + + static MemberOffset EmbeddedImTableOffset(size_t pointer_size); + static MemberOffset EmbeddedVTableOffset(size_t pointer_size); + // Defining class loader, or null for the "bootstrap" system loader. HeapReference<ClassLoader> class_loader_; @@ -1123,9 +1191,6 @@ class MANAGED Class FINAL : public Object { // Short cuts to dex_cache_ member for fast compiled code access. HeapReference<ObjectArray<String>> dex_cache_strings_; - // static, private, and <init> methods - HeapReference<ObjectArray<ArtMethod>> direct_methods_; - // The interface table (iftable_) contains pairs of a interface class and an array of the // interface methods. There is one pair per interface supported by this class. 
That means one // pair for each interface we support directly, indirectly via superclass, or indirectly via a @@ -1148,19 +1213,19 @@ class MANAGED Class FINAL : public Object { // If class verify fails, we must return same error on subsequent tries. HeapReference<Class> verify_error_class_; - // Virtual methods defined in this class; invoked through vtable. - HeapReference<ObjectArray<ArtMethod>> virtual_methods_; - // Virtual method table (vtable), for use by "invoke-virtual". The vtable from the superclass is // copied in, and virtual methods from our class either replace those from the super or are // appended. For abstract classes, methods may be created in the vtable that aren't in // virtual_ methods_ for miranda methods. - HeapReference<ObjectArray<ArtMethod>> vtable_; + HeapReference<PointerArray> vtable_; // Access flags; low 16 bits are defined by VM spec. // Note: Shuffled back. uint32_t access_flags_; + // static, private, and <init> methods. Pointer to an ArtMethod array. + uint64_t direct_methods_; + // instance fields // // These describe the layout of the contents of an Object. @@ -1174,6 +1239,9 @@ class MANAGED Class FINAL : public Object { // Static fields uint64_t sfields_; + // Virtual methods defined in this class; invoked through vtable. Pointer to an ArtMethod array. + uint64_t virtual_methods_; + // Total size of the Class instance; used when allocating storage on gc heap. // See also object_size_. uint32_t class_size_; @@ -1189,7 +1257,10 @@ class MANAGED Class FINAL : public Object { // TODO: really 16bits int32_t dex_type_idx_; - // Number of static fields. + // Number of direct fields. + uint32_t num_direct_methods_; + + // Number of instance fields. uint32_t num_instance_fields_; // Number of instance fields that are object refs. @@ -1201,6 +1272,9 @@ class MANAGED Class FINAL : public Object { // Number of static fields. uint32_t num_static_fields_; + // Number of virtual methods. 
+ uint32_t num_virtual_methods_; + // Total object size; used when allocating storage on gc heap. // (For interfaces and abstract classes this will be zero.) // See also class_size_. diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h index 1cb437e8ea..4b5063acd2 100644 --- a/runtime/mirror/dex_cache-inl.h +++ b/runtime/mirror/dex_cache-inl.h @@ -20,6 +20,7 @@ #include "dex_cache.h" #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/logging.h" #include "mirror/class.h" #include "runtime.h" @@ -27,20 +28,9 @@ namespace art { namespace mirror { -inline uint32_t DexCache::ClassSize() { +inline uint32_t DexCache::ClassSize(size_t pointer_size) { uint32_t vtable_entries = Object::kVTableLength + 5; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0); -} - -inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ArtMethod* method = GetResolvedMethods()->Get(method_idx); - // Hide resolution trampoline methods from the caller - if (method != nullptr && method->IsRuntimeMethod()) { - DCHECK_EQ(method, Runtime::Current()->GetResolutionMethod()); - return nullptr; - } - return method; + return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size); } inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) { @@ -50,15 +40,8 @@ inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) { } inline ArtField* DexCache::GetResolvedField(uint32_t idx, size_t ptr_size) { - ArtField* field = nullptr; - if (ptr_size == 8) { - field = reinterpret_cast<ArtField*>( - static_cast<uintptr_t>(GetResolvedFields()->AsLongArray()->GetWithoutChecks(idx))); - } else { - DCHECK_EQ(ptr_size, 4u); - field = reinterpret_cast<ArtField*>( - static_cast<uintptr_t>(GetResolvedFields()->AsIntArray()->GetWithoutChecks(idx))); - } + DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size); + auto* field = 
GetResolvedFields()->GetElementPtrSize<ArtField*>(idx, ptr_size); if (field == nullptr || field->GetDeclaringClass()->IsErroneous()) { return nullptr; } @@ -66,15 +49,24 @@ inline ArtField* DexCache::GetResolvedField(uint32_t idx, size_t ptr_size) { } inline void DexCache::SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size) { - if (ptr_size == 8) { - GetResolvedFields()->AsLongArray()->Set( - idx, static_cast<uint64_t>(reinterpret_cast<uintptr_t>(field))); - } else { - DCHECK_EQ(ptr_size, 4u); - CHECK_LE(reinterpret_cast<uintptr_t>(field), 0xFFFFFFFF); - GetResolvedFields()->AsIntArray()->Set( - idx, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(field))); + DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size); + GetResolvedFields()->SetElementPtrSize(idx, field, ptr_size); +} + +inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, size_t ptr_size) { + DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size); + auto* method = GetResolvedMethods()->GetElementPtrSize<ArtMethod*>(method_idx, ptr_size); + // Hide resolution trampoline methods from the caller + if (method != nullptr && method->IsRuntimeMethod()) { + DCHECK_EQ(method, Runtime::Current()->GetResolutionMethod()); + return nullptr; } + return method; +} + +inline void DexCache::SetResolvedMethod(uint32_t idx, ArtMethod* method, size_t ptr_size) { + DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size); + GetResolvedMethods()->SetElementPtrSize(idx, method, ptr_size); } } // namespace mirror diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc index ade8bd27ec..630faee356 100644 --- a/runtime/mirror/dex_cache.cc +++ b/runtime/mirror/dex_cache.cc @@ -31,12 +31,9 @@ namespace art { namespace mirror { -void DexCache::Init(const DexFile* dex_file, - String* location, - ObjectArray<String>* strings, - ObjectArray<Class>* resolved_types, - ObjectArray<ArtMethod>* resolved_methods, - Array* 
resolved_fields) { +void DexCache::Init(const DexFile* dex_file, String* location, ObjectArray<String>* strings, + ObjectArray<Class>* resolved_types, PointerArray* resolved_methods, + PointerArray* resolved_fields, size_t pointer_size) { CHECK(dex_file != nullptr); CHECK(location != nullptr); CHECK(strings != nullptr); @@ -51,24 +48,21 @@ void DexCache::Init(const DexFile* dex_file, SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), resolved_types); SetFieldObject<false>(ResolvedMethodsOffset(), resolved_methods); - Runtime* runtime = Runtime::Current(); + Runtime* const runtime = Runtime::Current(); if (runtime->HasResolutionMethod()) { // Initialize the resolve methods array to contain trampolines for resolution. - ArtMethod* trampoline = runtime->GetResolutionMethod(); - for (size_t i = 0, length = resolved_methods->GetLength(); i < length; i++) { - resolved_methods->SetWithoutChecks<false>(i, trampoline); - } + Fixup(runtime->GetResolutionMethod(), pointer_size); } } -void DexCache::Fixup(ArtMethod* trampoline) { +void DexCache::Fixup(ArtMethod* trampoline, size_t pointer_size) { // Fixup the resolve methods array to contain trampoline for resolution. 
CHECK(trampoline != nullptr); - ObjectArray<ArtMethod>* resolved_methods = GetResolvedMethods(); - size_t length = resolved_methods->GetLength(); - for (size_t i = 0; i < length; i++) { - if (resolved_methods->GetWithoutChecks(i) == nullptr) { - resolved_methods->SetWithoutChecks<false>(i, trampoline); + CHECK(trampoline->IsRuntimeMethod()); + auto* resolved_methods = GetResolvedMethods(); + for (size_t i = 0, length = resolved_methods->GetLength(); i < length; i++) { + if (resolved_methods->GetElementPtrSize<ArtMethod*>(i, pointer_size) == nullptr) { + resolved_methods->SetElementPtrSize(i, trampoline, pointer_size); } } } diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h index 7e30b891de..0ce83ec746 100644 --- a/runtime/mirror/dex_cache.h +++ b/runtime/mirror/dex_cache.h @@ -17,6 +17,7 @@ #ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_ #define ART_RUNTIME_MIRROR_DEX_CACHE_H_ +#include "array.h" #include "art_field.h" #include "art_method.h" #include "class.h" @@ -38,22 +39,19 @@ class String; class MANAGED DexCache FINAL : public Object { public: // Size of java.lang.DexCache.class. - static uint32_t ClassSize(); + static uint32_t ClassSize(size_t pointer_size); // Size of an instance of java.lang.DexCache not including referenced values. 
static constexpr uint32_t InstanceSize() { return sizeof(DexCache); } - void Init(const DexFile* dex_file, - String* location, - ObjectArray<String>* strings, - ObjectArray<Class>* types, - ObjectArray<ArtMethod>* methods, - Array* fields) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Init(const DexFile* dex_file, String* location, ObjectArray<String>* strings, + ObjectArray<Class>* types, PointerArray* methods, PointerArray* fields, + size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Fixup(ArtMethod* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Fixup(ArtMethod* trampoline, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_)); @@ -109,19 +107,18 @@ class MANAGED DexCache FINAL : public Object { void SetResolvedType(uint32_t type_idx, Class* resolved) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* GetResolvedMethod(uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, size_t ptr_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - GetResolvedMethods()->Set(method_idx, resolved); - } + ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved, size_t ptr_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Pointer sized variant, used for patching. - ArtField* GetResolvedField(uint32_t idx, size_t ptr_size) + ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, size_t ptr_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Pointer sized variant, used for patching. 
- void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size) + ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -133,13 +130,12 @@ class MANAGED DexCache FINAL : public Object { OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_)); } - ObjectArray<ArtMethod>* GetResolvedMethods() ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject< ObjectArray<ArtMethod>>(ResolvedMethodsOffset()); + PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldObject<PointerArray>(ResolvedMethodsOffset()); } - Array* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject<Array>(ResolvedFieldsOffset()); + PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldObject<PointerArray>(ResolvedFieldsOffset()); } const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -154,9 +150,9 @@ class MANAGED DexCache FINAL : public Object { private: HeapReference<Object> dex_; HeapReference<String> location_; - // Either an int array or long array (64 bit). - HeapReference<Object> resolved_fields_; - HeapReference<ObjectArray<ArtMethod>> resolved_methods_; + // Either an int array or long array based on runtime ISA since these arrays hold pointers. 
+ HeapReference<PointerArray> resolved_fields_; + HeapReference<PointerArray> resolved_methods_; HeapReference<ObjectArray<Class>> resolved_types_; HeapReference<ObjectArray<String>> strings_; uint64_t dex_file_; diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h index 388921b44e..8a0daec4c2 100644 --- a/runtime/mirror/field-inl.h +++ b/runtime/mirror/field-inl.h @@ -50,14 +50,14 @@ inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, } } auto ret = hs.NewHandle(static_cast<Field*>(StaticClass()->AllocObject(self))); - if (ret.Get() == nullptr) { - if (kIsDebugBuild) { - self->AssertPendingException(); - } + if (UNLIKELY(ret.Get() == nullptr)) { + self->AssertPendingOOMException(); return nullptr; } + const auto pointer_size = kTransactionActive ? + Runtime::Current()->GetClassLinker()->GetImagePointerSize() : sizeof(void*); auto dex_field_index = field->GetDexFieldIndex(); - auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, sizeof(void*)); + auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, pointer_size); if (field->GetDeclaringClass()->IsProxyClass()) { DCHECK(field->IsStatic()); DCHECK_LT(dex_field_index, 2U); @@ -70,7 +70,7 @@ inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, } else { // We rely on the field being resolved so that we can back to the ArtField // (i.e. FromReflectedMethod). 
- field->GetDexCache()->SetResolvedField(dex_field_index, field, sizeof(void*)); + field->GetDexCache()->SetResolvedField(dex_field_index, field, pointer_size); } } ret->SetType<kTransactionActive>(type.Get()); diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc index ac56129a16..02e4484a98 100644 --- a/runtime/mirror/field.cc +++ b/runtime/mirror/field.cc @@ -16,6 +16,7 @@ #include "field-inl.h" +#include "class-inl.h" #include "dex_cache-inl.h" #include "object_array-inl.h" #include "object-inl.h" diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h index 1c1c7b30eb..1ea5beeae3 100644 --- a/runtime/mirror/iftable.h +++ b/runtime/mirror/iftable.h @@ -34,27 +34,22 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> { ALWAYS_INLINE void SetInterface(int32_t i, Class* interface) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ObjectArray<ArtMethod>* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectArray<ArtMethod>* method_array = - down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray)); + PointerArray* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray)); DCHECK(method_array != nullptr); return method_array; } size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ObjectArray<ArtMethod>* method_array = - down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray)); - if (method_array == nullptr) { - return 0; - } - return method_array->GetLength(); + auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray)); + return method_array == nullptr ? 
0u : method_array->GetLength(); } - void SetMethodArray(int32_t i, ObjectArray<ArtMethod>* new_ma) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(new_ma != nullptr); - DCHECK(Get((i * kMax) + kMethodArray) == nullptr); - Set<false>((i * kMax) + kMethodArray, new_ma); + void SetMethodArray(int32_t i, PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(arr != nullptr); + auto idx = i * kMax + kMethodArray; + DCHECK(Get(idx) == nullptr); + Set<false>(idx, arr); } size_t Count() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc index 81530bb130..85c52e95df 100644 --- a/runtime/mirror/method.cc +++ b/runtime/mirror/method.cc @@ -16,7 +16,9 @@ #include "method.h" -#include "mirror/art_method.h" +#include "art_method.h" +#include "gc_root-inl.h" +#include "mirror/class-inl.h" #include "mirror/object-inl.h" namespace art { @@ -49,7 +51,7 @@ void Method::ResetArrayClass() { array_class_ = GcRoot<Class>(nullptr); } -Method* Method::CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) { +Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) { DCHECK(!method->IsConstructor()) << PrettyMethod(method); auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self)); if (LIKELY(ret != nullptr)) { @@ -90,7 +92,7 @@ void Constructor::VisitRoots(RootVisitor* visitor) { array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass)); } -Constructor* Constructor::CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) { +Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) { DCHECK(method->IsConstructor()) << PrettyMethod(method); auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self)); if (LIKELY(ret != nullptr)) { diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h index 88100f08e2..42c76c045c 100644 --- a/runtime/mirror/method.h +++ b/runtime/mirror/method.h @@ -28,7 +28,7 @@ class Class; // C++ mirror of 
java.lang.reflect.Method. class MANAGED Method : public AbstractMethod { public: - static Method* CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) + static Method* CreateFromArtMethod(Thread* self, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { @@ -59,7 +59,7 @@ class MANAGED Method : public AbstractMethod { // C++ mirror of java.lang.reflect.Constructor. class MANAGED Constructor: public AbstractMethod { public: - static Constructor* CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) + static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index 39d0f5664f..05c44e51cc 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -24,6 +24,7 @@ #include "atomic.h" #include "array-inl.h" #include "class.h" +#include "class_linker.h" #include "lock_word-inl.h" #include "monitor.h" #include "object_array-inl.h" @@ -36,9 +37,9 @@ namespace art { namespace mirror { -inline uint32_t Object::ClassSize() { +inline uint32_t Object::ClassSize(size_t pointer_size) { uint32_t vtable_entries = kVTableLength; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0); + return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size); } template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> @@ -253,18 +254,6 @@ inline bool Object::IsArrayInstance() { template IsArrayClass<kVerifyFlags, kReadBarrierOption>(); } -template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> -inline bool Object::IsArtMethod() { - return GetClass<kVerifyFlags, kReadBarrierOption>()-> - template IsArtMethodClass<kReadBarrierOption>(); -} - -template<VerifyObjectFlags kVerifyFlags> 
-inline ArtMethod* Object::AsArtMethod() { - DCHECK(IsArtMethod<kVerifyFlags>()); - return down_cast<ArtMethod*>(this); -} - template<VerifyObjectFlags kVerifyFlags> inline bool Object::IsReferenceInstance() { return GetClass<kVerifyFlags>()->IsTypeOfReferenceClass(); @@ -292,7 +281,7 @@ inline BooleanArray* Object::AsBooleanArray() { template<VerifyObjectFlags kVerifyFlags> inline ByteArray* Object::AsByteArray() { - static const VerifyObjectFlags kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); + constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); DCHECK(GetClass<kVerifyFlags>()->IsArrayClass()); DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte()); return down_cast<ByteArray*>(this); @@ -300,7 +289,7 @@ inline ByteArray* Object::AsByteArray() { template<VerifyObjectFlags kVerifyFlags> inline ByteArray* Object::AsByteSizedArray() { - constexpr VerifyObjectFlags kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); + constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); DCHECK(GetClass<kVerifyFlags>()->IsArrayClass()); DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte() || GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveBoolean()); @@ -333,25 +322,41 @@ inline ShortArray* Object::AsShortSizedArray() { } template<VerifyObjectFlags kVerifyFlags> -inline IntArray* Object::AsIntArray() { +inline bool Object::IsIntArray() { constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); - CHECK(GetClass<kVerifyFlags>()->IsArrayClass()); - CHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveInt() || - GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat()); + auto* component_type = GetClass<kVerifyFlags>()->GetComponentType(); + return component_type != nullptr && 
component_type->template IsPrimitiveInt<kNewFlags>(); +} + +template<VerifyObjectFlags kVerifyFlags> +inline IntArray* Object::AsIntArray() { + DCHECK(IsIntArray<kVerifyFlags>()); return down_cast<IntArray*>(this); } template<VerifyObjectFlags kVerifyFlags> -inline LongArray* Object::AsLongArray() { +inline bool Object::IsLongArray() { constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); - CHECK(GetClass<kVerifyFlags>()->IsArrayClass()); - CHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveLong() || - GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble()); + auto* component_type = GetClass<kVerifyFlags>()->GetComponentType(); + return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>(); +} + +template<VerifyObjectFlags kVerifyFlags> +inline LongArray* Object::AsLongArray() { + DCHECK(IsLongArray<kVerifyFlags>()); return down_cast<LongArray*>(this); } template<VerifyObjectFlags kVerifyFlags> +inline bool Object::IsFloatArray() { + constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); + auto* component_type = GetClass<kVerifyFlags>()->GetComponentType(); + return component_type != nullptr && component_type->template IsPrimitiveFloat<kNewFlags>(); +} + +template<VerifyObjectFlags kVerifyFlags> inline FloatArray* Object::AsFloatArray() { + DCHECK(IsFloatArray<kVerifyFlags>()); constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); DCHECK(GetClass<kVerifyFlags>()->IsArrayClass()); DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat()); @@ -359,7 +364,15 @@ inline FloatArray* Object::AsFloatArray() { } template<VerifyObjectFlags kVerifyFlags> +inline bool Object::IsDoubleArray() { + constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); + auto* component_type = GetClass<kVerifyFlags>()->GetComponentType(); + return 
component_type != nullptr && component_type->template IsPrimitiveDouble<kNewFlags>(); +} + +template<VerifyObjectFlags kVerifyFlags> inline DoubleArray* Object::AsDoubleArray() { + DCHECK(IsDoubleArray<kVerifyFlags>()); constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); DCHECK(GetClass<kVerifyFlags>()->IsArrayClass()); DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble()); @@ -950,8 +963,11 @@ inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& v if (num_reference_fields == 0u) { continue; } + // Presumably GC can happen when we are cross compiling, it should not cause performance + // problems to do pointer size logic. MemberOffset field_offset = kIsStatic - ? klass->GetFirstReferenceStaticFieldOffset() + ? klass->GetFirstReferenceStaticFieldOffset( + Runtime::Current()->GetClassLinker()->GetImagePointerSize()) : klass->GetFirstReferenceInstanceFieldOffset(); for (size_t i = 0; i < num_reference_fields; ++i) { // TODO: Do a simpler check? 
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index f9740bbfae..b177e2f579 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -106,9 +106,8 @@ class CopyObjectVisitor { : self_(self), orig_(orig), num_bytes_(num_bytes) { } - void operator()(Object* obj, size_t usable_size) const + void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - UNUSED(usable_size); Object::CopyObject(self_, obj, orig_->Get(), num_bytes_); } diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index 5afe99f3f8..60c756ad2b 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -25,6 +25,7 @@ namespace art { class ArtField; +class ArtMethod; class ImageWriter; class LockWord; class Monitor; @@ -34,7 +35,6 @@ class VoidFunctor; namespace mirror { -class ArtMethod; class Array; class Class; class FinalizerReference; @@ -71,7 +71,7 @@ class MANAGED LOCKABLE Object { static constexpr size_t kVTableLength = 11; // The size of the java.lang.Class representing a java.lang.Object. - static uint32_t ClassSize(); + static uint32_t ClassSize(size_t pointer_size); // Size of an instance of java.lang.Object. 
static constexpr uint32_t InstanceSize() { @@ -176,12 +176,22 @@ class MANAGED LOCKABLE Object { ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool IsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool IsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool IsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> FloatArray* AsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool IsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -196,12 +206,6 @@ class MANAGED LOCKABLE Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, - ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ArtMethod* AsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> @@ -429,7 +433,7 @@ class MANAGED LOCKABLE 
Object { field_offset, static_cast<int32_t>(ptr)); } else { SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>( - field_offset, static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value))); + field_offset, static_cast<int64_t>(reinterpret_cast<uintptr_t>(new_value))); } } // TODO fix thread safety analysis broken by the use of template. This should be @@ -463,8 +467,8 @@ class MANAGED LOCKABLE Object { } else { int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset); // Check that we dont lose any non 0 bits. - DCHECK_EQ(reinterpret_cast<int64_t>(reinterpret_cast<T>(v)), v); - return reinterpret_cast<T>(v); + DCHECK_EQ(static_cast<int64_t>(static_cast<uintptr_t>(v)), v); + return reinterpret_cast<T>(static_cast<uintptr_t>(v)); } } diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h index 6404faf3f5..5eddc18745 100644 --- a/runtime/mirror/object_array.h +++ b/runtime/mirror/object_array.h @@ -26,8 +26,8 @@ template<class T> class MANAGED ObjectArray: public Array { public: // The size of Object[].class. 
- static uint32_t ClassSize() { - return Array::ClassSize(); + static uint32_t ClassSize(size_t pointer_size) { + return Array::ClassSize(pointer_size); } static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length, diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc index 8e50a7a8fc..85ea28f9f5 100644 --- a/runtime/mirror/object_test.cc +++ b/runtime/mirror/object_test.cc @@ -77,9 +77,9 @@ class ObjectTest : public CommonRuntimeTest { TEST_F(ObjectTest, Constants) { EXPECT_EQ(kObjectReferenceSize, sizeof(HeapReference<Object>)); EXPECT_EQ(kObjectHeaderSize, sizeof(Object)); - EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32, + EXPECT_EQ(ART_METHOD_QUICK_CODE_OFFSET_32, ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()); - EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64, + EXPECT_EQ(ART_METHOD_QUICK_CODE_OFFSET_64, ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value()); } @@ -306,7 +306,7 @@ TEST_F(ObjectTest, CheckAndAllocArrayFromCode) { // pretend we are trying to call 'new char[3]' from String.toCharArray ScopedObjectAccess soa(Thread::Current()); Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;"); - ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V"); + ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V", sizeof(void*)); const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I"); ASSERT_TRUE(string_id != nullptr); const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId( @@ -366,7 +366,7 @@ TEST_F(ObjectTest, StaticFieldFromCode) { StackHandleScope<2> hs(soa.Self()); Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<ClassLoader*>(class_loader))); Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader); - ArtMethod* clinit = klass->FindClassInitializer(); + ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*)); const DexFile::StringId* 
klass_string_id = dex_file->FindStringId("LStaticsFromCode;"); ASSERT_TRUE(klass_string_id != nullptr); const DexFile::TypeId* klass_type_id = dex_file->FindTypeId( @@ -508,22 +508,22 @@ TEST_F(ObjectTest, DescriptorCompare) { Class* klass2 = linker->FindClass(soa.Self(), "LProtoCompare2;", class_loader_2); ASSERT_TRUE(klass2 != nullptr); - ArtMethod* m1_1 = klass1->GetVirtualMethod(0); + ArtMethod* m1_1 = klass1->GetVirtualMethod(0, sizeof(void*)); EXPECT_STREQ(m1_1->GetName(), "m1"); - ArtMethod* m2_1 = klass1->GetVirtualMethod(1); + ArtMethod* m2_1 = klass1->GetVirtualMethod(1, sizeof(void*)); EXPECT_STREQ(m2_1->GetName(), "m2"); - ArtMethod* m3_1 = klass1->GetVirtualMethod(2); + ArtMethod* m3_1 = klass1->GetVirtualMethod(2, sizeof(void*)); EXPECT_STREQ(m3_1->GetName(), "m3"); - ArtMethod* m4_1 = klass1->GetVirtualMethod(3); + ArtMethod* m4_1 = klass1->GetVirtualMethod(3, sizeof(void*)); EXPECT_STREQ(m4_1->GetName(), "m4"); - ArtMethod* m1_2 = klass2->GetVirtualMethod(0); + ArtMethod* m1_2 = klass2->GetVirtualMethod(0, sizeof(void*)); EXPECT_STREQ(m1_2->GetName(), "m1"); - ArtMethod* m2_2 = klass2->GetVirtualMethod(1); + ArtMethod* m2_2 = klass2->GetVirtualMethod(1, sizeof(void*)); EXPECT_STREQ(m2_2->GetName(), "m2"); - ArtMethod* m3_2 = klass2->GetVirtualMethod(2); + ArtMethod* m3_2 = klass2->GetVirtualMethod(2, sizeof(void*)); EXPECT_STREQ(m3_2->GetName(), "m3"); - ArtMethod* m4_2 = klass2->GetVirtualMethod(3); + ArtMethod* m4_2 = klass2->GetVirtualMethod(3, sizeof(void*)); EXPECT_STREQ(m4_2->GetName(), "m4"); } diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h index d1d2a3af76..01e99b9e9d 100644 --- a/runtime/mirror/reference-inl.h +++ b/runtime/mirror/reference-inl.h @@ -22,9 +22,9 @@ namespace art { namespace mirror { -inline uint32_t Reference::ClassSize() { +inline uint32_t Reference::ClassSize(size_t pointer_size) { uint32_t vtable_entries = Object::kVTableLength + 5; - return Class::ComputeClassSize(false, vtable_entries, 2, 
0, 0, 0, 0); + return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size); } inline bool Reference::IsEnqueuable() { diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc index 70bcf92e7d..3c7f8c8e63 100644 --- a/runtime/mirror/reference.cc +++ b/runtime/mirror/reference.cc @@ -16,7 +16,7 @@ #include "reference.h" -#include "mirror/art_method.h" +#include "art_method.h" #include "gc_root-inl.h" namespace art { diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h index c11d79dfff..4bbdb99553 100644 --- a/runtime/mirror/reference.h +++ b/runtime/mirror/reference.h @@ -42,7 +42,7 @@ namespace mirror { class MANAGED Reference : public Object { public: // Size of java.lang.ref.Reference.class. - static uint32_t ClassSize(); + static uint32_t ClassSize(size_t pointer_size); // Size of an instance of java.lang.ref.Reference. static constexpr uint32_t InstanceSize() { diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h index 35b8aef10a..9f6cd11c3e 100644 --- a/runtime/mirror/string-inl.h +++ b/runtime/mirror/string-inl.h @@ -30,9 +30,9 @@ namespace art { namespace mirror { -inline uint32_t String::ClassSize() { +inline uint32_t String::ClassSize(size_t pointer_size) { uint32_t vtable_entries = Object::kVTableLength + 52; - return Class::ComputeClassSize(true, vtable_entries, 0, 1, 0, 1, 2); + return Class::ComputeClassSize(true, vtable_entries, 0, 1, 0, 1, 2, pointer_size); } // Sets string count in the allocation code path to ensure it is guarded by a CAS. diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h index fcfe97678f..a8f16d78ff 100644 --- a/runtime/mirror/string.h +++ b/runtime/mirror/string.h @@ -34,7 +34,7 @@ namespace mirror { class MANAGED String FINAL : public Object { public: // Size of java.lang.String.class. - static uint32_t ClassSize(); + static uint32_t ClassSize(size_t pointer_size); // Size of an instance of java.lang.String not including its value array. 
static constexpr uint32_t InstanceSize() { diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc index 782b9c0762..1c21edbc42 100644 --- a/runtime/mirror/throwable.cc +++ b/runtime/mirror/throwable.cc @@ -71,9 +71,18 @@ bool Throwable::IsCheckedException() { int32_t Throwable::GetStackDepth() { Object* stack_state = GetStackState(); - if (stack_state == nullptr || !stack_state->IsObjectArray()) return -1; - ObjectArray<Object>* method_trace = down_cast<ObjectArray<Object>*>(stack_state); - return method_trace->GetLength() - 1; + if (stack_state == nullptr) { + return -1; + } + if (!stack_state->IsIntArray() && !stack_state->IsLongArray()) { + return -1; + } + mirror::PointerArray* method_trace = down_cast<mirror::PointerArray*>(stack_state->AsArray()); + int32_t array_len = method_trace->GetLength(); + // The format is [method pointers][pcs] so the depth is half the length (see method + // BuildInternalStackTraceVisitor::Init). + CHECK_EQ(array_len % 2, 0); + return array_len / 2; } std::string Throwable::Dump() { @@ -86,17 +95,21 @@ std::string Throwable::Dump() { result += "\n"; Object* stack_state = GetStackState(); // check stack state isn't missing or corrupt - if (stack_state != nullptr && stack_state->IsObjectArray()) { + if (stack_state != nullptr && + (stack_state->IsIntArray() || stack_state->IsLongArray())) { // Decode the internal stack trace into the depth and method trace - ObjectArray<Object>* method_trace = down_cast<ObjectArray<Object>*>(stack_state); - int32_t depth = method_trace->GetLength() - 1; - IntArray* pc_trace = down_cast<IntArray*>(method_trace->Get(depth)); + // Format is [method pointers][pcs] + auto* method_trace = down_cast<mirror::PointerArray*>(stack_state->AsArray()); + auto array_len = method_trace->GetLength(); + CHECK_EQ(array_len % 2, 0); + const auto depth = array_len / 2; if (depth == 0) { result += "(Throwable with empty stack trace)"; } else { + auto ptr_size = 
Runtime::Current()->GetClassLinker()->GetImagePointerSize(); for (int32_t i = 0; i < depth; ++i) { - mirror::ArtMethod* method = down_cast<ArtMethod*>(method_trace->Get(i)); - uint32_t dex_pc = pc_trace->Get(i); + ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, ptr_size); + uintptr_t dex_pc = method_trace->GetElementPtrSize<uintptr_t>(i + depth, ptr_size); int32_t line_number = method->GetLineNumFromDexPC(dex_pc); const char* source_file = method->GetDeclaringClassSourceFile(); result += StringPrintf(" at %s (%s:%d)\n", PrettyMethod(method, true).c_str(), @@ -108,8 +121,7 @@ std::string Throwable::Dump() { if (stack_trace != nullptr && stack_trace->IsObjectArray()) { CHECK_EQ(stack_trace->GetClass()->GetComponentType(), StackTraceElement::GetStackTraceElement()); - ObjectArray<StackTraceElement>* ste_array = - down_cast<ObjectArray<StackTraceElement>*>(stack_trace); + auto* ste_array = down_cast<ObjectArray<StackTraceElement>*>(stack_trace); if (ste_array->GetLength() == 0) { result += "(Throwable with empty stack trace)"; } else { diff --git a/runtime/monitor.cc b/runtime/monitor.cc index dc016a5006..4be25d6946 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -21,6 +21,7 @@ #include <cutils/trace.h> #include <vector> +#include "art_method-inl.h" #include "base/mutex.h" #include "base/stl_util.h" #include "base/time_utils.h" @@ -28,7 +29,6 @@ #include "dex_file-inl.h" #include "dex_instruction.h" #include "lock_word-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" @@ -245,7 +245,7 @@ void Monitor::Lock(Thread* self) { // Contended. const bool log_contention = (lock_profiling_threshold_ != 0); uint64_t wait_start_ms = log_contention ? 
MilliTime() : 0; - mirror::ArtMethod* owners_method = locking_method_; + ArtMethod* owners_method = locking_method_; uint32_t owners_dex_pc = locking_dex_pc_; // Do this before releasing the lock so that we don't get deflated. size_t num_waiters = num_waiters_; @@ -449,7 +449,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, int prev_lock_count = lock_count_; lock_count_ = 0; owner_ = nullptr; - mirror::ArtMethod* saved_method = locking_method_; + ArtMethod* saved_method = locking_method_; locking_method_ = nullptr; uintptr_t saved_dex_pc = locking_dex_pc_; locking_dex_pc_ = 0; @@ -994,14 +994,15 @@ mirror::Object* Monitor::GetContendedMonitor(Thread* thread) { void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*), void* callback_context, bool abort_on_failure) { - mirror::ArtMethod* m = stack_visitor->GetMethod(); + ArtMethod* m = stack_visitor->GetMethod(); CHECK(m != nullptr); // Native methods are an easy special case. // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too. 
if (m->IsNative()) { if (m->IsSynchronized()) { - mirror::Object* jni_this = stack_visitor->GetCurrentHandleScope()->GetReference(0); + mirror::Object* jni_this = + stack_visitor->GetCurrentHandleScope(sizeof(void*))->GetReference(0); callback(jni_this, callback_context); } return; @@ -1087,7 +1088,7 @@ bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return owner_ != nullptr; } -void Monitor::TranslateLocation(mirror::ArtMethod* method, uint32_t dex_pc, +void Monitor::TranslateLocation(ArtMethod* method, uint32_t dex_pc, const char** source_file, uint32_t* line_number) const { // If method is null, location is unknown if (method == nullptr) { diff --git a/runtime/monitor.h b/runtime/monitor.h index b7245c1439..8f3a91d7f6 100644 --- a/runtime/monitor.h +++ b/runtime/monitor.h @@ -36,14 +36,14 @@ namespace art { +class ArtMethod; class LockWord; template<class T> class Handle; -class Thread; class StackVisitor; +class Thread; typedef uint32_t MonitorId; namespace mirror { - class ArtMethod; class Object; } // namespace mirror @@ -226,7 +226,7 @@ class Monitor { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Translates the provided method and pc into its declaring class' source file and line number. - void TranslateLocation(mirror::ArtMethod* method, uint32_t pc, + void TranslateLocation(ArtMethod* method, uint32_t pc, const char** source_file, uint32_t* line_number) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -262,7 +262,7 @@ class Monitor { // Method and dex pc where the lock owner acquired the lock, used when lock // sampling is enabled. locking_method_ may be null if the lock is currently // unlocked, or if the lock is acquired by the system when the stack is empty. - mirror::ArtMethod* locking_method_ GUARDED_BY(monitor_lock_); + ArtMethod* locking_method_ GUARDED_BY(monitor_lock_); uint32_t locking_dex_pc_ GUARDED_BY(monitor_lock_); // The denser encoded version of this monitor as stored in the lock word. 
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc index 48c9cceff4..efe2e823d9 100644 --- a/runtime/monitor_android.cc +++ b/runtime/monitor_android.cc @@ -78,7 +78,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample // Emit the source code file name, <= 37 bytes. uint32_t pc; - mirror::ArtMethod* m = self->GetCurrentMethod(&pc); + ArtMethod* m = self->GetCurrentMethod(&pc); const char* filename; uint32_t line_number; TranslateLocation(m, pc, &filename, &line_number); diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index a172197d71..5dd354d4d6 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -27,6 +27,7 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version); #include "toStringArray.h" #pragma GCC diagnostic pop +#include "art_method-inl.h" #include "arch/instruction_set.h" #include "class_linker-inl.h" #include "common_throws.h" @@ -40,7 +41,6 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version); #include "gc/task_processor.h" #include "intern_table.h" #include "jni_internal.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" #include "mirror/object-inl.h" @@ -350,7 +350,7 @@ static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uin static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx, InvokeType invoke_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx); + ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, sizeof(void*)); if (method != nullptr) { return; } @@ -363,14 +363,14 @@ static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, ui switch (invoke_type) { case kDirect: case kStatic: - method = klass->FindDirectMethod(dex_cache.Get(), 
method_idx); + method = klass->FindDirectMethod(dex_cache.Get(), method_idx, sizeof(void*)); break; case kInterface: - method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx); + method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, sizeof(void*)); break; case kSuper: case kVirtual: - method = klass->FindVirtualMethod(dex_cache.Get(), method_idx); + method = klass->FindVirtualMethod(dex_cache.Get(), method_idx, sizeof(void*)); break; default: LOG(FATAL) << "Unreachable - invocation type: " << invoke_type; @@ -380,7 +380,7 @@ static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, ui return; } // LOG(INFO) << "VMRuntime.preloadDexCaches resolved method " << PrettyMethod(method); - dex_cache->SetResolvedMethod(method_idx, method); + dex_cache->SetResolvedMethod(method_idx, method, sizeof(void*)); } struct DexCacheStats { @@ -452,7 +452,7 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled) } } for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) { - mirror::ArtMethod* method = dex_cache->GetResolvedMethod(j); + ArtMethod* method = dex_cache->GetResolvedMethod(j, sizeof(void*)); if (method != nullptr) { filled->num_methods++; } diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc index 1d7d853431..ee62755ae4 100644 --- a/runtime/native/dalvik_system_VMStack.cc +++ b/runtime/native/dalvik_system_VMStack.cc @@ -16,9 +16,9 @@ #include "dalvik_system_VMStack.h" +#include "art_method-inl.h" #include "jni_internal.h" #include "nth_caller_visitor.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/object-inl.h" @@ -90,10 +90,13 @@ static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass) { bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(class_loader == nullptr); mirror::Class* c = GetMethod()->GetDeclaringClass(); - mirror::Object* cl = c->GetClassLoader(); - if (cl != 
nullptr) { - class_loader = cl; - return false; + // c is null for runtime methods. + if (c != nullptr) { + mirror::Object* cl = c->GetClassLoader(); + if (cl != nullptr) { + class_loader = cl; + return false; + } } return true; } diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index 795a0eadca..94024ef4b2 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -273,7 +273,7 @@ static jobject Class_getDeclaredConstructorInternal( return nullptr; } -static ALWAYS_INLINE inline bool MethodMatchesConstructor(mirror::ArtMethod* m, bool public_only) +static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(m != nullptr); return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor(); @@ -283,14 +283,11 @@ static jobjectArray Class_getDeclaredConstructorsInternal( JNIEnv* env, jobject javaThis, jboolean publicOnly) { ScopedFastNativeObjectAccess soa(env); auto* klass = DecodeClass(soa, javaThis); - StackHandleScope<2> hs(soa.Self()); - auto h_direct_methods = hs.NewHandle(klass->GetDirectMethods()); + StackHandleScope<1> hs(soa.Self()); size_t constructor_count = 0; - auto count = h_direct_methods.Get() != nullptr ? h_direct_methods->GetLength() : 0u; // Two pass approach for speed. - for (size_t i = 0; i < count; ++i) { - constructor_count += MethodMatchesConstructor(h_direct_methods->GetWithoutChecks(i), - publicOnly != JNI_FALSE) ? 1u : 0u; + for (auto& m : klass->GetDirectMethods(sizeof(void*))) { + constructor_count += MethodMatchesConstructor(&m, publicOnly != JNI_FALSE) ? 
1u : 0u; } auto h_constructors = hs.NewHandle(mirror::ObjectArray<mirror::Constructor>::Alloc( soa.Self(), mirror::Constructor::ArrayClass(), constructor_count)); @@ -299,12 +296,11 @@ static jobjectArray Class_getDeclaredConstructorsInternal( return nullptr; } constructor_count = 0; - for (size_t i = 0; i < count; ++i) { - auto* method = h_direct_methods->GetWithoutChecks(i); - if (MethodMatchesConstructor(method, publicOnly != JNI_FALSE)) { - auto* constructor = mirror::Constructor::CreateFromArtMethod(soa.Self(), method); + for (auto& m : klass->GetDirectMethods(sizeof(void*))) { + if (MethodMatchesConstructor(&m, publicOnly != JNI_FALSE)) { + auto* constructor = mirror::Constructor::CreateFromArtMethod(soa.Self(), &m); if (UNLIKELY(constructor == nullptr)) { - soa.Self()->AssertPendingException(); + soa.Self()->AssertPendingOOMException(); return nullptr; } h_constructors->SetWithoutChecks<false>(constructor_count++, constructor); @@ -323,7 +319,7 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis, // were synthesized by the runtime. 
constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic; ScopedFastNativeObjectAccess soa(env); - StackHandleScope<5> hs(soa.Self()); + StackHandleScope<4> hs(soa.Self()); auto h_method_name = hs.NewHandle(soa.Decode<mirror::String*>(name)); if (UNLIKELY(h_method_name.Get() == nullptr)) { ThrowNullPointerException("name == null"); @@ -331,60 +327,49 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis, } auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args)); auto* klass = DecodeClass(soa, javaThis); - mirror::ArtMethod* result = nullptr; - auto* virtual_methods = klass->GetVirtualMethods(); - if (virtual_methods != nullptr) { - auto h_virtual_methods = hs.NewHandle(virtual_methods); - for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) { - auto* m = h_virtual_methods->GetWithoutChecks(i); - auto* np_method = m->GetInterfaceMethodIfProxy(); + ArtMethod* result = nullptr; + for (auto& m : klass->GetVirtualMethods(sizeof(void*))) { + auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*)); + // May cause thread suspension. + mirror::String* np_name = np_method->GetNameAsString(soa.Self()); + if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) { + if (UNLIKELY(soa.Self()->IsExceptionPending())) { + return nullptr; + } + continue; + } + auto modifiers = m.GetAccessFlags(); + if ((modifiers & kSkipModifiers) == 0) { + return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m)); + } + if ((modifiers & kAccMiranda) == 0) { + result = &m; // Remember as potential result if it's not a miranda method. + } + } + if (result == nullptr) { + for (auto& m : klass->GetDirectMethods(sizeof(void*))) { + auto modifiers = m.GetAccessFlags(); + if ((modifiers & kAccConstructor) != 0) { + continue; + } + auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*)); // May cause thread suspension. 
mirror::String* np_name = np_method->GetNameAsString(soa.Self()); + if (np_name == nullptr) { + soa.Self()->AssertPendingException(); + return nullptr; + } if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) { if (UNLIKELY(soa.Self()->IsExceptionPending())) { return nullptr; } continue; } - auto modifiers = m->GetAccessFlags(); if ((modifiers & kSkipModifiers) == 0) { - return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), m)); - } - if ((modifiers & kAccMiranda) == 0) { - result = m; // Remember as potential result if it's not a miranda method. - } - } - } - if (result == nullptr) { - auto* direct_methods = klass->GetDirectMethods(); - if (direct_methods != nullptr) { - auto h_direct_methods = hs.NewHandle(direct_methods); - for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) { - auto* m = h_direct_methods->GetWithoutChecks(i); - auto modifiers = m->GetAccessFlags(); - if ((modifiers & kAccConstructor) != 0) { - continue; - } - auto* np_method = m->GetInterfaceMethodIfProxy(); - // May cause thread suspension. - mirror::String* np_name = np_method ->GetNameAsString(soa.Self()); - if (np_name == nullptr) { - soa.Self()->AssertPendingException(); - return nullptr; - } - if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) { - if (UNLIKELY(soa.Self()->IsExceptionPending())) { - return nullptr; - } - continue; - } - if ((modifiers & kSkipModifiers) == 0) { - return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod( - soa.Self(), m)); - } - // Direct methods cannot be miranda methods, so this potential result must be synthetic. - result = m; + return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m)); } + // Direct methods cannot be miranda methods, so this potential result must be synthetic. + result = &m; } } return result != nullptr ? 
@@ -395,64 +380,50 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis, static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis, jboolean publicOnly) { ScopedFastNativeObjectAccess soa(env); - StackHandleScope<5> hs(soa.Self()); + StackHandleScope<3> hs(soa.Self()); auto* klass = DecodeClass(soa, javaThis); - auto virtual_methods = hs.NewHandle(klass->GetVirtualMethods()); - auto direct_methods = hs.NewHandle(klass->GetDirectMethods()); size_t num_methods = 0; - if (virtual_methods.Get() != nullptr) { - for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) { - auto* m = virtual_methods->GetWithoutChecks(i); - auto modifiers = m->GetAccessFlags(); - if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) && - (modifiers & kAccMiranda) == 0) { - ++num_methods; - } + for (auto& m : klass->GetVirtualMethods(sizeof(void*))) { + auto modifiers = m.GetAccessFlags(); + if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) && + (modifiers & kAccMiranda) == 0) { + ++num_methods; } } - if (direct_methods.Get() != nullptr) { - for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) { - auto* m = direct_methods->GetWithoutChecks(i); - auto modifiers = m->GetAccessFlags(); - // Add non-constructor direct/static methods. - if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) && - (modifiers & kAccConstructor) == 0) { - ++num_methods; - } + for (auto& m : klass->GetDirectMethods(sizeof(void*))) { + auto modifiers = m.GetAccessFlags(); + // Add non-constructor direct/static methods. 
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) && + (modifiers & kAccConstructor) == 0) { + ++num_methods; } } auto ret = hs.NewHandle(mirror::ObjectArray<mirror::Method>::Alloc( soa.Self(), mirror::Method::ArrayClass(), num_methods)); num_methods = 0; - if (virtual_methods.Get() != nullptr) { - for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) { - auto* m = virtual_methods->GetWithoutChecks(i); - auto modifiers = m->GetAccessFlags(); - if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) && - (modifiers & kAccMiranda) == 0) { - auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), m); - if (method == nullptr) { - soa.Self()->AssertPendingException(); - return nullptr; - } - ret->SetWithoutChecks<false>(num_methods++, method); + for (auto& m : klass->GetVirtualMethods(sizeof(void*))) { + auto modifiers = m.GetAccessFlags(); + if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) && + (modifiers & kAccMiranda) == 0) { + auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), &m); + if (method == nullptr) { + soa.Self()->AssertPendingException(); + return nullptr; } + ret->SetWithoutChecks<false>(num_methods++, method); } } - if (direct_methods.Get() != nullptr) { - for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) { - auto* m = direct_methods->GetWithoutChecks(i); - auto modifiers = m->GetAccessFlags(); - // Add non-constructor direct/static methods. - if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) && - (modifiers & kAccConstructor) == 0) { - auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), m); - if (method == nullptr) { - soa.Self()->AssertPendingException(); - return nullptr; - } - ret->SetWithoutChecks<false>(num_methods++, method); + for (auto& m : klass->GetDirectMethods(sizeof(void*))) { + auto modifiers = m.GetAccessFlags(); + // Add non-constructor direct/static methods. 
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) && + (modifiers & kAccConstructor) == 0) { + auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), &m); + if (method == nullptr) { + soa.Self()->AssertPendingException(); + return nullptr; } + ret->SetWithoutChecks<false>(num_methods++, method); } } return soa.AddLocalReference<jobjectArray>(ret.Get()); diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc index b9f8d01405..a2d9797114 100644 --- a/runtime/native/java_lang_DexCache.cc +++ b/runtime/native/java_lang_DexCache.cc @@ -18,6 +18,7 @@ #include "dex_file.h" #include "jni_internal.h" +#include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" #include "mirror/object-inl.h" #include "scoped_fast_native_object_access.h" diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc index 40d658458e..9db47d842b 100644 --- a/runtime/native/java_lang_reflect_Constructor.cc +++ b/runtime/native/java_lang_reflect_Constructor.cc @@ -16,10 +16,9 @@ #include "java_lang_reflect_Constructor.h" +#include "art_method-inl.h" #include "class_linker.h" #include "jni_internal.h" -#include "mirror/art_method.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/method.h" #include "mirror/object-inl.h" diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc index d6aa9b5fbf..ba898c6d2d 100644 --- a/runtime/native/java_lang_reflect_Field.cc +++ b/runtime/native/java_lang_reflect_Field.cc @@ -21,7 +21,6 @@ #include "common_throws.h" #include "dex_file-inl.h" #include "jni_internal.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/field.h" #include "reflection-inl.h" diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc index c20d83245c..9533b4dc8a 100644 --- a/runtime/native/java_lang_reflect_Method.cc +++ 
b/runtime/native/java_lang_reflect_Method.cc @@ -16,10 +16,9 @@ #include "java_lang_reflect_Method.h" +#include "art_method-inl.h" #include "class_linker.h" #include "jni_internal.h" -#include "mirror/art_method.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" @@ -37,16 +36,17 @@ static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiv static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); - mirror::ArtMethod* proxy_method = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod); + ArtMethod* proxy_method = ArtMethod::FromReflectedMethod(soa, javaMethod); CHECK(proxy_method->GetDeclaringClass()->IsProxyClass()); mirror::Class* proxy_class = proxy_method->GetDeclaringClass(); int throws_index = -1; - size_t num_virt_methods = proxy_class->NumVirtualMethods(); - for (size_t i = 0; i < num_virt_methods; i++) { - if (proxy_class->GetVirtualMethod(i) == proxy_method) { + size_t i = 0; + for (const auto& m : proxy_class->GetVirtualMethods(sizeof(void*))) { + if (&m == proxy_method) { throws_index = i; break; } + ++i; } CHECK_NE(throws_index, -1); mirror::ObjectArray<mirror::Class>* declared_exceptions = diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h index dfabff5f6a..57b873bc22 100644 --- a/runtime/native/scoped_fast_native_object_access.h +++ b/runtime/native/scoped_fast_native_object_access.h @@ -17,7 +17,7 @@ #ifndef ART_RUNTIME_NATIVE_SCOPED_FAST_NATIVE_OBJECT_ACCESS_H_ #define ART_RUNTIME_NATIVE_SCOPED_FAST_NATIVE_OBJECT_ACCESS_H_ -#include "mirror/art_method-inl.h" +#include "art_method-inl.h" #include "scoped_thread_state_change.h" namespace art { @@ -31,7 +31,7 @@ class ScopedFastNativeObjectAccess : public ScopedObjectAccessAlreadyRunnable { SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE : 
ScopedObjectAccessAlreadyRunnable(env) { Locks::mutator_lock_->AssertSharedHeld(Self()); - DCHECK(Self()->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr()->IsFastNative()); + DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsFastNative()); // Don't work with raw objects in non-runnable states. DCHECK_EQ(Self()->GetState(), kRunnable); } diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc index 17ebdff996..770644cef0 100644 --- a/runtime/native/sun_misc_Unsafe.cc +++ b/runtime/native/sun_misc_Unsafe.cc @@ -19,7 +19,7 @@ #include "gc/accounting/card_table-inl.h" #include "jni_internal.h" #include "mirror/array.h" -#include "mirror/object.h" +#include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "scoped_fast_native_object_access.h" diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc index 0ad560e9eb..46cc5aaff8 100644 --- a/runtime/native_bridge_art_interface.cc +++ b/runtime/native_bridge_art_interface.cc @@ -20,10 +20,10 @@ #include "nativebridge/native_bridge.h" +#include "art_method-inl.h" #include "base/logging.h" #include "base/macros.h" #include "dex_file-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "scoped_thread_state_change.h" #include "sigchain.h" @@ -32,29 +32,24 @@ namespace art { static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) { ScopedObjectAccess soa(env); - mirror::ArtMethod* m = soa.DecodeMethod(mid); + ArtMethod* m = soa.DecodeMethod(mid); return m->GetShorty(); } static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) { - if (clazz == nullptr) + if (clazz == nullptr) { return 0; + } ScopedObjectAccess soa(env); mirror::Class* c = soa.Decode<mirror::Class*>(clazz); uint32_t native_method_count = 0; - for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) { - mirror::ArtMethod* m = c->GetDirectMethod(i); - if (m->IsNative()) { - native_method_count++; - } + for (auto& m : 
c->GetDirectMethods(sizeof(void*))) { + native_method_count += m.IsNative() ? 1u : 0u; } - for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) { - mirror::ArtMethod* m = c->GetVirtualMethod(i); - if (m->IsNative()) { - native_method_count++; - } + for (auto& m : c->GetVirtualMethods(sizeof(void*))) { + native_method_count += m.IsNative() ? 1u : 0u; } return native_method_count; } @@ -68,29 +63,27 @@ static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* met mirror::Class* c = soa.Decode<mirror::Class*>(clazz); uint32_t count = 0; - for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) { - mirror::ArtMethod* m = c->GetDirectMethod(i); - if (m->IsNative()) { + for (auto& m : c->GetDirectMethods(sizeof(void*))) { + if (m.IsNative()) { if (count < method_count) { - methods[count].name = m->GetName(); - methods[count].signature = m->GetShorty(); - methods[count].fnPtr = m->GetEntryPointFromJni(); + methods[count].name = m.GetName(); + methods[count].signature = m.GetShorty(); + methods[count].fnPtr = m.GetEntryPointFromJni(); count++; } else { - LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m); + LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(&m); } } } - for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) { - mirror::ArtMethod* m = c->GetVirtualMethod(i); - if (m->IsNative()) { + for (auto& m : c->GetVirtualMethods(sizeof(void*))) { + if (m.IsNative()) { if (count < method_count) { - methods[count].name = m->GetName(); - methods[count].signature = m->GetShorty(); - methods[count].fnPtr = m->GetEntryPointFromJni(); + methods[count].name = m.GetName(); + methods[count].signature = m.GetShorty(); + methods[count].fnPtr = m.GetEntryPointFromJni(); count++; } else { - LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m); + LOG(WARNING) << "Output native method array too small. 
Skipping " << PrettyMethod(&m); } } } diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h index d2d7fa8a21..7fe31300ab 100644 --- a/runtime/nth_caller_visitor.h +++ b/runtime/nth_caller_visitor.h @@ -17,8 +17,8 @@ #ifndef ART_RUNTIME_NTH_CALLER_VISITOR_H_ #define ART_RUNTIME_NTH_CALLER_VISITOR_H_ +#include "art_method.h" #include "base/mutex.h" -#include "mirror/art_method.h" #include "stack.h" namespace art { @@ -34,7 +34,7 @@ struct NthCallerVisitor : public StackVisitor { caller(nullptr) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); bool do_count = false; if (m == nullptr || m->IsRuntimeMethod()) { // Upcall. @@ -56,7 +56,7 @@ struct NthCallerVisitor : public StackVisitor { const size_t n; const bool include_runtime_and_upcalls_; size_t count; - mirror::ArtMethod* caller; + ArtMethod* caller; }; } // namespace art diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h index a429c8738c..6b3b66643c 100644 --- a/runtime/oat_file-inl.h +++ b/runtime/oat_file-inl.h @@ -22,7 +22,7 @@ namespace art { inline const OatQuickMethodHeader* OatFile::OatMethod::GetOatQuickMethodHeader() const { - const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); if (code == nullptr) { return nullptr; } @@ -39,7 +39,7 @@ inline uint32_t OatFile::OatMethod::GetOatQuickMethodHeaderOffset() const { } inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const { - const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); if (code == nullptr) { return 0u; } @@ -55,7 +55,7 @@ inline uint32_t OatFile::OatMethod::GetQuickCodeSizeOffset() const { } inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const { - const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode()); + 
const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); if (code == nullptr) { return 0u; } @@ -63,7 +63,7 @@ inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const { } inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const { - const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); if (code == nullptr) { return 0u; } @@ -71,7 +71,7 @@ inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const { } inline uint32_t OatFile::OatMethod::GetFpSpillMask() const { - const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); if (code == nullptr) { return 0u; } @@ -79,7 +79,7 @@ inline uint32_t OatFile::OatMethod::GetFpSpillMask() const { } const uint8_t* OatFile::OatMethod::GetGcMap() const { - const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); if (code == nullptr) { return nullptr; } @@ -130,7 +130,7 @@ inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const { } inline const uint8_t* OatFile::OatMethod::GetMappingTable() const { - const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); if (code == nullptr) { return nullptr; } @@ -142,7 +142,7 @@ inline const uint8_t* OatFile::OatMethod::GetMappingTable() const { } inline const uint8_t* OatFile::OatMethod::GetVmapTable() const { - const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); if (code == nullptr) { return nullptr; } diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 63ee4b1af0..6fda790697 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -28,14 +28,13 @@ #include 
"android/dlext.h" #endif +#include "art_method-inl.h" #include "base/bit_vector.h" #include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "elf_file.h" #include "elf_utils.h" #include "oat.h" -#include "mirror/art_method.h" -#include "mirror/art_method-inl.h" #include "mirror/class.h" #include "mirror/object-inl.h" #include "os.h" @@ -693,7 +692,7 @@ const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) return OatMethod(oat_file_->Begin(), 0); } -void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const { +void OatFile::OatMethod::LinkMethod(ArtMethod* method) const { CHECK(method != nullptr); method->SetEntryPointFromQuickCompiledCode(GetQuickCode()); } diff --git a/runtime/oat_file.h b/runtime/oat_file.h index 12e9f6cea7..c58b0294c9 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -98,7 +98,7 @@ class OatFile FINAL { class OatMethod FINAL { public: - void LinkMethod(mirror::ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void LinkMethod(ArtMethod* method) const; uint32_t GetCodeOffset() const { return code_offset_; diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc index 749fb5d0bd..f7accc0f31 100644 --- a/runtime/object_lock.cc +++ b/runtime/object_lock.cc @@ -47,7 +47,6 @@ void ObjectLock<T>::NotifyAll() { obj_->NotifyAll(self_); } -template class ObjectLock<mirror::ArtMethod>; template class ObjectLock<mirror::Class>; template class ObjectLock<mirror::Object>; diff --git a/runtime/primitive.h b/runtime/primitive.h index 0ac5f40d55..ca42c4790c 100644 --- a/runtime/primitive.h +++ b/runtime/primitive.h @@ -26,7 +26,6 @@ namespace art { static constexpr size_t kObjectReferenceSize = 4; - constexpr size_t ComponentSizeShiftWidth(size_t component_size) { return component_size == 1u ? 0u : component_size == 2u ? 
1u : diff --git a/runtime/profiler.cc b/runtime/profiler.cc index f9218a381c..ab28a9a352 100644 --- a/runtime/profiler.cc +++ b/runtime/profiler.cc @@ -22,6 +22,7 @@ #include <fstream> +#include "art_method-inl.h" #include "base/stl_util.h" #include "base/time_utils.h" #include "base/unix_file/fd_file.h" @@ -30,7 +31,6 @@ #include "debugger.h" #include "dex_file-inl.h" #include "instrumentation.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" #include "mirror/object_array-inl.h" @@ -57,7 +57,7 @@ volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false; // Walk through the method within depth of max_depth_ on the Java stack class BoundedStackVisitor : public StackVisitor { public: - BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack, + BoundedStackVisitor(std::vector<std::pair<ArtMethod*, uint32_t>>* stack, Thread* thread, uint32_t max_depth) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), @@ -66,7 +66,7 @@ class BoundedStackVisitor : public StackVisitor { depth_(0) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; } @@ -81,7 +81,7 @@ class BoundedStackVisitor : public StackVisitor { } private: - std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack_; + std::vector<std::pair<ArtMethod*, uint32_t>>* stack_; const uint32_t max_depth_; uint32_t depth_; }; @@ -94,7 +94,7 @@ static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mu const ProfilerOptions profile_options = profiler->GetProfilerOptions(); switch (profile_options.GetProfileType()) { case kProfilerMethod: { - mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr); + ArtMethod* method = thread->GetCurrentMethod(nullptr); if ((false) && method == nullptr) { LOG(INFO) << "No 
current method available"; std::ostringstream os; @@ -400,7 +400,7 @@ BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler( // Filter out methods the profiler doesn't want to record. // We require mutator lock since some statistics will be updated here. -bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) { +bool BackgroundMethodSamplingProfiler::ProcessMethod(ArtMethod* method) { if (method == nullptr) { profile_table_.NullMethod(); // Don't record a null method. @@ -435,7 +435,7 @@ bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) // A method has been hit, record its invocation in the method map. // The mutator_lock must be held (shared) when this is called. -void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) { +void BackgroundMethodSamplingProfiler::RecordMethod(ArtMethod* method) { // Add to the profile table unless it is filtered out. if (ProcessMethod(method)) { profile_table_.Put(method); @@ -448,7 +448,7 @@ void BackgroundMethodSamplingProfiler::RecordStack(const std::vector<Instruction return; } // Get the method on top of the stack. We use this method to perform filtering. - mirror::ArtMethod* method = stack.front().first; + ArtMethod* method = stack.front().first; if (ProcessMethod(method)) { profile_table_.PutStack(stack); } @@ -464,7 +464,7 @@ uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) { } // Profile Table. -// This holds a mapping of mirror::ArtMethod* to a count of how many times a sample +// This holds a mapping of ArtMethod* to a count of how many times a sample // hit it at the top of the stack. ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0), num_null_methods_(0), @@ -482,7 +482,7 @@ ProfileSampleResults::~ProfileSampleResults() { // Add a method to the profile table. If it's the first time the method // has been seen, add it with count=1, otherwise increment the count. 
-void ProfileSampleResults::Put(mirror::ArtMethod* method) { +void ProfileSampleResults::Put(ArtMethod* method) { MutexLock mu(Thread::Current(), lock_); uint32_t index = Hash(method); if (table[index] == nullptr) { @@ -517,7 +517,7 @@ void ProfileSampleResults::PutStack(const std::vector<InstructionLocation>& stac for (std::vector<InstructionLocation>::const_reverse_iterator iter = stack.rbegin(); iter != stack.rend(); ++iter) { InstructionLocation inst_loc = *iter; - mirror::ArtMethod* method = inst_loc.first; + ArtMethod* method = inst_loc.first; if (method == nullptr) { // skip null method continue; @@ -577,7 +577,7 @@ uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) { Map *map = table[i]; if (map != nullptr) { for (const auto &meth_iter : *map) { - mirror::ArtMethod *method = meth_iter.first; + ArtMethod *method = meth_iter.first; std::string method_name = PrettyMethod(method); const DexFile::CodeItem* codeitem = method->GetCodeItem(); @@ -709,7 +709,7 @@ void ProfileSampleResults::Clear() { previous_.clear(); } -uint32_t ProfileSampleResults::Hash(mirror::ArtMethod* method) { +uint32_t ProfileSampleResults::Hash(ArtMethod* method) { return (PointerToLowMemUInt32(method) >> 3) % kHashSize; } diff --git a/runtime/profiler.h b/runtime/profiler.h index ae51c87ce9..7611487da2 100644 --- a/runtime/profiler.h +++ b/runtime/profiler.h @@ -36,12 +36,12 @@ namespace art { namespace mirror { - class ArtMethod; class Class; } // namespace mirror +class ArtMethod; class Thread; -typedef std::pair<mirror::ArtMethod*, uint32_t> InstructionLocation; +typedef std::pair<ArtMethod*, uint32_t> InstructionLocation; // This class stores the sampled bounded stacks in a trie structure. 
A path of the trie represents // a particular context with the method on top of the stack being a leaf or an internal node of the @@ -104,7 +104,7 @@ class ProfileSampleResults { explicit ProfileSampleResults(Mutex& lock); ~ProfileSampleResults(); - void Put(mirror::ArtMethod* method); + void Put(ArtMethod* method); void PutStack(const std::vector<InstructionLocation>& stack_dump); uint32_t Write(std::ostream &os, ProfileDataType type); void ReadPrevious(int fd, ProfileDataType type); @@ -114,14 +114,14 @@ class ProfileSampleResults { void BootMethod() { ++num_boot_methods_; } private: - uint32_t Hash(mirror::ArtMethod* method); + uint32_t Hash(ArtMethod* method); static constexpr int kHashSize = 17; Mutex& lock_; // Reference to the main profiler lock - we don't need two of them. uint32_t num_samples_; // Total number of samples taken. uint32_t num_null_methods_; // Number of samples where can don't know the method. uint32_t num_boot_methods_; // Number of samples in the boot path. - typedef std::map<mirror::ArtMethod*, uint32_t> Map; // Map of method vs its count. + typedef std::map<ArtMethod*, uint32_t> Map; // Map of method vs its count. 
Map *table[kHashSize]; typedef std::set<StackTrieNode*> TrieNodeSet; @@ -176,9 +176,9 @@ class BackgroundMethodSamplingProfiler { static void Stop() LOCKS_EXCLUDED(Locks::profiler_lock_, wait_lock_); static void Shutdown() LOCKS_EXCLUDED(Locks::profiler_lock_); - void RecordMethod(mirror::ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void RecordMethod(ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RecordStack(const std::vector<InstructionLocation>& stack) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool ProcessMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ProcessMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const ProfilerOptions& GetProfilerOptions() const { return options_; } Barrier& GetBarrier() { diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc index 93d1f668f0..f40c0f1130 100644 --- a/runtime/proxy_test.cc +++ b/runtime/proxy_test.cc @@ -52,8 +52,7 @@ class ProxyTest : public CommonCompilerTest { // Builds the method array. jsize methods_count = 3; // Object.equals, Object.hashCode and Object.toString. for (mirror::Class* interface : interfaces) { - mirror::ObjectArray<mirror::ArtMethod>* virtual_methods = interface->GetVirtualMethods(); - methods_count += (virtual_methods == nullptr) ? 
0 : virtual_methods->GetLength(); + methods_count += interface->NumVirtualMethods(); } jobjectArray proxyClassMethods = soa.Env()->NewObjectArray( methods_count, soa.AddLocalReference<jclass>(mirror::Method::StaticClass()), nullptr); @@ -61,28 +60,29 @@ class ProxyTest : public CommonCompilerTest { jsize array_index = 0; // Fill the method array - mirror::ArtMethod* method = javaLangObject->FindDeclaredVirtualMethod( - "equals", "(Ljava/lang/Object;)Z"); + ArtMethod* method = javaLangObject->FindDeclaredVirtualMethod( + "equals", "(Ljava/lang/Object;)Z", sizeof(void*)); CHECK(method != nullptr); soa.Env()->SetObjectArrayElement( proxyClassMethods, array_index++, soa.AddLocalReference<jobject>( mirror::Method::CreateFromArtMethod(soa.Self(), method))); - method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I"); + method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I", sizeof(void*)); CHECK(method != nullptr); soa.Env()->SetObjectArrayElement( proxyClassMethods, array_index++, soa.AddLocalReference<jobject>( mirror::Method::CreateFromArtMethod(soa.Self(), method))); - method = javaLangObject->FindDeclaredVirtualMethod("toString", "()Ljava/lang/String;"); + method = javaLangObject->FindDeclaredVirtualMethod( + "toString", "()Ljava/lang/String;", sizeof(void*)); CHECK(method != nullptr); soa.Env()->SetObjectArrayElement( proxyClassMethods, array_index++, soa.AddLocalReference<jobject>( mirror::Method::CreateFromArtMethod(soa.Self(), method))); // Now adds all interfaces virtual methods. 
for (mirror::Class* interface : interfaces) { - for (int32_t i = 0, count = interface->NumVirtualMethods(); i < count; ++i) { + for (auto& m : interface->GetVirtualMethods(sizeof(void*))) { soa.Env()->SetObjectArrayElement( proxyClassMethods, array_index++, soa.AddLocalReference<jobject>( - mirror::Method::CreateFromArtMethod(soa.Self(), interface->GetVirtualMethod(i)))); + mirror::Method::CreateFromArtMethod(soa.Self(), &m))); } } CHECK_EQ(array_index, methods_count); diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc index 1c404ff141..99e262e6a9 100644 --- a/runtime/quick/inline_method_analyser.cc +++ b/runtime/quick/inline_method_analyser.cc @@ -17,11 +17,11 @@ #include "inline_method_analyser.h" #include "art_field-inl.h" +#include "art_method-inl.h" #include "class_linker-inl.h" #include "dex_file-inl.h" #include "dex_instruction.h" #include "dex_instruction-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" #include "verifier/method_verifier-inl.h" @@ -330,8 +330,9 @@ bool InlineMethodAnalyser::ComputeSpecialAccessorInfo(uint32_t field_idx, bool i InlineIGetIPutData* result) { mirror::DexCache* dex_cache = verifier->GetDexCache(); uint32_t method_idx = verifier->GetMethodReference().dex_method_index; - mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx); - ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(field_idx, dex_cache); + auto* cl = Runtime::Current()->GetClassLinker(); + ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, cl->GetImagePointerSize()); + ArtField* field = cl->GetResolvedField(field_idx, dex_cache); if (method == nullptr || field == nullptr || field->IsStatic()) { return false; } diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index 730759a71b..8c9782aefe 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -17,11 
+17,11 @@ #include "quick_exception_handler.h" #include "arch/context.h" +#include "art_method-inl.h" #include "dex_instruction.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "handle_scope-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/throwable.h" @@ -53,14 +53,14 @@ class CatchBlockStackVisitor FINAL : public StackVisitor { } bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = GetMethod(); + ArtMethod* method = GetMethod(); exception_handler_->SetHandlerFrameDepth(GetFrameDepth()); if (method == nullptr) { // This is the upcall, we remember the frame and last pc so that we may long jump to them. exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc()); exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame()); uint32_t next_dex_pc; - mirror::ArtMethod* next_art_method; + ArtMethod* next_art_method; bool has_next = GetNextMethodAndDexPc(&next_art_method, &next_dex_pc); // Report the method that did the down call as the handler. 
exception_handler_->SetHandlerDexPc(next_dex_pc); @@ -78,12 +78,11 @@ class CatchBlockStackVisitor FINAL : public StackVisitor { DCHECK(method->IsCalleeSaveMethod()); return true; } - StackHandleScope<1> hs(self_); - return HandleTryItems(hs.NewHandle(method)); + return HandleTryItems(method); } private: - bool HandleTryItems(Handle<mirror::ArtMethod> method) + bool HandleTryItems(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t dex_pc = DexFile::kDexNoIndex; if (!method->IsNative()) { @@ -91,13 +90,12 @@ class CatchBlockStackVisitor FINAL : public StackVisitor { } if (dex_pc != DexFile::kDexNoIndex) { bool clear_exception = false; - StackHandleScope<1> hs(Thread::Current()); + StackHandleScope<1> hs(self_); Handle<mirror::Class> to_find(hs.NewHandle((*exception_)->GetClass())); - uint32_t found_dex_pc = mirror::ArtMethod::FindCatchBlock(method, to_find, dex_pc, - &clear_exception); + uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception); exception_handler_->SetClearException(clear_exception); if (found_dex_pc != DexFile::kDexNoIndex) { - exception_handler_->SetHandlerMethod(method.Get()); + exception_handler_->SetHandlerMethod(method); exception_handler_->SetHandlerDexPc(found_dex_pc); exception_handler_->SetHandlerQuickFramePc(method->ToNativeQuickPc(found_dex_pc)); exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame()); @@ -132,7 +130,7 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) { visitor.WalkStack(true); if (kDebugExceptionDelivery) { - if (handler_quick_frame_->AsMirrorPtr() == nullptr) { + if (*handler_quick_frame_ == nullptr) { LOG(INFO) << "Handler is upcall"; } if (handler_method_ != nullptr) { @@ -171,7 +169,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { exception_handler_->SetHandlerFrameDepth(GetFrameDepth()); - mirror::ArtMethod* method = GetMethod(); + ArtMethod* method = 
GetMethod(); if (method == nullptr) { // This is the upcall, we remember the frame and last pc so that we may long jump to them. exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc()); @@ -191,23 +189,21 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { return static_cast<VRegKind>(kinds.at(reg * 2)); } - bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HandleDeoptimization(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); CHECK(code_item != nullptr); uint16_t num_regs = code_item->registers_size_; uint32_t dex_pc = GetDexPc(); - StackHandleScope<3> hs(self_); // Dex cache, class loader and method. + StackHandleScope<2> hs(self_); // Dex cache, class loader and method. mirror::Class* declaring_class = m->GetDeclaringClass(); Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache())); Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader())); - Handle<mirror::ArtMethod> h_method(hs.NewHandle(m)); verifier::MethodVerifier verifier(self_, h_dex_cache->GetDexFile(), h_dex_cache, h_class_loader, &m->GetClassDef(), code_item, m->GetDexMethodIndex(), - h_method, m->GetAccessFlags(), true, true, true, true); + m, m->GetAccessFlags(), true, true, true, true); bool verifier_success = verifier.Verify(); - CHECK(verifier_success) << PrettyMethod(h_method.Get()); - ShadowFrame* new_frame = ShadowFrame::CreateDeoptimizedFrame( - num_regs, nullptr, h_method.Get(), dex_pc); + CHECK(verifier_success) << PrettyMethod(m); + ShadowFrame* new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, m, dex_pc); self_->SetShadowFrameUnderConstruction(new_frame); const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc)); @@ -230,7 +226,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { // Check IsReferenceVReg in case the compiled GC map doesn't agree with the 
verifier. // We don't want to copy a stale reference into the shadow frame as a reference. // b/20736048 - if (GetVReg(h_method.Get(), reg, kind, &value) && IsReferenceVReg(h_method.Get(), reg)) { + if (GetVReg(m, reg, kind, &value) && IsReferenceVReg(m, reg)) { new_frame->SetVRegReference(reg, reinterpret_cast<mirror::Object*>(value)); } else { new_frame->SetVReg(reg, kDeadValue); @@ -241,14 +237,14 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { if (GetVRegKind(reg + 1, kinds) == kLongHiVReg) { // Treat it as a "long" register pair. uint64_t value = 0; - if (GetVRegPair(h_method.Get(), reg, kLongLoVReg, kLongHiVReg, &value)) { + if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &value)) { new_frame->SetVRegLong(reg, value); } else { new_frame->SetVRegLong(reg, kLongDeadValue); } } else { uint32_t value = 0; - if (GetVReg(h_method.Get(), reg, kind, &value)) { + if (GetVReg(m, reg, kind, &value)) { new_frame->SetVReg(reg, value); } else { new_frame->SetVReg(reg, kDeadValue); @@ -260,7 +256,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { // Nothing to do: we treated it as a "long" register pair. } else { uint32_t value = 0; - if (GetVReg(h_method.Get(), reg, kind, &value)) { + if (GetVReg(m, reg, kind, &value)) { new_frame->SetVReg(reg, value); } else { new_frame->SetVReg(reg, kDeadValue); @@ -270,7 +266,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { case kDoubleLoVReg: if (GetVRegKind(reg + 1, kinds) == kDoubleHiVReg) { uint64_t value = 0; - if (GetVRegPair(h_method.Get(), reg, kDoubleLoVReg, kDoubleHiVReg, &value)) { + if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &value)) { // Treat it as a "double" register pair. 
new_frame->SetVRegLong(reg, value); } else { @@ -278,7 +274,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { } } else { uint32_t value = 0; - if (GetVReg(h_method.Get(), reg, kind, &value)) { + if (GetVReg(m, reg, kind, &value)) { new_frame->SetVReg(reg, value); } else { new_frame->SetVReg(reg, kDeadValue); @@ -290,7 +286,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { // Nothing to do: we treated it as a "double" register pair. } else { uint32_t value = 0; - if (GetVReg(h_method.Get(), reg, kind, &value)) { + if (GetVReg(m, reg, kind, &value)) { new_frame->SetVReg(reg, value); } else { new_frame->SetVReg(reg, kDeadValue); @@ -299,7 +295,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { break; default: uint32_t value = 0; - if (GetVReg(h_method.Get(), reg, kind, &value)) { + if (GetVReg(m, reg, kind, &value)) { new_frame->SetVReg(reg, value); } else { new_frame->SetVReg(reg, kDeadValue); diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h index 7ee4118a8b..8d7cd12216 100644 --- a/runtime/quick_exception_handler.h +++ b/runtime/quick_exception_handler.h @@ -25,9 +25,9 @@ namespace art { namespace mirror { -class ArtMethod; class Throwable; } // namespace mirror +class ArtMethod; class Context; class Thread; class ShadowFrame; @@ -48,7 +48,7 @@ class QuickExceptionHandler { void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); NO_RETURN void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetHandlerQuickFrame(StackReference<mirror::ArtMethod>* handler_quick_frame) { + void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) { handler_quick_frame_ = handler_quick_frame; } @@ -56,11 +56,11 @@ class QuickExceptionHandler { handler_quick_frame_pc_ = handler_quick_frame_pc; } - mirror::ArtMethod* GetHandlerMethod() const { + ArtMethod* GetHandlerMethod() const { return handler_method_; } - void SetHandlerMethod(mirror::ArtMethod* handler_quick_method) { 
+ void SetHandlerMethod(ArtMethod* handler_quick_method) { handler_method_ = handler_quick_method; } @@ -87,11 +87,11 @@ class QuickExceptionHandler { // Is method tracing active? const bool method_tracing_active_; // Quick frame with found handler or last frame if no handler found. - StackReference<mirror::ArtMethod>* handler_quick_frame_; + ArtMethod** handler_quick_frame_; // PC to branch to for the handler. uintptr_t handler_quick_frame_pc_; // The handler method to report to the debugger. - mirror::ArtMethod* handler_method_; + ArtMethod* handler_method_; // The handler's dex PC, zero implies an uncaught exception. uint32_t handler_dex_pc_; // Should the exception be cleared as the catch block has no move-exception? diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h index 52d83a24f6..aa72e97328 100644 --- a/runtime/read_barrier.h +++ b/runtime/read_barrier.h @@ -28,12 +28,11 @@ // which needs to be a C header file for asm_support.h. namespace art { - namespace mirror { - class ArtMethod; class Object; template<typename MirrorType> class HeapReference; } // namespace mirror +class ArtMethod; class ReadBarrier { public: diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc index 4ffebf2c5f..fae8e722c3 100644 --- a/runtime/reference_table_test.cc +++ b/runtime/reference_table_test.cc @@ -18,7 +18,9 @@ #include "common_runtime_test.h" #include "mirror/array-inl.h" +#include "mirror/class-inl.h" #include "mirror/string.h" +#include "primitive.h" #include "scoped_thread_state_change.h" #include "thread-inl.h" diff --git a/runtime/reflection.cc b/runtime/reflection.cc index f8c70815b2..11522d9914 100644 --- a/runtime/reflection.cc +++ b/runtime/reflection.cc @@ -17,6 +17,7 @@ #include "reflection-inl.h" #include "art_field-inl.h" +#include "art_method-inl.h" #include "class_linker.h" #include "common_throws.h" #include "dex_file-inl.h" @@ -24,7 +25,6 @@ #include "indirect_reference_table-inl.h" #include "jni_internal.h" #include 
"mirror/abstract_method.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "nth_caller_visitor.h" @@ -213,10 +213,9 @@ class ArgArray { } bool BuildArgArrayFromObjectArray(mirror::Object* receiver, - mirror::ObjectArray<mirror::Object>* args, - Handle<mirror::ArtMethod> h_m) + mirror::ObjectArray<mirror::Object>* args, ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const DexFile::TypeList* classes = h_m->GetParameterTypeList(); + const DexFile::TypeList* classes = m->GetParameterTypeList(); // Set receiver if non-null (method is not static) if (receiver != nullptr) { Append(receiver); @@ -225,11 +224,11 @@ class ArgArray { mirror::Object* arg = args->Get(args_offset); if (((shorty_[i] == 'L') && (arg != nullptr)) || ((arg == nullptr && shorty_[i] != 'L'))) { mirror::Class* dst_class = - h_m->GetClassFromTypeIndex(classes->GetTypeItem(args_offset).type_idx_, true); + m->GetClassFromTypeIndex(classes->GetTypeItem(args_offset).type_idx_, true); if (UNLIKELY(arg == nullptr || !arg->InstanceOf(dst_class))) { ThrowIllegalArgumentException( StringPrintf("method %s argument %zd has type %s, got %s", - PrettyMethod(h_m.Get(), false).c_str(), + PrettyMethod(m, false).c_str(), args_offset + 1, // Humans don't count from 0. 
PrettyDescriptor(dst_class).c_str(), PrettyTypeOf(arg).c_str()).c_str()); @@ -257,7 +256,7 @@ class ArgArray { } else { \ ThrowIllegalArgumentException(\ StringPrintf("method %s argument %zd has type %s, got %s", \ - PrettyMethod(h_m.Get(), false).c_str(), \ + PrettyMethod(m, false).c_str(), \ args_offset + 1, \ expected, \ PrettyTypeOf(arg).c_str()).c_str()); \ @@ -343,7 +342,7 @@ class ArgArray { std::unique_ptr<uint32_t[]> large_arg_array_; }; -static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t* args) +static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile::TypeList* params = m->GetParameterTypeList(); if (params == nullptr) { @@ -356,16 +355,14 @@ static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t* offset = 1; } // TODO: If args contain object references, it may cause problems. - Thread* self = Thread::Current(); - StackHandleScope<1> hs(self); - Handle<mirror::ArtMethod> h_m(hs.NewHandle(m)); + Thread* const self = Thread::Current(); for (uint32_t i = 0; i < num_params; i++) { uint16_t type_idx = params->GetTypeItem(i).type_idx_; - mirror::Class* param_type = h_m->GetClassFromTypeIndex(type_idx, true); + mirror::Class* param_type = m->GetClassFromTypeIndex(type_idx, true); if (param_type == nullptr) { CHECK(self->IsExceptionPending()); LOG(ERROR) << "Internal error: unresolvable type for argument type in JNI invoke: " - << h_m->GetTypeDescriptorFromTypeIdx(type_idx) << "\n" + << m->GetTypeDescriptorFromTypeIdx(type_idx) << "\n" << self->GetException()->Dump(); self->ClearException(); ++error_count; @@ -378,7 +375,7 @@ static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t* if (argument != nullptr && !argument->InstanceOf(param_type)) { LOG(ERROR) << "JNI ERROR (app bug): attempt to pass an instance of " << PrettyTypeOf(argument) << " as argument " << (i + 1) - << " to " << PrettyMethod(h_m.Get()); + 
<< " to " << PrettyMethod(m); ++error_count; } } else if (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble()) { @@ -388,25 +385,25 @@ static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t* if (param_type->IsPrimitiveBoolean()) { if (arg != JNI_TRUE && arg != JNI_FALSE) { LOG(ERROR) << "JNI ERROR (app bug): expected jboolean (0/1) but got value of " - << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get()); + << arg << " as argument " << (i + 1) << " to " << PrettyMethod(m); ++error_count; } } else if (param_type->IsPrimitiveByte()) { if (arg < -128 || arg > 127) { LOG(ERROR) << "JNI ERROR (app bug): expected jbyte but got value of " - << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get()); + << arg << " as argument " << (i + 1) << " to " << PrettyMethod(m); ++error_count; } } else if (param_type->IsPrimitiveChar()) { if (args[i + offset] > 0xFFFF) { LOG(ERROR) << "JNI ERROR (app bug): expected jchar but got value of " - << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get()); + << arg << " as argument " << (i + 1) << " to " << PrettyMethod(m); ++error_count; } } else if (param_type->IsPrimitiveShort()) { if (arg < -32768 || arg > 0x7FFF) { LOG(ERROR) << "JNI ERROR (app bug): expected jshort but got value of " - << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get()); + << arg << " as argument " << (i + 1) << " to " << PrettyMethod(m); ++error_count; } } @@ -416,24 +413,23 @@ static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t* // TODO: pass the JNI function name (such as "CallVoidMethodV") through so we can call JniAbort // with an argument. 
vm->JniAbortF(nullptr, "bad arguments passed to %s (see above for details)", - PrettyMethod(h_m.Get()).c_str()); + PrettyMethod(m).c_str()); } } -static mirror::ArtMethod* FindVirtualMethod(mirror::Object* receiver, - mirror::ArtMethod* method) +static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method); + return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, sizeof(void*)); } static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa, - mirror::ArtMethod* method, ArgArray* arg_array, JValue* result, + ArtMethod* method, ArgArray* arg_array, JValue* result, const char* shorty) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint32_t* args = arg_array->GetArray(); if (UNLIKELY(soa.Env()->check_jni)) { - CheckMethodArguments(soa.Vm(), method, args); + CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(sizeof(void*)), args); } method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty); } @@ -449,7 +445,7 @@ JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject o return JValue(); } - mirror::ArtMethod* method = soa.DecodeMethod(mid); + ArtMethod* method = soa.DecodeMethod(mid); bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor(); if (is_string_init) { // Replace calls to String.<init> with equivalent StringFactory call. @@ -479,7 +475,7 @@ JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject o return JValue(); } - mirror::ArtMethod* method = soa.DecodeMethod(mid); + ArtMethod* method = soa.DecodeMethod(mid); bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor(); if (is_string_init) { // Replace calls to String.<init> with equivalent StringFactory call. 
@@ -510,7 +506,7 @@ JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnab } mirror::Object* receiver = soa.Decode<mirror::Object*>(obj); - mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); + ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor(); if (is_string_init) { // Replace calls to String.<init> with equivalent StringFactory call. @@ -541,7 +537,7 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab } mirror::Object* receiver = soa.Decode<mirror::Object*>(obj); - mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); + ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor(); if (is_string_init) { // Replace calls to String.<init> with equivalent StringFactory call. @@ -574,7 +570,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(javaMethod); const bool accessible = abstract_method->IsAccessible(); - mirror::ArtMethod* m = abstract_method->GetArtMethod(); + ArtMethod* m = abstract_method->GetArtMethod(); mirror::Class* declaring_class = m->GetDeclaringClass(); if (UNLIKELY(!declaring_class->IsInitialized())) { @@ -601,13 +597,14 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM } // Find the actual implementation of the virtual method. - m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m); + m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m, sizeof(void*)); } } // Get our arrays of arguments and their types, and check they're the same size. 
auto* objects = soa.Decode<mirror::ObjectArray<mirror::Object>*>(javaArgs); - const DexFile::TypeList* classes = m->GetParameterTypeList(); + auto* np_method = m->GetInterfaceMethodIfProxy(sizeof(void*)); + const DexFile::TypeList* classes = np_method->GetParameterTypeList(); uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size(); uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0; if (arg_count != classes_size) { @@ -633,11 +630,9 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM // Invoke the method. JValue result; uint32_t shorty_len = 0; - const char* shorty = m->GetShorty(&shorty_len); + const char* shorty = np_method->GetShorty(&shorty_len); ArgArray arg_array(shorty, shorty_len); - StackHandleScope<1> hs(soa.Self()); - Handle<mirror::ArtMethod> h_m(hs.NewHandle(m)); - if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, h_m)) { + if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, np_method)) { CHECK(soa.Self()->IsExceptionPending()); return nullptr; } diff --git a/runtime/reflection.h b/runtime/reflection.h index df3b9d3694..825a7213ce 100644 --- a/runtime/reflection.h +++ b/runtime/reflection.h @@ -23,11 +23,11 @@ namespace art { namespace mirror { - class ArtMethod; class Class; class Object; } // namespace mirror class ArtField; +class ArtMethod; union JValue; class ScopedObjectAccessAlreadyRunnable; class ShadowFrame; diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc index 36e444a061..6f17e7d42c 100644 --- a/runtime/reflection_test.cc +++ b/runtime/reflection_test.cc @@ -20,8 +20,8 @@ #include <limits.h> #include "ScopedLocalRef.h" +#include "art_method-inl.h" #include "common_compiler_test.h" -#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" namespace art { @@ -81,7 +81,7 @@ class ReflectionTest : public CommonCompilerTest { return soa.AddLocalReference<jclass>(c); } - void ReflectionTestMakeExecutable(mirror::ArtMethod** 
method, + void ReflectionTestMakeExecutable(ArtMethod** method, mirror::Object** receiver, bool is_static, const char* method_name, const char* method_signature) @@ -107,8 +107,8 @@ class ReflectionTest : public CommonCompilerTest { class_loader); CHECK(c != nullptr); - *method = is_static ? c->FindDirectMethod(method_name, method_signature) - : c->FindVirtualMethod(method_name, method_signature); + *method = is_static ? c->FindDirectMethod(method_name, method_signature, sizeof(void*)) + : c->FindVirtualMethod(method_name, method_signature, sizeof(void*)); CHECK(method != nullptr); if (is_static) { @@ -130,7 +130,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeNopMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "nop", "()V"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -139,7 +139,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeIdentityByteMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(B)B"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -164,7 +164,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeIdentityIntMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(I)I"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -189,7 +189,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeIdentityDoubleMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* 
receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(D)D"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -214,7 +214,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeSumIntIntMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(II)I"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -243,7 +243,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeSumIntIntIntMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(III)I"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -282,7 +282,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeSumIntIntIntIntMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIII)I"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -326,7 +326,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeSumIntIntIntIntIntMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIIII)I"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -375,7 +375,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeSumDoubleDoubleMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; 
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DD)D"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -409,7 +409,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeSumDoubleDoubleDoubleMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDD)D"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -436,7 +436,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDD)D"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -466,7 +466,7 @@ class ReflectionTest : public CommonCompilerTest { void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) { ScopedObjectAccess soa(env_); - mirror::ArtMethod* method; + ArtMethod* method; mirror::Object* receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDDD)D"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); @@ -515,7 +515,7 @@ TEST_F(ReflectionTest, StaticMainMethod) { mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader); ASSERT_TRUE(klass != nullptr); - mirror::ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V"); + ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V", sizeof(void*)); ASSERT_TRUE(method != nullptr); // Start runtime. 
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h index a82bc85993..68d5ad2f6e 100644 --- a/runtime/runtime-inl.h +++ b/runtime/runtime-inl.h @@ -19,7 +19,7 @@ #include "runtime.h" -#include "mirror/art_method.h" +#include "art_method.h" #include "read_barrier-inl.h" namespace art { @@ -34,52 +34,46 @@ inline mirror::Object* Runtime::GetClearedJniWeakGlobal() { return obj; } -inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(mirror::ArtMethod* method) { +inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(ArtMethod* method) { DCHECK(method != nullptr); // Cannot be imt-conflict-method or resolution-method. - DCHECK(method != GetImtConflictMethod()); - DCHECK(method != GetResolutionMethod()); + DCHECK_NE(method, GetImtConflictMethod()); + DCHECK_NE(method, GetResolutionMethod()); // Don't use GetCalleeSaveMethod(), some tests don't set all callee save methods. if (method == GetCalleeSaveMethodUnchecked(Runtime::kRefsAndArgs)) { return GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); } else if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveAll)) { return GetCalleeSaveMethodFrameInfo(Runtime::kSaveAll); } else { - DCHECK(method == GetCalleeSaveMethodUnchecked(Runtime::kRefsOnly)); + DCHECK_EQ(method, GetCalleeSaveMethodUnchecked(Runtime::kRefsOnly)); return GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly); } } -inline mirror::ArtMethod* Runtime::GetResolutionMethod() { +inline ArtMethod* Runtime::GetResolutionMethod() { CHECK(HasResolutionMethod()); - return resolution_method_.Read(); + return resolution_method_; } -inline mirror::ArtMethod* Runtime::GetImtConflictMethod() { +inline ArtMethod* Runtime::GetImtConflictMethod() { CHECK(HasImtConflictMethod()); - return imt_conflict_method_.Read(); + return imt_conflict_method_; } -inline mirror::ArtMethod* Runtime::GetImtUnimplementedMethod() { - CHECK(!imt_unimplemented_method_.IsNull()); - return imt_unimplemented_method_.Read(); +inline ArtMethod* 
Runtime::GetImtUnimplementedMethod() { + CHECK(imt_unimplemented_method_ != nullptr); + return imt_unimplemented_method_; } -inline mirror::ObjectArray<mirror::ArtMethod>* Runtime::GetDefaultImt() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CHECK(HasDefaultImt()); - return default_imt_.Read(); -} - -inline mirror::ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type) +inline ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(HasCalleeSaveMethod(type)); - return callee_save_methods_[type].Read(); + return GetCalleeSaveMethodUnchecked(type); } -inline mirror::ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type) +inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return callee_save_methods_[type].Read(); + return reinterpret_cast<ArtMethod*>(callee_save_methods_[type]); } } // namespace art diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 2618661ae3..65ea77ad29 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -49,6 +49,7 @@ #include "arch/x86_64/quick_method_frame_info_x86_64.h" #include "arch/x86_64/registers_x86_64.h" #include "art_field-inl.h" +#include "art_method-inl.h" #include "asm_support.h" #include "atomic.h" #include "base/arena_allocator.h" @@ -73,7 +74,6 @@ #include "jni_internal.h" #include "linear_alloc.h" #include "mirror/array.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/field.h" @@ -189,6 +189,7 @@ Runtime::Runtime() is_native_bridge_loaded_(false), zygote_max_failed_boots_(0) { CheckAsmSupportOffsetsAndSizes(); + std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u); } Runtime::~Runtime() { @@ -425,20 +426,20 @@ static jobject CreateSystemClassLoader(Runtime* runtime) { ScopedObjectAccess soa(Thread::Current()); ClassLinker* cl = 
Runtime::Current()->GetClassLinker(); + auto pointer_size = cl->GetImagePointerSize(); StackHandleScope<2> hs(soa.Self()); Handle<mirror::Class> class_loader_class( hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader))); CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true)); - mirror::ArtMethod* getSystemClassLoader = - class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;"); + ArtMethod* getSystemClassLoader = class_loader_class->FindDirectMethod( + "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size); CHECK(getSystemClassLoader != nullptr); JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr); JNIEnv* env = soa.Self()->GetJniEnv(); - ScopedLocalRef<jobject> system_class_loader(env, - soa.AddLocalReference<jobject>(result.GetL())); + ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL())); CHECK(system_class_loader.get() != nullptr); soa.Self()->SetClassLoaderOverride(system_class_loader.get()); @@ -867,18 +868,17 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) } jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options)); - bool use_jit = jit_options_->UseJIT(); if (IsAotCompiler()) { // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in // this case. // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns // null and we don't create the jit. - use_jit = false; + jit_options_->SetUseJIT(false); } // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but // can't be trimmed as easily. - const bool use_malloc = !use_jit; + const bool use_malloc = IsAotCompiler(); arena_pool_.reset(new ArenaPool(use_malloc, false)); if (IsCompiler() && Is64BitInstructionSet(kRuntimeISA)) { // 4gb, no malloc. Explanation in header. 
@@ -1089,6 +1089,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) } VLOG(startup) << "Runtime::Init exiting"; + return true; } @@ -1311,7 +1312,6 @@ mirror::Throwable* Runtime::GetPreAllocatedNoClassDefFoundError() { void Runtime::VisitConstantRoots(RootVisitor* visitor) { // Visit the classes held as static in mirror classes, these can be visited concurrently and only // need to be visited once per GC since they never change. - mirror::ArtMethod::VisitRoots(visitor); mirror::Class::VisitRoots(visitor); mirror::Constructor::VisitRoots(visitor); mirror::Reference::VisitRoots(visitor); @@ -1329,6 +1329,24 @@ void Runtime::VisitConstantRoots(RootVisitor* visitor) { mirror::PrimitiveArray<int32_t>::VisitRoots(visitor); // IntArray mirror::PrimitiveArray<int64_t>::VisitRoots(visitor); // LongArray mirror::PrimitiveArray<int16_t>::VisitRoots(visitor); // ShortArray + // Visiting the roots of these ArtMethods is not currently required since all the GcRoots are + // null. 
+ BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal)); + if (HasResolutionMethod()) { + resolution_method_->VisitRoots(buffered_visitor); + } + if (HasImtConflictMethod()) { + imt_conflict_method_->VisitRoots(buffered_visitor); + } + if (imt_unimplemented_method_ != nullptr) { + imt_unimplemented_method_->VisitRoots(buffered_visitor); + } + for (size_t i = 0; i < kLastCalleeSaveType; ++i) { + auto* m = reinterpret_cast<ArtMethod*>(callee_save_methods_[i]); + if (m != nullptr) { + m->VisitRoots(buffered_visitor); + } + } } void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) { @@ -1350,17 +1368,9 @@ void Runtime::VisitNonThreadRoots(RootVisitor* visitor) { java_vm_->VisitRoots(visitor); sentinel_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal)); pre_allocated_OutOfMemoryError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal)); - resolution_method_.VisitRoot(visitor, RootInfo(kRootVMInternal)); pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal)); - imt_conflict_method_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal)); - imt_unimplemented_method_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal)); - default_imt_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal)); - for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) { - callee_save_methods_[i].VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal)); - } verifier::MethodVerifier::VisitStaticRoots(visitor); VisitTransactionRoots(visitor); - instrumentation_.VisitRoots(visitor); } void Runtime::VisitNonConcurrentRoots(RootVisitor* visitor) { @@ -1399,73 +1409,43 @@ void Runtime::VisitImageRoots(RootVisitor* visitor) { } } -mirror::ObjectArray<mirror::ArtMethod>* Runtime::CreateDefaultImt(ClassLinker* cl) { - Thread* self = Thread::Current(); - StackHandleScope<1> hs(self); - Handle<mirror::ObjectArray<mirror::ArtMethod>> imtable( - hs.NewHandle(cl->AllocArtMethodArray(self, 64))); - mirror::ArtMethod* 
imt_conflict_method = Runtime::Current()->GetImtConflictMethod(); - for (size_t i = 0; i < static_cast<size_t>(imtable->GetLength()); i++) { - imtable->Set<false>(i, imt_conflict_method); - } - return imtable.Get(); -} - -mirror::ArtMethod* Runtime::CreateImtConflictMethod() { - Thread* self = Thread::Current(); - Runtime* runtime = Runtime::Current(); - ClassLinker* class_linker = runtime->GetClassLinker(); - StackHandleScope<1> hs(self); - Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self))); - method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod()); - // TODO: use a special method for imt conflict method saves. - method->SetDexMethodIndex(DexFile::kDexNoIndex); +ArtMethod* Runtime::CreateImtConflictMethod() { + auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod(); // When compiling, the code pointer will get set later when the image is loaded. - if (runtime->IsAotCompiler()) { + if (IsAotCompiler()) { size_t pointer_size = GetInstructionSetPointerSize(instruction_set_); method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size); } else { method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub()); } - return method.Get(); + return method; } -void Runtime::SetImtConflictMethod(mirror::ArtMethod* method) { - imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method); +void Runtime::SetImtConflictMethod(ArtMethod* method) { + CHECK(method != nullptr); + CHECK(method->IsRuntimeMethod()); + imt_conflict_method_ = method; } -mirror::ArtMethod* Runtime::CreateResolutionMethod() { - Thread* self = Thread::Current(); - Runtime* runtime = Runtime::Current(); - ClassLinker* class_linker = runtime->GetClassLinker(); - StackHandleScope<1> hs(self); - Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self))); - method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod()); - // TODO: use a special method for resolution method saves - 
method->SetDexMethodIndex(DexFile::kDexNoIndex); +ArtMethod* Runtime::CreateResolutionMethod() { + auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod(); // When compiling, the code pointer will get set later when the image is loaded. - if (runtime->IsAotCompiler()) { + if (IsAotCompiler()) { size_t pointer_size = GetInstructionSetPointerSize(instruction_set_); method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size); } else { method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub()); } - return method.Get(); + return method; } -mirror::ArtMethod* Runtime::CreateCalleeSaveMethod() { - Thread* self = Thread::Current(); - Runtime* runtime = Runtime::Current(); - ClassLinker* class_linker = runtime->GetClassLinker(); - StackHandleScope<1> hs(self); - Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self))); - method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod()); - // TODO: use a special method for callee saves - method->SetDexMethodIndex(DexFile::kDexNoIndex); +ArtMethod* Runtime::CreateCalleeSaveMethod() { + auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod(); size_t pointer_size = GetInstructionSetPointerSize(instruction_set_); method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size); DCHECK_NE(instruction_set_, kNone); - return method.Get(); + DCHECK(method->IsRuntimeMethod()); + return method; } void Runtime::DisallowNewSystemWeaks() { @@ -1525,15 +1505,16 @@ void Runtime::SetInstructionSet(InstructionSet instruction_set) { } } -void Runtime::SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type) { +void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) { DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType)); - callee_save_methods_[type] = GcRoot<mirror::ArtMethod>(method); + CHECK(method != nullptr); + callee_save_methods_[type] = reinterpret_cast<uintptr_t>(method); } void 
Runtime::StartProfiler(const char* profile_output_filename) { profile_output_filename_ = profile_output_filename; profiler_started_ = - BackgroundMethodSamplingProfiler::Start(profile_output_filename_, profiler_options_); + BackgroundMethodSamplingProfiler::Start(profile_output_filename_, profiler_options_); } // Transaction support. @@ -1550,7 +1531,6 @@ void Runtime::ExitTransactionMode() { preinitialization_transaction_ = nullptr; } - bool Runtime::IsTransactionAborted() const { if (!IsActiveTransaction()) { return false; @@ -1709,4 +1689,16 @@ bool Runtime::IsCompilingBootImage() const { return IsCompiler() && compiler_callbacks_->IsBootImage(); } +void Runtime::SetResolutionMethod(ArtMethod* method) { + CHECK(method != nullptr); + CHECK(method->IsRuntimeMethod()) << method; + resolution_method_ = method; +} + +void Runtime::SetImtUnimplementedMethod(ArtMethod* method) { + CHECK(method != nullptr); + CHECK(method->IsRuntimeMethod()); + imt_unimplemented_method_ = method; +} + } // namespace art diff --git a/runtime/runtime.h b/runtime/runtime.h index 348d5c6b52..e569333bf0 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -41,10 +41,6 @@ namespace art { -class ArenaPool; -class CompilerCallbacks; -class LinearAlloc; - namespace gc { class Heap; namespace collector { @@ -58,7 +54,6 @@ namespace jit { } // namespace jit namespace mirror { - class ArtMethod; class ClassLoader; class Array; template<class T> class ObjectArray; @@ -70,11 +65,15 @@ namespace mirror { namespace verifier { class MethodVerifier; } // namespace verifier +class ArenaPool; +class ArtMethod; class ClassLinker; class Closure; +class CompilerCallbacks; class DexFile; class InternTable; class JavaVMExt; +class LinearAlloc; class MonitorList; class MonitorPool; class NullPointerHandler; @@ -99,6 +98,9 @@ enum VisitRootFlags : uint8_t { kVisitRootFlagStartLoggingNewRoots = 0x4, kVisitRootFlagStopLoggingNewRoots = 0x8, kVisitRootFlagClearRootLog = 0x10, + // Non moving means we can have 
optimizations where we don't visit some roots if they are + // definitely reachable from another location. E.g. ArtMethod and ArtField roots. + kVisitRootFlagNonMoving = 0x20, }; class Runtime { @@ -342,47 +344,28 @@ class Runtime { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a special method that calls into a trampoline for runtime method resolution - mirror::ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasResolutionMethod() const { - return !resolution_method_.IsNull(); + return resolution_method_ != nullptr; } - void SetResolutionMethod(mirror::ArtMethod* method) { - resolution_method_ = GcRoot<mirror::ArtMethod>(method); - } + void SetResolutionMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a special method that calls into a trampoline for runtime imt conflicts. 
- mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool HasImtConflictMethod() const { - return !imt_conflict_method_.IsNull(); + return imt_conflict_method_ != nullptr; } - void SetImtConflictMethod(mirror::ArtMethod* method); - void SetImtUnimplementedMethod(mirror::ArtMethod* method) { - imt_unimplemented_method_ = GcRoot<mirror::ArtMethod>(method); - } + void SetImtConflictMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetImtUnimplementedMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // Returns an imt with every entry set to conflict, used as default imt for all classes. - mirror::ObjectArray<mirror::ArtMethod>* GetDefaultImt() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - bool HasDefaultImt() const { - return !default_imt_.IsNull(); - } - - void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) { - default_imt_ = GcRoot<mirror::ObjectArray<mirror::ArtMethod>>(imt); - } - - mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns a special method that describes all callee saves being spilled to the stack. 
enum CalleeSaveType { @@ -393,20 +376,20 @@ class Runtime { }; bool HasCalleeSaveMethod(CalleeSaveType type) const { - return !callee_save_methods_[type].IsNull(); + return callee_save_methods_[type] != 0u; } - mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type) + ArtMethod* GetCalleeSaveMethod(CalleeSaveType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type) + ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const { return callee_save_method_frame_infos_[type]; } - QuickMethodFrameInfo GetRuntimeMethodFrameInfo(mirror::ArtMethod* method) + QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) { @@ -419,9 +402,9 @@ class Runtime { void SetInstructionSet(InstructionSet instruction_set); - void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type); + void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type); - mirror::ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); int32_t GetStat(int kind); @@ -588,15 +571,15 @@ class Runtime { static constexpr int kProfileForground = 0; static constexpr int kProfileBackgrouud = 1; - GcRoot<mirror::ArtMethod> callee_save_methods_[kLastCalleeSaveType]; + // 64 bit so that we can share the same asm offsets for both 32 and 64 bits. 
+ uint64_t callee_save_methods_[kLastCalleeSaveType]; GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_; GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_; - GcRoot<mirror::ArtMethod> resolution_method_; - GcRoot<mirror::ArtMethod> imt_conflict_method_; + ArtMethod* resolution_method_; + ArtMethod* imt_conflict_method_; // Unresolved method has the same behavior as the conflict method, it is used by the class linker // for differentiating between unfilled imt slots vs conflict slots in superclasses. - GcRoot<mirror::ArtMethod> imt_unimplemented_method_; - GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_; + ArtMethod* imt_unimplemented_method_; // Special sentinel object used to invalid conditions in JNI (cleared weak references) and // JDWP (invalid references). diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h index 60ed55a914..1cc2df65ba 100644 --- a/runtime/scoped_thread_state_change.h +++ b/runtime/scoped_thread_state_change.h @@ -158,20 +158,15 @@ class ScopedObjectAccessAlreadyRunnable { return reinterpret_cast<jfieldID>(field); } - mirror::ArtMethod* DecodeMethod(jmethodID mid) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* DecodeMethod(jmethodID mid) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. - CHECK(!kMovingMethods); - mirror::ArtMethod* method = reinterpret_cast<mirror::ArtMethod*>(mid); - return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&method); + return reinterpret_cast<ArtMethod*>(mid); } - jmethodID EncodeMethod(mirror::ArtMethod* method) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jmethodID EncodeMethod(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. 
- CHECK(!kMovingMethods); return reinterpret_cast<jmethodID>(method); } diff --git a/runtime/stack.cc b/runtime/stack.cc index 800acaa320..6cca4d29b9 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -17,12 +17,14 @@ #include "stack.h" #include "arch/context.h" +#include "art_method-inl.h" #include "base/hex_dump.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "gc_map.h" -#include "mirror/art_method-inl.h" +#include "gc/space/image_space.h" +#include "gc/space/space-inl.h" +#include "linear_alloc.h" #include "mirror/class-inl.h" -#include "mirror/object.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "quick/quick_method_frame_info.h" @@ -34,8 +36,10 @@ namespace art { +static constexpr bool kDebugStackWalk = false; + mirror::Object* ShadowFrame::GetThisObject() const { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m->IsStatic()) { return nullptr; } else if (m->IsNative()) { @@ -49,7 +53,7 @@ mirror::Object* ShadowFrame::GetThisObject() const { } mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m->IsStatic()) { return nullptr; } else { @@ -113,11 +117,12 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const { } } -extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp) +extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Object* StackVisitor::GetThisObject() const { - mirror::ArtMethod* m = GetMethod(); + DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); + ArtMethod* m = GetMethod(); if (m->IsStatic()) { return nullptr; } else if (m->IsNative()) { @@ -156,7 +161,7 @@ size_t StackVisitor::GetNativePcOffset() const { return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_); } -bool StackVisitor::IsReferenceVReg(mirror::ArtMethod* m, uint16_t 
vreg) { +bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) { // Process register map (which native and runtime methods don't have) if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) { return false; @@ -183,8 +188,7 @@ bool StackVisitor::IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg) { return vreg < num_regs && TestBitmap(vreg, reg_bitmap); } -bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, - uint32_t* val) const { +bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const { if (cur_quick_frame_ != nullptr) { DCHECK(context_ != nullptr); // You can't reliably read registers without a context. DCHECK(m == GetMethod()); @@ -200,7 +204,7 @@ bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, } } -bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, +bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const { const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*)); DCHECK(code_pointer != nullptr); @@ -223,7 +227,7 @@ bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRe } } -bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, +bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const { const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*)); DCHECK(code_pointer != nullptr); @@ -287,7 +291,7 @@ bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t return true; } -bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, +bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const { if (kind_lo == kLongLoVReg) { DCHECK_EQ(kind_hi, kLongHiVReg); @@ -312,7 +316,7 @@ bool 
StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kin } } -bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, +bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const { const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*)); DCHECK(code_pointer != nullptr); @@ -339,7 +343,7 @@ bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, } } -bool StackVisitor::GetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, +bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const { uint32_t low_32bits; @@ -371,7 +375,7 @@ bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, return true; } -bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, +bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) { if (cur_quick_frame_ != nullptr) { DCHECK(context_ != nullptr); // You can't reliably write registers without a context. @@ -387,7 +391,7 @@ bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_val } } -bool StackVisitor::SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, +bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) { DCHECK(context_ != nullptr); // You can't reliably write registers without a context. 
DCHECK(m == GetMethod()); @@ -445,7 +449,7 @@ bool StackVisitor::SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRe return true; } -bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, +bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) { if (kind_lo == kLongLoVReg) { DCHECK_EQ(kind_hi, kLongHiVReg); @@ -470,7 +474,7 @@ bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new } bool StackVisitor::SetVRegPairFromQuickCode( - mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) { + ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) { const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*)); DCHECK(code_pointer != nullptr); const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*))); @@ -586,7 +590,7 @@ size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) { return visitor.frames; } -bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc) { +bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) { struct HasMoreFramesVisitor : public StackVisitor { HasMoreFramesVisitor(Thread* thread, StackWalkKind walk_kind, @@ -602,7 +606,7 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32 bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (found_frame_) { - mirror::ArtMethod* method = GetMethod(); + ArtMethod* method = GetMethod(); if (method != nullptr && !method->IsRuntimeMethod()) { has_more_frames_ = true; next_method_ = method; @@ -618,7 +622,7 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32 size_t frame_height_; bool found_frame_; bool has_more_frames_; - mirror::ArtMethod* next_method_; + ArtMethod* next_method_; uint32_t next_dex_pc_; }; 
HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight()); @@ -644,7 +648,7 @@ void StackVisitor::DescribeStack(Thread* thread) { std::string StackVisitor::DescribeLocation() const { std::string result("Visiting method '"); - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m == nullptr) { return "upcall"; } @@ -664,8 +668,34 @@ static instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame( void StackVisitor::SanityCheckFrame() const { if (kIsDebugBuild) { - mirror::ArtMethod* method = GetMethod(); - CHECK_EQ(method->GetClass(), mirror::ArtMethod::GetJavaLangReflectArtMethod()); + ArtMethod* method = GetMethod(); + auto* declaring_class = method->GetDeclaringClass(); + // Runtime methods have null declaring class. + if (!method->IsRuntimeMethod()) { + CHECK(declaring_class != nullptr); + CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass()) + << declaring_class; + } else { + CHECK(declaring_class == nullptr); + } + auto* runtime = Runtime::Current(); + auto* la = runtime->GetLinearAlloc(); + if (!la->Contains(method)) { + // Check image space. + bool in_image = false; + for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) { + if (space->IsImageSpace()) { + auto* image_space = space->AsImageSpace(); + const auto& header = image_space->GetImageHeader(); + const auto* methods = &header.GetMethodsSection(); + if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) { + in_image = true; + break; + } + } + } + CHECK(in_image) << PrettyMethod(method) << " not in linear alloc or image"; + } if (cur_quick_frame_ != nullptr) { method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_); // Frame sanity. @@ -701,7 +731,7 @@ void StackVisitor::WalkStack(bool include_transitions) { if (cur_quick_frame_ != nullptr) { // Handle quick stack frames. // Can't be both a shadow and a quick fragment. 
DCHECK(current_fragment->GetTopShadowFrame() == nullptr); - mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr(); + ArtMethod* method = *cur_quick_frame_; while (method != nullptr) { SanityCheckFrame(); bool should_continue = VisitFrame(); @@ -727,8 +757,7 @@ void StackVisitor::WalkStack(bool include_transitions) { if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) { // Skip runtime save all callee frames which are used to deliver exceptions. } else if (instrumentation_frame.interpreter_entry_) { - mirror::ArtMethod* callee = - Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs); + ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs); CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: " << PrettyMethod(GetMethod()); } else if (instrumentation_frame.method_ != GetMethod()) { @@ -747,9 +776,20 @@ void StackVisitor::WalkStack(bool include_transitions) { } cur_quick_frame_pc_ = return_pc; uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size; - cur_quick_frame_ = reinterpret_cast<StackReference<mirror::ArtMethod>*>(next_frame); + cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame); + + if (kDebugStackWalk) { + LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size + << " optimized=" << method->IsOptimized(sizeof(void*)) + << " native=" << method->IsNative() + << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode() + << "," << method->GetEntryPointFromJni() + << "," << method->GetEntryPointFromInterpreter() + << " next=" << *cur_quick_frame_; + } + cur_depth_++; - method = cur_quick_frame_->AsMirrorPtr(); + method = *cur_quick_frame_; } } else if (cur_shadow_frame_ != nullptr) { do { @@ -782,4 +822,42 @@ void JavaFrameRootInfo::Describe(std::ostream& os) const { visitor->DescribeLocation() << " vreg=" << vreg_; } +int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item, + 
uint32_t core_spills, uint32_t fp_spills, + size_t frame_size, int reg, InstructionSet isa) { + size_t pointer_size = InstructionSetPointerSize(isa); + if (kIsDebugBuild) { + auto* runtime = Runtime::Current(); + if (runtime != nullptr) { + CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size); + } + } + DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U); + DCHECK_NE(reg, -1); + int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa) + + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa) + + sizeof(uint32_t); // Filler. + int num_regs = code_item->registers_size_ - code_item->ins_size_; + int temp_threshold = code_item->registers_size_; + const int max_num_special_temps = 1; + if (reg == temp_threshold) { + // The current method pointer corresponds to special location on stack. + return 0; + } else if (reg >= temp_threshold + max_num_special_temps) { + /* + * Special temporaries may have custom locations and the logic above deals with that. + * However, non-special temporaries are placed relative to the outs. + */ + int temps_start = code_item->outs_size_ * sizeof(uint32_t) + pointer_size /* art method */; + int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t); + return temps_start + relative_offset; + } else if (reg < num_regs) { + int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t); + return locals_start + (reg * sizeof(uint32_t)); + } else { + // Handle ins. + return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + pointer_size /* art method */; + } +} + } // namespace art diff --git a/runtime/stack.h b/runtime/stack.h index 4d3657369a..38dfe1b0b1 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -31,10 +31,10 @@ namespace art { namespace mirror { - class ArtMethod; class Object; } // namespace mirror +class ArtMethod; class Context; class ShadowFrame; class HandleScope; @@ -75,7 +75,7 @@ class ShadowFrame { // Create ShadowFrame in heap for deoptimization. 
static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link, - mirror::ArtMethod* method, uint32_t dex_pc) { + ArtMethod* method, uint32_t dex_pc) { uint8_t* memory = new uint8_t[ComputeSize(num_vregs)]; return Create(num_vregs, link, method, dex_pc, memory); } @@ -88,7 +88,7 @@ class ShadowFrame { // Create ShadowFrame for interpreter using provided memory. static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link, - mirror::ArtMethod* method, uint32_t dex_pc, void* memory) { + ArtMethod* method, uint32_t dex_pc, void* memory) { ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true); return sf; } @@ -238,16 +238,11 @@ class ShadowFrame { } } - mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(method_ != nullptr); return method_; } - mirror::ArtMethod** GetMethodAddress() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(method_ != nullptr); - return &method_; - } - mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -284,7 +279,7 @@ class ShadowFrame { } private: - ShadowFrame(uint32_t num_vregs, ShadowFrame* link, mirror::ArtMethod* method, + ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method, uint32_t dex_pc, bool has_reference_array) : number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) { if (has_reference_array) { @@ -308,7 +303,7 @@ class ShadowFrame { const uint32_t number_of_vregs_; // Link to previous shadow frame or null. 
ShadowFrame* link_; - mirror::ArtMethod* method_; + ArtMethod* method_; uint32_t dex_pc_; uint32_t vregs_[0]; @@ -356,11 +351,11 @@ class PACKED(4) ManagedStack { return link_; } - StackReference<mirror::ArtMethod>* GetTopQuickFrame() const { + ArtMethod** GetTopQuickFrame() const { return top_quick_frame_; } - void SetTopQuickFrame(StackReference<mirror::ArtMethod>* top) { + void SetTopQuickFrame(ArtMethod** top) { DCHECK(top_shadow_frame_ == nullptr); top_quick_frame_ = top; } @@ -403,7 +398,7 @@ class PACKED(4) ManagedStack { bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const; private: - StackReference<mirror::ArtMethod>* top_quick_frame_; + ArtMethod** top_quick_frame_; ManagedStack* link_; ShadowFrame* top_shadow_frame_; }; @@ -430,11 +425,11 @@ class StackVisitor { void WalkStack(bool include_transitions = false) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (cur_shadow_frame_ != nullptr) { return cur_shadow_frame_->GetMethod(); } else if (cur_quick_frame_ != nullptr) { - return cur_quick_frame_->AsMirrorPtr(); + return *cur_quick_frame_; } else { return nullptr; } @@ -484,30 +479,30 @@ class StackVisitor { } // Get the method and dex pc immediately after the one that's currently being visited. 
- bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc) + bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg) + bool IsReferenceVReg(ArtMethod* m, uint16_t vreg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const + bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, + bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) + bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, + bool SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uintptr_t* GetGPRAddress(uint32_t reg) const; // This is a fast-path for getting/setting values in a quick frame. - uint32_t* GetVRegAddrFromQuickCode(StackReference<mirror::ArtMethod>* cur_quick_frame, + uint32_t* GetVRegAddrFromQuickCode(ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item, uint32_t core_spills, uint32_t fp_spills, size_t frame_size, uint16_t vreg) const { @@ -541,7 +536,7 @@ class StackVisitor { * | IN[ins-1] | {Note: resides in caller's frame} * | . | * | IN[0] | - * | caller's ArtMethod | ... StackReference<ArtMethod> + * | caller's ArtMethod | ... 
ArtMethod* * +===============================+ {Note: start of callee's frame} * | core callee-save spill | {variable sized} * +-------------------------------+ @@ -568,46 +563,16 @@ class StackVisitor { * | OUT[outs-2] | * | . | * | OUT[0] | - * | StackReference<ArtMethod> | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned + * | ArtMethod* | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned * +===============================+ */ static int GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item, uint32_t core_spills, uint32_t fp_spills, - size_t frame_size, int reg, InstructionSet isa) { - DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U); - DCHECK_NE(reg, -1); - int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa) - + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa) - + sizeof(uint32_t); // Filler. - int num_regs = code_item->registers_size_ - code_item->ins_size_; - int temp_threshold = code_item->registers_size_; - const int max_num_special_temps = 1; - if (reg == temp_threshold) { - // The current method pointer corresponds to special location on stack. - return 0; - } else if (reg >= temp_threshold + max_num_special_temps) { - /* - * Special temporaries may have custom locations and the logic above deals with that. - * However, non-special temporaries are placed relative to the outs. - */ - int temps_start = sizeof(StackReference<mirror::ArtMethod>) + - code_item->outs_size_ * sizeof(uint32_t); - int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t); - return temps_start + relative_offset; - } else if (reg < num_regs) { - int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t); - return locals_start + (reg * sizeof(uint32_t)); - } else { - // Handle ins. 
- return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + - sizeof(StackReference<mirror::ArtMethod>); - } - } + size_t frame_size, int reg, InstructionSet isa); static int GetOutVROffset(uint16_t out_num, InstructionSet isa) { - UNUSED(isa); // According to stack model, the first out is above the Method referernce. - return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t)); + return InstructionSetPointerSize(isa) + out_num * sizeof(uint32_t); } bool IsInInlinedFrame() const { @@ -618,7 +583,7 @@ class StackVisitor { return cur_quick_frame_pc_; } - StackReference<mirror::ArtMethod>* GetCurrentQuickFrame() const { + ArtMethod** GetCurrentQuickFrame() const { return cur_quick_frame_; } @@ -626,10 +591,10 @@ class StackVisitor { return cur_shadow_frame_; } - HandleScope* GetCurrentHandleScope() const { - StackReference<mirror::ArtMethod>* sp = GetCurrentQuickFrame(); - ++sp; // Skip Method*; handle scope comes next; - return reinterpret_cast<HandleScope*>(sp); + HandleScope* GetCurrentHandleScope(size_t pointer_size) const { + ArtMethod** sp = GetCurrentQuickFrame(); + // Skip ArtMethod*; handle scope comes next; + return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size); } std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -668,19 +633,19 @@ class StackVisitor { uintptr_t GetFPR(uint32_t reg) const; void SetFPR(uint32_t reg, uintptr_t value); - bool GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, + bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, + bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, + bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool GetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, + bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -688,13 +653,13 @@ class StackVisitor { uint64_t* val) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, + bool SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, + bool SetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value, @@ -706,7 +671,7 @@ class StackVisitor { Thread* const thread_; const StackWalkKind walk_kind_; ShadowFrame* cur_shadow_frame_; - StackReference<mirror::ArtMethod>* cur_quick_frame_; + ArtMethod** cur_quick_frame_; uintptr_t cur_quick_frame_pc_; // Lazily computed, number of frames in the stack. 
size_t num_frames_; diff --git a/runtime/stride_iterator.h b/runtime/stride_iterator.h new file mode 100644 index 0000000000..5971524d81 --- /dev/null +++ b/runtime/stride_iterator.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_STRIDE_ITERATOR_H_ +#define ART_RUNTIME_STRIDE_ITERATOR_H_ + +#include <iterator> + +namespace art { + +template<typename T> +class StrideIterator : public std::iterator<std::random_access_iterator_tag, T> { + public: + StrideIterator(const StrideIterator&) = default; + StrideIterator(StrideIterator&&) = default; + StrideIterator& operator=(const StrideIterator&) = default; + StrideIterator& operator=(StrideIterator&&) = default; + + StrideIterator(uintptr_t ptr, size_t stride) + : ptr_(ptr), stride_(stride) { + } + + bool operator==(const StrideIterator& other) const { + return ptr_ == other.ptr_; + } + + bool operator!=(const StrideIterator& other) const { + return !(*this == other); + } + + StrideIterator operator++() { // Value after modification. 
+ ptr_ += stride_; + return *this; + } + + StrideIterator operator++(int) { + auto temp = *this; + ptr_ += stride_; + return temp; + } + + T& operator*() const { + return *reinterpret_cast<T*>(ptr_); + } + + T* operator->() const { + return &**this; + } + + private: + uintptr_t ptr_; + const size_t stride_; +}; + +} // namespace art + +#endif // ART_RUNTIME_STRIDE_ITERATOR_H_ diff --git a/runtime/thread.cc b/runtime/thread.cc index b3b55c489c..f37960f1c7 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -33,6 +33,7 @@ #include "arch/context.h" #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/bit_utils.h" #include "base/mutex.h" #include "base/timing_logger.h" @@ -50,7 +51,6 @@ #include "handle_scope-inl.h" #include "indirect_reference_table-inl.h" #include "jni_internal.h" -#include "mirror/art_method-inl.h" #include "mirror/class_loader.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" @@ -756,7 +756,7 @@ bool Thread::RequestCheckpoint(Closure* function) { union StateAndFlags new_state_and_flags; new_state_and_flags.as_int = old_state_and_flags.as_int; new_state_and_flags.as_struct.flags |= kCheckpointRequest; - bool success =tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent( + bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent( old_state_and_flags.as_int, new_state_and_flags.as_int); if (UNLIKELY(!success)) { // The thread changed state before the checkpoint was installed. 
@@ -958,7 +958,7 @@ struct StackDumpVisitor : public StackVisitor { } bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; } @@ -1029,7 +1029,7 @@ struct StackDumpVisitor : public StackVisitor { std::ostream& os; const Thread* thread; const bool can_allocate; - mirror::ArtMethod* last_method; + ArtMethod* last_method; int last_line_number; int repetition_count; int frame_count; @@ -1060,7 +1060,7 @@ static bool ShouldShowNativeStack(const Thread* thread) // We don't just check kNative because native methods will be in state kSuspended if they're // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the // thread-startup states if it's early enough in their life cycle (http://b/7432159). - mirror::ArtMethod* current_method = thread->GetCurrentMethod(nullptr); + ArtMethod* current_method = thread->GetCurrentMethod(nullptr); return current_method != nullptr && current_method->IsNative(); } @@ -1541,7 +1541,7 @@ class CountStackDepthVisitor : public StackVisitor { // We want to skip frames up to and including the exception's constructor. 
// Note we also skip the frame if it doesn't have a method (namely the callee // save frame) - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (skipping_ && !m->IsRuntimeMethod() && !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) { skipping_ = false; @@ -1578,63 +1578,54 @@ class BuildInternalStackTraceVisitor : public StackVisitor { self_(self), skip_depth_(skip_depth), count_(0), - dex_pc_trace_(nullptr), - method_trace_(nullptr) {} + trace_(nullptr), + pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {} bool Init(int depth) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Allocate method trace with an extra slot that will hold the PC trace - StackHandleScope<1> hs(self_); - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Handle<mirror::ObjectArray<mirror::Object>> method_trace( - hs.NewHandle(class_linker->AllocObjectArray<mirror::Object>(self_, depth + 1))); - if (method_trace.Get() == nullptr) { + // Allocate method trace with format [method pointers][pcs]. + auto* cl = Runtime::Current()->GetClassLinker(); + trace_ = cl->AllocPointerArray(self_, depth * 2); + if (trace_ == nullptr) { + self_->AssertPendingOOMException(); return false; } - mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth); - if (dex_pc_trace == nullptr) { - return false; - } - // Save PC trace in last element of method trace, also places it into the - // object graph. - // We are called from native: use non-transactional mode. - method_trace->Set<kTransactionActive>(depth, dex_pc_trace); - // Set the Object*s and assert that no thread suspension is now possible. + // If we are called from native, use non-transactional mode.
const char* last_no_suspend_cause = self_->StartAssertNoThreadSuspension("Building internal stack trace"); CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause; - method_trace_ = method_trace.Get(); - dex_pc_trace_ = dex_pc_trace; return true; } virtual ~BuildInternalStackTraceVisitor() { - if (method_trace_ != nullptr) { + if (trace_ != nullptr) { self_->EndAssertNoThreadSuspension(nullptr); } } bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (method_trace_ == nullptr || dex_pc_trace_ == nullptr) { + if (trace_ == nullptr) { return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. } if (skip_depth_ > 0) { skip_depth_--; return true; } - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; // Ignore runtime frames (in particular callee save). } - method_trace_->Set<kTransactionActive>(count_, m); - dex_pc_trace_->Set<kTransactionActive>(count_, - m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc()); + trace_->SetElementPtrSize<kTransactionActive>( + count_, m, pointer_size_); + trace_->SetElementPtrSize<kTransactionActive>( + trace_->GetLength() / 2 + count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc(), + pointer_size_); ++count_; return true; } - mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const { - return method_trace_; + mirror::PointerArray* GetInternalStackTrace() const { + return trace_; } private: @@ -1643,10 +1634,10 @@ class BuildInternalStackTraceVisitor : public StackVisitor { int32_t skip_depth_; // Current position down stack trace. uint32_t count_; - // Array of dex PC values. - mirror::IntArray* dex_pc_trace_; - // An array of the methods on the stack, the last entry is a reference to the PC trace. - mirror::ObjectArray<mirror::Object>* method_trace_; + // An array of the methods on the stack, the last entries are the dex PCs. + mirror::PointerArray* trace_; + // For cross compilation. 
+ size_t pointer_size_; }; template<bool kTransactionActive> @@ -1665,13 +1656,16 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable return nullptr; // Allocation failed. } build_trace_visitor.WalkStack(); - mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace(); + mirror::PointerArray* trace = build_trace_visitor.GetInternalStackTrace(); if (kIsDebugBuild) { - for (int32_t i = 0; i < trace->GetLength(); ++i) { - CHECK(trace->Get(i) != nullptr); + // Second half is dex PCs. + for (uint32_t i = 0; i < static_cast<uint32_t>(trace->GetLength() / 2); ++i) { + auto* method = trace->GetElementPtrSize<ArtMethod*>( + i, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); + CHECK(method != nullptr); } } - return soa.AddLocalReference<jobjectArray>(trace); + return soa.AddLocalReference<jobject>(trace); } template jobject Thread::CreateInternalStackTrace<false>( const ScopedObjectAccessAlreadyRunnable& soa) const; @@ -1688,9 +1682,9 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray( const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array, int* stack_depth) { // Decode the internal stack trace into the depth, method trace and PC trace - int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1; + int32_t depth = soa.Decode<mirror::PointerArray*>(internal)->GetLength() / 2; - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + auto* cl = Runtime::Current()->GetClassLinker(); jobjectArray result; @@ -1704,7 +1698,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray( } else { // Create java_trace array and place in local reference table mirror::ObjectArray<mirror::StackTraceElement>* java_traces = - class_linker->AllocStackTraceElementArray(soa.Self(), depth); + cl->AllocStackTraceElementArray(soa.Self(), depth); if (java_traces == nullptr) { return nullptr; } @@ -1716,10 +1710,11 @@ 
jobjectArray Thread::InternalStackTraceToStackTraceElementArray( } for (int32_t i = 0; i < depth; ++i) { - mirror::ObjectArray<mirror::Object>* method_trace = - soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal); + auto* method_trace = soa.Decode<mirror::PointerArray*>(internal); // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) - mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i)); + ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, sizeof(void*)); + uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>( + i + method_trace->GetLength() / 2, sizeof(void*)); int32_t line_number; StackHandleScope<3> hs(soa.Self()); auto class_name_object(hs.NewHandle<mirror::String>(nullptr)); @@ -1729,27 +1724,28 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray( class_name_object.Assign(method->GetDeclaringClass()->GetName()); // source_name_object intentionally left null for proxy methods } else { - mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth)); - uint32_t dex_pc = pc_trace->Get(i); line_number = method->GetLineNumFromDexPC(dex_pc); // Allocate element, potentially triggering GC // TODO: reuse class_name_object via Class::name_? 
const char* descriptor = method->GetDeclaringClassDescriptor(); CHECK(descriptor != nullptr); std::string class_name(PrettyDescriptor(descriptor)); - class_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str())); + class_name_object.Assign( + mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str())); if (class_name_object.Get() == nullptr) { + soa.Self()->AssertPendingOOMException(); return nullptr; } const char* source_file = method->GetDeclaringClassSourceFile(); if (source_file != nullptr) { source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file)); if (source_name_object.Get() == nullptr) { + soa.Self()->AssertPendingOOMException(); return nullptr; } } } - const char* method_name = method->GetName(); + const char* method_name = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetName(); CHECK(method_name != nullptr); Handle<mirror::String> method_name_object( hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name))); @@ -1790,7 +1786,7 @@ void Thread::ThrowNewException(const char* exception_class_descriptor, static mirror::ClassLoader* GetCurrentClassLoader(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* method = self->GetCurrentMethod(nullptr); + ArtMethod* method = self->GetCurrentMethod(nullptr); return method != nullptr ? 
method->GetDeclaringClass()->GetClassLoader() : nullptr; @@ -1805,9 +1801,9 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException())); ClearException(); Runtime* runtime = Runtime::Current(); + auto* cl = runtime->GetClassLinker(); Handle<mirror::Class> exception_class( - hs.NewHandle(runtime->GetClassLinker()->FindClass(this, exception_class_descriptor, - class_loader))); + hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader))); if (UNLIKELY(exception_class.Get() == nullptr)) { CHECK(IsExceptionPending()); LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor); @@ -1852,8 +1848,8 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, signature = "(Ljava/lang/Throwable;)V"; } } - mirror::ArtMethod* exception_init_method = - exception_class->FindDeclaredDirectMethod("<init>", signature); + ArtMethod* exception_init_method = + exception_class->FindDeclaredDirectMethod("<init>", signature, cl->GetImagePointerSize()); CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in " << PrettyDescriptor(exception_class_descriptor); @@ -2129,7 +2125,7 @@ struct CurrentMethodVisitor FINAL : public StackVisitor { dex_pc_(0), abort_on_error_(abort_on_error) {} bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { // Continue if this is a runtime method. 
return true; @@ -2142,12 +2138,12 @@ struct CurrentMethodVisitor FINAL : public StackVisitor { return false; } mirror::Object* this_object_; - mirror::ArtMethod* method_; + ArtMethod* method_; uint32_t dex_pc_; const bool abort_on_error_; }; -mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const { +ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const { CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error); visitor.WalkStack(false); if (dex_pc != nullptr) { @@ -2189,9 +2185,7 @@ class ReferenceMapVisitor : public StackVisitor { } void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod** method_addr = shadow_frame->GetMethodAddress(); - visitor_(reinterpret_cast<mirror::Object**>(method_addr), 0 /*ignored*/, this); - mirror::ArtMethod* m = *method_addr; + ArtMethod* m = shadow_frame->GetMethod(); DCHECK(m != nullptr); size_t num_regs = shadow_frame->NumberOfVRegs(); if (m->IsNative() || shadow_frame->HasReferenceArray()) { @@ -2233,17 +2227,15 @@ class ReferenceMapVisitor : public StackVisitor { private: void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame(); - mirror::ArtMethod* m = cur_quick_frame->AsMirrorPtr(); - mirror::ArtMethod* old_method = m; - visitor_(reinterpret_cast<mirror::Object**>(&m), 0 /*ignored*/, this); - if (m != old_method) { - cur_quick_frame->Assign(m); - } + auto* cur_quick_frame = GetCurrentQuickFrame(); + DCHECK(cur_quick_frame != nullptr); + auto* m = *cur_quick_frame; // Process register map (which native and runtime methods don't have) if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) { if (m->IsOptimized(sizeof(void*))) { + auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>( + reinterpret_cast<uintptr_t>(cur_quick_frame)); Runtime* runtime = Runtime::Current(); 
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*)); uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point); @@ -2253,8 +2245,7 @@ class ReferenceMapVisitor : public StackVisitor { // Visit stack entries that hold pointers. for (size_t i = 0; i < mask.size_in_bits(); ++i) { if (mask.LoadBit(i)) { - StackReference<mirror::Object>* ref_addr = - reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame) + i; + auto* ref_addr = vreg_base + i; mirror::Object* ref = ref_addr->AsMirrorPtr(); if (ref != nullptr) { mirror::Object* new_ref = ref; @@ -2290,7 +2281,7 @@ class ReferenceMapVisitor : public StackVisitor { uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point); const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset); DCHECK(reg_bitmap != nullptr); - const void* code_pointer = mirror::ArtMethod::EntryPointToCodePointer(entry_point); + const void* code_pointer = ArtMethod::EntryPointToCodePointer(entry_point); const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*))); QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer); // For all dex registers in the bitmap @@ -2361,9 +2352,6 @@ void Thread::VisitRoots(RootVisitor* visitor) { if (tlsPtr_.debug_invoke_req != nullptr) { tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id)); } - if (tlsPtr_.single_step_control != nullptr) { - tlsPtr_.single_step_control->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id)); - } if (tlsPtr_.deoptimization_shadow_frame != nullptr) { RootCallbackVisitor visitor_to_callback(visitor, thread_id); ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback); @@ -2392,9 +2380,6 @@ void Thread::VisitRoots(RootVisitor* visitor) { ReleaseLongJumpContext(context); for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) { 
visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id)); - DCHECK(frame.method_ != nullptr); - visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&frame.method_), - RootInfo(kRootVMInternal, thread_id)); } } diff --git a/runtime/thread.h b/runtime/thread.h index 96e0916dd5..8c2e215ff8 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -52,7 +52,6 @@ namespace collector { } // namespace gc namespace mirror { - class ArtMethod; class Array; class Class; class ClassLoader; @@ -69,6 +68,7 @@ namespace verifier { class MethodVerifier; } // namespace verifier +class ArtMethod; class BaseMutex; class ClassLinker; class Closure; @@ -374,7 +374,7 @@ class Thread { // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will // abort the runtime iff abort_on_error is true. - mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const + ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns whether the given exception was thrown by the current Java method being executed @@ -382,7 +382,7 @@ class Thread { bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method) { + void SetTopOfStack(ArtMethod** top_method) { tlsPtr_.managed_stack.SetTopQuickFrame(top_method); } @@ -810,11 +810,11 @@ class Thread { return tlsPtr_.instrumentation_stack; } - std::vector<mirror::ArtMethod*>* GetStackTraceSample() const { + std::vector<ArtMethod*>* GetStackTraceSample() const { return tlsPtr_.stack_trace_sample; } - void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) { + void SetStackTraceSample(std::vector<ArtMethod*>* sample) { tlsPtr_.stack_trace_sample = sample; } @@ -1161,7 +1161,7 @@ class Thread { size_t stack_size; // Pointer to previous stack trace captured by sampling 
profiler. - std::vector<mirror::ArtMethod*>* stack_trace_sample; + std::vector<ArtMethod*>* stack_trace_sample; // The next thread in the wait set this thread is part of or null if not waiting. Thread* wait_next; diff --git a/runtime/trace.cc b/runtime/trace.cc index f8747168b3..d3b3af871f 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -22,6 +22,7 @@ #define ATRACE_TAG ATRACE_TAG_DALVIK #include "cutils/trace.h" +#include "art_method-inl.h" #include "base/casts.h" #include "base/stl_util.h" #include "base/time_utils.h" @@ -31,7 +32,6 @@ #include "debugger.h" #include "dex_file-inl.h" #include "instrumentation.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" #include "mirror/object_array-inl.h" @@ -98,7 +98,7 @@ class BuildStackTraceVisitor : public StackVisitor { method_trace_(Trace::AllocStackTrace()) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); // Ignore runtime frames (in particular callee save). if (!m->IsRuntimeMethod()) { method_trace_->push_back(m); @@ -107,12 +107,12 @@ class BuildStackTraceVisitor : public StackVisitor { } // Returns a stack trace where the topmost frame corresponds with the first element of the vector. 
- std::vector<mirror::ArtMethod*>* GetStackTrace() const { + std::vector<ArtMethod*>* GetStackTrace() const { return method_trace_; } private: - std::vector<mirror::ArtMethod*>* const method_trace_; + std::vector<ArtMethod*>* const method_trace_; }; static const char kTraceTokenChar = '*'; @@ -120,42 +120,41 @@ static const uint16_t kTraceHeaderLength = 32; static const uint32_t kTraceMagicValue = 0x574f4c53; static const uint16_t kTraceVersionSingleClock = 2; static const uint16_t kTraceVersionDualClock = 3; -static const uint16_t kTraceRecordSizeSingleClock = 10; // using v2 -static const uint16_t kTraceRecordSizeDualClock = 14; // using v3 with two timestamps +static const uint16_t kTraceRecordSizeSingleClock = 14; // using v2 +static const uint16_t kTraceRecordSizeDualClock = 18; // using v3 with two timestamps TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource; Trace* volatile Trace::the_trace_ = nullptr; pthread_t Trace::sampling_pthread_ = 0U; -std::unique_ptr<std::vector<mirror::ArtMethod*>> Trace::temp_stack_trace_; +std::unique_ptr<std::vector<ArtMethod*>> Trace::temp_stack_trace_; // The key identifying the tracer to update instrumentation. 
static constexpr const char* kTracerInstrumentationKey = "Tracer"; -static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) { - return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask); +static ArtMethod* DecodeTraceMethodId(uint64_t tmid) { + return reinterpret_cast<ArtMethod*>(tmid & ~kTraceMethodActionMask); } static TraceAction DecodeTraceAction(uint32_t tmid) { return static_cast<TraceAction>(tmid & kTraceMethodActionMask); } -static uint32_t EncodeTraceMethodAndAction(mirror::ArtMethod* method, - TraceAction action) { - uint32_t tmid = PointerToLowMemUInt32(method) | action; +static uint64_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action) { + auto tmid = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(method)) | action; DCHECK_EQ(method, DecodeTraceMethodId(tmid)); return tmid; } -std::vector<mirror::ArtMethod*>* Trace::AllocStackTrace() { +std::vector<ArtMethod*>* Trace::AllocStackTrace() { if (temp_stack_trace_.get() != nullptr) { return temp_stack_trace_.release(); } else { - return new std::vector<mirror::ArtMethod*>(); + return new std::vector<ArtMethod*>(); } } -void Trace::FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace) { +void Trace::FreeStackTrace(std::vector<ArtMethod*>* stack_trace) { stack_trace->clear(); temp_stack_trace_.reset(stack_trace); } @@ -248,22 +247,22 @@ static void Append8LE(uint8_t* buf, uint64_t val) { static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { BuildStackTraceVisitor build_trace_visitor(thread); build_trace_visitor.WalkStack(); - std::vector<mirror::ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace(); + std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace(); Trace* the_trace = reinterpret_cast<Trace*>(arg); the_trace->CompareAndUpdateStackTrace(thread, stack_trace); } static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg ATTRIBUTE_UNUSED) { thread->SetTraceClockBase(0); - 
std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample(); + std::vector<ArtMethod*>* stack_trace = thread->GetStackTraceSample(); thread->SetStackTraceSample(nullptr); delete stack_trace; } void Trace::CompareAndUpdateStackTrace(Thread* thread, - std::vector<mirror::ArtMethod*>* stack_trace) { + std::vector<ArtMethod*>* stack_trace) { CHECK_EQ(pthread_self(), sampling_pthread_); - std::vector<mirror::ArtMethod*>* old_stack_trace = thread->GetStackTraceSample(); + std::vector<ArtMethod*>* old_stack_trace = thread->GetStackTraceSample(); // Update the thread's stack trace sample. thread->SetStackTraceSample(stack_trace); // Read timer clocks to use for all events in this trace. @@ -273,7 +272,7 @@ void Trace::CompareAndUpdateStackTrace(Thread* thread, if (old_stack_trace == nullptr) { // If there's no previous stack trace sample for this thread, log an entry event for all // methods in the trace. - for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); + for (std::vector<ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); rit != stack_trace->rend(); ++rit) { LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered, thread_clock_diff, wall_clock_diff); @@ -281,15 +280,15 @@ void Trace::CompareAndUpdateStackTrace(Thread* thread, } else { // If there's a previous stack trace for this thread, diff the traces and emit entry and exit // events accordingly. - std::vector<mirror::ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin(); - std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); + std::vector<ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin(); + std::vector<ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); // Iterate bottom-up over both traces until there's a difference between them. 
while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) { old_rit++; rit++; } // Iterate top-down over the old trace until the point where they differ, emitting exit events. - for (std::vector<mirror::ArtMethod*>::iterator old_it = old_stack_trace->begin(); + for (std::vector<ArtMethod*>::iterator old_it = old_stack_trace->begin(); old_it != old_rit.base(); ++old_it) { LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited, thread_clock_diff, wall_clock_diff); @@ -640,14 +639,22 @@ Trace::~Trace() { delete streaming_lock_; } +static uint64_t ReadBytes(uint8_t* buf, size_t bytes) { + uint64_t ret = 0; + for (size_t i = 0; i < bytes; ++i) { + ret |= static_cast<uint64_t>(buf[i]) << (i * 8); + } + return ret; +} + static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { uint8_t* ptr = buf + kTraceHeaderLength; uint8_t* end = buf + buf_size; while (ptr < end) { - uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); - mirror::ArtMethod* method = DecodeTraceMethodId(tmid); + uint64_t tmid = ReadBytes(ptr + 2, sizeof(tmid)); + ArtMethod* method = DecodeTraceMethodId(tmid); TraceAction action = DecodeTraceAction(tmid); LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action); ptr += GetRecordSize(clock_source); @@ -656,12 +663,12 @@ static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source static void GetVisitedMethodsFromBitSets( const std::map<mirror::DexCache*, DexIndexBitSet*>& seen_methods, - std::set<mirror::ArtMethod*>* visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + std::set<ArtMethod*>* visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { for (auto& e : seen_methods) { DexIndexBitSet* bit_set = e.second; for (uint32_t i = 0; i < bit_set->size(); ++i) { if ((*bit_set)[i]) { - visited_methods->insert(e.first->GetResolvedMethod(i)); + 
visited_methods->insert(e.first->GetResolvedMethod(i, sizeof(void*))); } } } @@ -670,7 +677,7 @@ static void GetVisitedMethodsFromBitSets( void Trace::FinishTracing() { size_t final_offset = 0; - std::set<mirror::ArtMethod*> visited_methods; + std::set<ArtMethod*> visited_methods; if (trace_output_mode_ == TraceOutputMode::kStreaming) { // Write the secondary file with all the method names. GetVisitedMethodsFromBitSets(seen_methods_, &visited_methods); @@ -761,14 +768,14 @@ void Trace::FinishTracing() { } void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t new_dex_pc) { + ArtMethod* method, uint32_t new_dex_pc) { UNUSED(thread, this_object, method, new_dex_pc); // We're not recorded to listen to this kind of event, so complain. LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc; } void Trace::FieldRead(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field) + ArtMethod* method, uint32_t dex_pc, ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { UNUSED(thread, this_object, method, dex_pc, field); // We're not recorded to listen to this kind of event, so complain. 
@@ -776,7 +783,7 @@ void Trace::FieldRead(Thread* thread, mirror::Object* this_object, } void Trace::FieldWritten(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field, + ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { UNUSED(thread, this_object, method, dex_pc, field, field_value); @@ -785,7 +792,7 @@ void Trace::FieldWritten(Thread* thread, mirror::Object* this_object, } void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) { + ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) { uint32_t thread_clock_diff = 0; uint32_t wall_clock_diff = 0; ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); @@ -794,7 +801,7 @@ void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_ } void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED, + ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED, const JValue& return_value ATTRIBUTE_UNUSED) { uint32_t thread_clock_diff = 0; uint32_t wall_clock_diff = 0; @@ -804,7 +811,7 @@ void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_U } void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED, - mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) { + ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) { uint32_t thread_clock_diff = 0; uint32_t wall_clock_diff = 0; ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); @@ -818,7 +825,7 @@ void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) LOG(ERROR) << "Unexpected exception caught event in tracing"; } -void Trace::BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method, +void Trace::BackwardBranch(Thread* /*thread*/, ArtMethod* method, 
int32_t /*dex_pc_offset*/) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method); @@ -840,11 +847,12 @@ void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wa } } -bool Trace::RegisterMethod(mirror::ArtMethod* method) { +bool Trace::RegisterMethod(ArtMethod* method) { mirror::DexCache* dex_cache = method->GetDexCache(); - if (dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) != method) { - DCHECK(dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) == nullptr); - dex_cache->SetResolvedMethod(method->GetDexMethodIndex(), method); + auto* resolved_method = dex_cache->GetResolvedMethod(method->GetDexMethodIndex(), sizeof(void*)); + if (resolved_method != method) { + DCHECK(resolved_method == nullptr); + dex_cache->SetResolvedMethod(method->GetDexMethodIndex(), method, sizeof(void*)); } if (seen_methods_.find(dex_cache) == seen_methods_.end()) { seen_methods_.insert(std::make_pair(dex_cache, new DexIndexBitSet())); @@ -869,8 +877,9 @@ bool Trace::RegisterThread(Thread* thread) { return false; } -static std::string GetMethodLine(mirror::ArtMethod* method) +static std::string GetMethodLine(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + method = method->GetInterfaceMethodIfProxy(sizeof(void*)); return StringPrintf("%p\t%s\t%s\t%s\t%s\n", method, PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(), method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile()); @@ -902,7 +911,7 @@ void Trace::WriteToBuf(const uint8_t* src, size_t src_size) { memcpy(buf_.get() + old_offset, src, src_size); } -void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method, +void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method, instrumentation::Instrumentation::InstrumentationEvent event, uint32_t thread_clock_diff, uint32_t wall_clock_diff) { // Advance cur_offset_ atomically. 
@@ -936,11 +945,11 @@ void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method, UNIMPLEMENTED(FATAL) << "Unexpected event: " << event; } - uint32_t method_value = EncodeTraceMethodAndAction(method, action); + uint64_t method_value = EncodeTraceMethodAndAction(method, action); // Write data uint8_t* ptr; - static constexpr size_t kPacketSize = 14U; // The maximum size of data in a packet. + static constexpr size_t kPacketSize = 18U; // The maximum size of data in a packet. uint8_t stack_buf[kPacketSize]; // Space to store a packet when in streaming mode. if (trace_output_mode_ == TraceOutputMode::kStreaming) { ptr = stack_buf; @@ -949,8 +958,8 @@ void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method, } Append2LE(ptr, thread->GetTid()); - Append4LE(ptr + 2, method_value); - ptr += 6; + Append8LE(ptr + 2, method_value); + ptr += 10; if (UseThreadCpuClock()) { Append4LE(ptr, thread_clock_diff); @@ -959,7 +968,7 @@ void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method, if (UseWallClock()) { Append4LE(ptr, wall_clock_diff); } - static_assert(kPacketSize == 2 + 4 + 4 + 4, "Packet size incorrect."); + static_assert(kPacketSize == 2 + 8 + 4 + 4, "Packet size incorrect."); if (trace_output_mode_ == TraceOutputMode::kStreaming) { MutexLock mu(Thread::Current(), *streaming_lock_); // To serialize writing. 
@@ -990,19 +999,19 @@ void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method, } void Trace::GetVisitedMethods(size_t buf_size, - std::set<mirror::ArtMethod*>* visited_methods) { + std::set<ArtMethod*>* visited_methods) { uint8_t* ptr = buf_.get() + kTraceHeaderLength; uint8_t* end = buf_.get() + buf_size; while (ptr < end) { - uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); - mirror::ArtMethod* method = DecodeTraceMethodId(tmid); + uint64_t tmid = ReadBytes(ptr + 2, sizeof(tmid)); + ArtMethod* method = DecodeTraceMethodId(tmid); visited_methods->insert(method); ptr += GetRecordSize(clock_source_); } } -void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) { +void Trace::DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods) { for (const auto& method : visited_methods) { os << GetMethodLine(method); } diff --git a/runtime/trace.h b/runtime/trace.h index df6d5e7b71..1539c066c5 100644 --- a/runtime/trace.h +++ b/runtime/trace.h @@ -35,11 +35,11 @@ namespace art { namespace mirror { - class ArtMethod; class DexCache; } // namespace mirror class ArtField; +class ArtMethod; class Thread; using DexIndexBitSet = std::bitset<65536>; @@ -99,38 +99,38 @@ class Trace FINAL : public instrumentation::InstrumentationListener { void MeasureClockOverhead(); uint32_t GetClockOverheadNanoSeconds(); - void CompareAndUpdateStackTrace(Thread* thread, std::vector<mirror::ArtMethod*>* stack_trace) + void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // InstrumentationListener implementation. 
void MethodEntered(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc) + ArtMethod* method, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; void MethodExited(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, + ArtMethod* method, uint32_t dex_pc, const JValue& return_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; void MethodUnwind(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc) + ArtMethod* method, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; void DexPcMoved(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t new_dex_pc) + ArtMethod* method, uint32_t new_dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; void FieldRead(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field) + ArtMethod* method, uint32_t dex_pc, ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; void FieldWritten(Thread* thread, mirror::Object* this_object, - mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field, + ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; - void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset) + void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; // Reuse an old stack trace if it exists, otherwise allocate a new one. - static std::vector<mirror::ArtMethod*>* AllocStackTrace(); + static std::vector<ArtMethod*>* AllocStackTrace(); // Clear and store an old stack trace for later use. 
- static void FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace); + static void FreeStackTrace(std::vector<ArtMethod*>* stack_trace); // Save id and name of a thread before it exits. static void StoreExitingThreadInfo(Thread* thread); @@ -150,20 +150,20 @@ class Trace FINAL : public instrumentation::InstrumentationListener { void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff); - void LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method, + void LogMethodTraceEvent(Thread* thread, ArtMethod* method, instrumentation::Instrumentation::InstrumentationEvent event, uint32_t thread_clock_diff, uint32_t wall_clock_diff) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Methods to output traced methods and threads. - void GetVisitedMethods(size_t end_offset, std::set<mirror::ArtMethod*>* visited_methods); - void DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) + void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods); + void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_); // Methods to register seen entitites in streaming mode. The methods return true if the entity // is newly discovered. - bool RegisterMethod(mirror::ArtMethod* method) + bool RegisterMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_); bool RegisterThread(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_); @@ -183,7 +183,7 @@ class Trace FINAL : public instrumentation::InstrumentationListener { static pthread_t sampling_pthread_; // Used to remember an unused stack trace to avoid re-allocation during sampling. 
- static std::unique_ptr<std::vector<mirror::ArtMethod*>> temp_stack_trace_; + static std::unique_ptr<std::vector<ArtMethod*>> temp_stack_trace_; // File to write trace data out to, null if direct to ddms. std::unique_ptr<File> trace_file_; diff --git a/runtime/transaction.cc b/runtime/transaction.cc index ab821d7714..d91860bd83 100644 --- a/runtime/transaction.cc +++ b/runtime/transaction.cc @@ -20,6 +20,7 @@ #include "base/logging.h" #include "gc/accounting/card_table-inl.h" #include "intern_table.h" +#include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc index aee2c543ac..8279a26f52 100644 --- a/runtime/transaction_test.cc +++ b/runtime/transaction_test.cc @@ -17,10 +17,10 @@ #include "transaction.h" #include "art_field-inl.h" +#include "art_method-inl.h" #include "class_linker-inl.h" #include "common_runtime_test.h" #include "mirror/array-inl.h" -#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" namespace art { diff --git a/runtime/utils.cc b/runtime/utils.cc index 2671b46bad..4923342e8e 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -26,10 +26,10 @@ #include <memory> #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "dex_file-inl.h" -#include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/object-inl.h" @@ -344,10 +344,13 @@ std::string PrettyReturnType(const char* signature) { return PrettyDescriptor(return_type); } -std::string PrettyMethod(mirror::ArtMethod* m, bool with_signature) { +std::string PrettyMethod(ArtMethod* m, bool with_signature) { if (m == nullptr) { return "null"; } + if (!m->IsRuntimeMethod()) { + m = m->GetInterfaceMethodIfProxy(Runtime::Current()->GetClassLinker()->GetImagePointerSize()); + } std::string 
result(PrettyDescriptor(m->GetDeclaringClassDescriptor())); result += '.'; result += m->GetName(); @@ -595,7 +598,7 @@ std::string DescriptorToName(const char* descriptor) { return descriptor; } -std::string JniShortName(mirror::ArtMethod* m) { +std::string JniShortName(ArtMethod* m) { std::string class_name(m->GetDeclaringClassDescriptor()); // Remove the leading 'L' and trailing ';'... CHECK_EQ(class_name[0], 'L') << class_name; @@ -613,7 +616,7 @@ std::string JniShortName(mirror::ArtMethod* m) { return short_name; } -std::string JniLongName(mirror::ArtMethod* m) { +std::string JniLongName(ArtMethod* m) { std::string long_name; long_name += JniShortName(m); long_name += "__"; @@ -1088,7 +1091,7 @@ static void Addr2line(const std::string& map_src, uintptr_t offset, std::ostream #endif void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix, - mirror::ArtMethod* current_method, void* ucontext_ptr) { + ArtMethod* current_method, void* ucontext_ptr) { #if __linux__ // b/18119146 if (RUNNING_ON_VALGRIND != 0) { diff --git a/runtime/utils.h b/runtime/utils.h index e7532e1c84..1ef98e70d5 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -34,10 +34,10 @@ namespace art { class ArtField; +class ArtMethod; class DexFile; namespace mirror { -class ArtMethod; class Class; class Object; class String; @@ -125,7 +125,7 @@ std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_t // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or // "a.b.C.m(II)V" (depending on the value of 'with_signature'). 
-std::string PrettyMethod(mirror::ArtMethod* m, bool with_signature = true) +std::string PrettyMethod(ArtMethod* m, bool with_signature = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true); @@ -181,10 +181,10 @@ bool IsValidDescriptor(const char* s); // "Ljava/lang/String;" bool IsValidMemberName(const char* s); // Returns the JNI native function name for the non-overloaded method 'm'. -std::string JniShortName(mirror::ArtMethod* m) +std::string JniShortName(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the JNI native function name for the overloaded method 'm'. -std::string JniLongName(mirror::ArtMethod* m) +std::string JniLongName(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool ReadFileToString(const std::string& file_name, std::string* result); @@ -221,7 +221,7 @@ void SetThreadName(const char* thread_name); // Dumps the native stack for thread 'tid' to 'os'. void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "", - mirror::ArtMethod* current_method = nullptr, void* ucontext = nullptr) + ArtMethod* current_method = nullptr, void* ucontext = nullptr) NO_THREAD_SAFETY_ANALYSIS; // Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86. 
@@ -320,6 +320,9 @@ inline bool TestBitmap(size_t idx, const uint8_t* bitmap) { return ((bitmap[idx / kBitsPerByte] >> (idx % kBitsPerByte)) & 0x01) != 0; } +static inline constexpr bool ValidPointerSize(size_t pointer_size) { + return pointer_size == 4 || pointer_size == 8; +} } // namespace art diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc index 8a7f8052ce..66e38b1c13 100644 --- a/runtime/utils_test.cc +++ b/runtime/utils_test.cc @@ -185,19 +185,19 @@ TEST_F(UtilsTest, JniShortName_JniLongName) { ScopedObjectAccess soa(Thread::Current()); mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;"); ASSERT_TRUE(c != nullptr); - mirror::ArtMethod* m; + ArtMethod* m; - m = c->FindVirtualMethod("charAt", "(I)C"); + m = c->FindVirtualMethod("charAt", "(I)C", sizeof(void*)); ASSERT_TRUE(m != nullptr); EXPECT_EQ("Java_java_lang_String_charAt", JniShortName(m)); EXPECT_EQ("Java_java_lang_String_charAt__I", JniLongName(m)); - m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I"); + m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I", sizeof(void*)); ASSERT_TRUE(m != nullptr); EXPECT_EQ("Java_java_lang_String_indexOf", JniShortName(m)); EXPECT_EQ("Java_java_lang_String_indexOf__Ljava_lang_String_2I", JniLongName(m)); - m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;"); + m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;", sizeof(void*)); ASSERT_TRUE(m != nullptr); EXPECT_EQ("Java_java_lang_String_copyValueOf", JniShortName(m)); EXPECT_EQ("Java_java_lang_String_copyValueOf___3CII", JniLongName(m)); diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index e6801ded5a..aa54b170a7 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -19,6 +19,7 @@ #include <iostream> #include "art_field-inl.h" +#include "art_method-inl.h" #include "base/logging.h" #include "base/mutex-inl.h" #include "base/time_utils.h" @@ -32,7 +33,6 
@@ #include "indenter.h" #include "intern_table.h" #include "leb128.h" -#include "mirror/art_method-inl.h" #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" @@ -115,15 +115,13 @@ static void SafelyMarkAllRegistersAsConflicts(MethodVerifier* verifier, Register } MethodVerifier::FailureKind MethodVerifier::VerifyMethod( - mirror::ArtMethod* method, bool allow_soft_failures, std::string* error ATTRIBUTE_UNUSED) { - Thread* self = Thread::Current(); - StackHandleScope<3> hs(self); + ArtMethod* method, bool allow_soft_failures, std::string* error ATTRIBUTE_UNUSED) { + StackHandleScope<2> hs(Thread::Current()); mirror::Class* klass = method->GetDeclaringClass(); auto h_dex_cache(hs.NewHandle(klass->GetDexCache())); auto h_class_loader(hs.NewHandle(klass->GetClassLoader())); - auto h_method = hs.NewHandle(method); - return VerifyMethod(self, method->GetDexMethodIndex(), method->GetDexFile(), h_dex_cache, - h_class_loader, klass->GetClassDef(), method->GetCodeItem(), h_method, + return VerifyMethod(hs.Self(), method->GetDexMethodIndex(), method->GetDexFile(), h_dex_cache, + h_class_loader, klass->GetClassDef(), method->GetCodeItem(), method, method->GetAccessFlags(), allow_soft_failures, false); } @@ -162,7 +160,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self, StackHandleScope<2> hs(self); Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); - return VerifyClass(self, &dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error); + return VerifyClass( + self, &dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error); } MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self, @@ -197,16 +196,16 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self, } previous_direct_method_idx = method_idx; InvokeType type = it.GetMethodInvokeType(*class_def); - 
mirror::ArtMethod* method = - linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, - NullHandle<mirror::ArtMethod>(), type); + ArtMethod* method = linker->ResolveMethod( + *dex_file, method_idx, dex_cache, class_loader, nullptr, type); if (method == nullptr) { DCHECK(self->IsExceptionPending()); // We couldn't resolve the method, but continue regardless. self->ClearException(); + } else { + DCHECK(method->GetDeclaringClassUnchecked() != nullptr) << type; } StackHandleScope<1> hs(self); - Handle<mirror::ArtMethod> h_method(hs.NewHandle(method)); MethodVerifier::FailureKind result = VerifyMethod(self, method_idx, dex_file, @@ -214,10 +213,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self, class_loader, class_def, it.GetMethodCodeItem(), - h_method, - it.GetMethodAccessFlags(), - allow_soft_failures, - false); + method, it.GetMethodAccessFlags(), allow_soft_failures, false); if (result != kNoFailure) { if (result == kHardFailure) { hard_fail = true; @@ -245,16 +241,14 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self, } previous_virtual_method_idx = method_idx; InvokeType type = it.GetMethodInvokeType(*class_def); - mirror::ArtMethod* method = - linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, - NullHandle<mirror::ArtMethod>(), type); + ArtMethod* method = linker->ResolveMethod( + *dex_file, method_idx, dex_cache, class_loader, nullptr, type); if (method == nullptr) { DCHECK(self->IsExceptionPending()); // We couldn't resolve the method, but continue regardless. 
self->ClearException(); } StackHandleScope<1> hs(self); - Handle<mirror::ArtMethod> h_method(hs.NewHandle(method)); MethodVerifier::FailureKind result = VerifyMethod(self, method_idx, dex_file, @@ -262,10 +256,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self, class_loader, class_def, it.GetMethodCodeItem(), - h_method, - it.GetMethodAccessFlags(), - allow_soft_failures, - false); + method, it.GetMethodAccessFlags(), allow_soft_failures, false); if (result != kNoFailure) { if (result == kHardFailure) { hard_fail = true; @@ -305,7 +296,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyMethod(Thread* self, uint32_t Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def, const DexFile::CodeItem* code_item, - Handle<mirror::ArtMethod> method, + ArtMethod* method, uint32_t method_access_flags, bool allow_soft_failures, bool need_precise_constants) { @@ -355,7 +346,7 @@ MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self, std::ostream& Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def, const DexFile::CodeItem* code_item, - Handle<mirror::ArtMethod> method, + ArtMethod* method, uint32_t method_access_flags) { MethodVerifier* verifier = new MethodVerifier(self, dex_file, dex_cache, class_loader, class_def, code_item, dex_method_idx, method, @@ -379,7 +370,7 @@ MethodVerifier::MethodVerifier(Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def, const DexFile::CodeItem* code_item, uint32_t dex_method_idx, - Handle<mirror::ArtMethod> method, uint32_t method_access_flags, + ArtMethod* method, uint32_t method_access_flags, bool can_load_classes, bool allow_soft_failures, bool need_precise_constants, bool verify_to_dump, bool allow_thread_suspension) @@ -418,15 +409,13 @@ MethodVerifier::~MethodVerifier() { STLDeleteElements(&failure_messages_); } -void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc, +void 
MethodVerifier::FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc, std::vector<uint32_t>* monitor_enter_dex_pcs) { - Thread* self = Thread::Current(); - StackHandleScope<3> hs(self); + StackHandleScope<2> hs(Thread::Current()); Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache())); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader())); - Handle<mirror::ArtMethod> method(hs.NewHandle(m)); - MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(), - m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(), + MethodVerifier verifier(hs.Self(), m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(), + m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true, false, false); verifier.interesting_dex_pc_ = dex_pc; verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs; @@ -465,16 +454,13 @@ void MethodVerifier::FindLocksAtDexPc() { Verify(); } -ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m, - uint32_t dex_pc) { - Thread* self = Thread::Current(); - StackHandleScope<3> hs(self); +ArtField* MethodVerifier::FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc) { + StackHandleScope<2> hs(Thread::Current()); Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache())); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader())); - Handle<mirror::ArtMethod> method(hs.NewHandle(m)); - MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(), - m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(), - true, true, false, true); + MethodVerifier verifier(hs.Self(), m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(), + m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true, + true, false, true); return verifier.FindAccessedFieldAtDexPc(dex_pc); } @@ -497,20 +483,17 @@ ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) { return 
GetQuickFieldAccess(inst, register_line); } -mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(mirror::ArtMethod* m, - uint32_t dex_pc) { - Thread* self = Thread::Current(); - StackHandleScope<3> hs(self); +ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc) { + StackHandleScope<2> hs(Thread::Current()); Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache())); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader())); - Handle<mirror::ArtMethod> method(hs.NewHandle(m)); - MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(), - m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(), - true, true, false, true); + MethodVerifier verifier(hs.Self(), m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(), + m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true, + true, false, true); return verifier.FindInvokedMethodAtDexPc(dex_pc); } -mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) { +ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) { CHECK(code_item_ != nullptr); // This only makes sense for methods with code. 
// Strictly speaking, we ought to be able to get away with doing a subset of the full method @@ -530,14 +513,13 @@ mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) { return GetQuickInvokedMethod(inst, register_line, is_range, false); } -SafeMap<uint32_t, std::set<uint32_t>> MethodVerifier::FindStringInitMap(mirror::ArtMethod* m) { +SafeMap<uint32_t, std::set<uint32_t>> MethodVerifier::FindStringInitMap(ArtMethod* m) { Thread* self = Thread::Current(); - StackHandleScope<3> hs(self); + StackHandleScope<2> hs(self); Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache())); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader())); - Handle<mirror::ArtMethod> method(hs.NewHandle(m)); MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(), - m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(), + m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true, true, false, true); return verifier.FindStringInitMap(); } @@ -2374,15 +2356,13 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { inst->Opcode() == Instruction::INVOKE_SUPER_RANGE); bool is_super = (inst->Opcode() == Instruction::INVOKE_SUPER || inst->Opcode() == Instruction::INVOKE_SUPER_RANGE); - mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range, - is_super); + ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range, is_super); const RegType* return_type = nullptr; if (called_method != nullptr) { StackHandleScope<1> hs(self_); - Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method)); - mirror::Class* return_type_class = h_called_method->GetReturnType(can_load_classes_); + mirror::Class* return_type_class = called_method->GetReturnType(can_load_classes_); if (return_type_class != nullptr) { - return_type = ®_types_.FromClass(h_called_method->GetReturnTypeDescriptor(), + return_type = 
®_types_.FromClass(called_method->GetReturnTypeDescriptor(), return_type_class, return_type_class->CannotBeAssignedFromOtherTypes()); } else { @@ -2408,10 +2388,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_DIRECT: case Instruction::INVOKE_DIRECT_RANGE: { bool is_range = (inst->Opcode() == Instruction::INVOKE_DIRECT_RANGE); - mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, - METHOD_DIRECT, - is_range, - false); + ArtMethod* called_method = VerifyInvocationArgs(inst, + METHOD_DIRECT, + is_range, + false); const char* return_type_descriptor; bool is_constructor; const RegType* return_type = nullptr; @@ -2425,8 +2405,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { is_constructor = called_method->IsConstructor(); return_type_descriptor = called_method->GetReturnTypeDescriptor(); StackHandleScope<1> hs(self_); - Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method)); - mirror::Class* return_type_class = h_called_method->GetReturnType(can_load_classes_); + mirror::Class* return_type_class = called_method->GetReturnType(can_load_classes_); if (return_type_class != nullptr) { return_type = ®_types_.FromClass(return_type_descriptor, return_type_class, @@ -2492,10 +2471,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_STATIC: case Instruction::INVOKE_STATIC_RANGE: { bool is_range = (inst->Opcode() == Instruction::INVOKE_STATIC_RANGE); - mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, - METHOD_STATIC, - is_range, - false); + ArtMethod* called_method = VerifyInvocationArgs(inst, + METHOD_STATIC, + is_range, + false); const char* descriptor; if (called_method == nullptr) { uint32_t method_idx = (is_range) ? 
inst->VRegB_3rc() : inst->VRegB_35c(); @@ -2517,10 +2496,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_INTERFACE: case Instruction::INVOKE_INTERFACE_RANGE: { bool is_range = (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE); - mirror::ArtMethod* abs_method = VerifyInvocationArgs(inst, - METHOD_INTERFACE, - is_range, - false); + ArtMethod* abs_method = VerifyInvocationArgs(inst, + METHOD_INTERFACE, + is_range, + false); if (abs_method != nullptr) { mirror::Class* called_interface = abs_method->GetDeclaringClass(); if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) { @@ -2845,7 +2824,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_VIRTUAL_QUICK: case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: { bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK); - mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range); + ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range); if (called_method != nullptr) { const char* descriptor = called_method->GetReturnTypeDescriptor(); const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false); @@ -3205,8 +3184,8 @@ const RegType& MethodVerifier::GetCaughtExceptionType() { return *common_super; } -mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx, - MethodType method_type) { +ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess( + uint32_t dex_method_idx, MethodType method_type) { const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx); const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_); if (klass_type.IsConflict()) { @@ -3220,26 +3199,28 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth } mirror::Class* klass = klass_type.GetClass(); const RegType& referrer = GetDeclaringClass(); - 
mirror::ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx); + auto* cl = Runtime::Current()->GetClassLinker(); + auto pointer_size = cl->GetImagePointerSize(); + ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx, pointer_size); if (res_method == nullptr) { const char* name = dex_file_->GetMethodName(method_id); const Signature signature = dex_file_->GetMethodSignature(method_id); if (method_type == METHOD_DIRECT || method_type == METHOD_STATIC) { - res_method = klass->FindDirectMethod(name, signature); + res_method = klass->FindDirectMethod(name, signature, pointer_size); } else if (method_type == METHOD_INTERFACE) { - res_method = klass->FindInterfaceMethod(name, signature); + res_method = klass->FindInterfaceMethod(name, signature, pointer_size); } else { - res_method = klass->FindVirtualMethod(name, signature); + res_method = klass->FindVirtualMethod(name, signature, pointer_size); } if (res_method != nullptr) { - dex_cache_->SetResolvedMethod(dex_method_idx, res_method); + dex_cache_->SetResolvedMethod(dex_method_idx, res_method, pointer_size); } else { // If a virtual or interface method wasn't found with the expected type, look in // the direct methods. This can happen when the wrong invoke type is used or when // a class has changed, and will be flagged as an error in later checks. 
if (method_type == METHOD_INTERFACE || method_type == METHOD_VIRTUAL) { - res_method = klass->FindDirectMethod(name, signature); + res_method = klass->FindDirectMethod(name, signature, pointer_size); } if (res_method == nullptr) { Fail(VERIFY_ERROR_NO_METHOD) << "couldn't find method " @@ -3298,10 +3279,8 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth } template <class T> -mirror::ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(T* it, const Instruction* inst, - MethodType method_type, - bool is_range, - mirror::ArtMethod* res_method) { +ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator( + T* it, const Instruction* inst, MethodType method_type, bool is_range, ArtMethod* res_method) { // We use vAA as our expected arg count, rather than res_method->insSize, because we need to // match the call to the signature. Also, we might be calling through an abstract method // definition (which doesn't have register count values). @@ -3431,7 +3410,7 @@ void MethodVerifier::VerifyInvocationArgsUnresolvedMethod(const Instruction* ins class MethodParamListDescriptorIterator { public: - explicit MethodParamListDescriptorIterator(mirror::ArtMethod* res_method) : + explicit MethodParamListDescriptorIterator(ArtMethod* res_method) : res_method_(res_method), pos_(0), params_(res_method->GetParameterTypeList()), params_size_(params_ == nullptr ? 0 : params_->Size()) { } @@ -3449,21 +3428,19 @@ class MethodParamListDescriptorIterator { } private: - mirror::ArtMethod* res_method_; + ArtMethod* res_method_; size_t pos_; const DexFile::TypeList* params_; const size_t params_size_; }; -mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst, - MethodType method_type, - bool is_range, - bool is_super) { +ArtMethod* MethodVerifier::VerifyInvocationArgs( + const Instruction* inst, MethodType method_type, bool is_range, bool is_super) { // Resolve the method. 
This could be an abstract or concrete method depending on what sort of call // we're making. const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c(); - mirror::ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type); + ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type); if (res_method == nullptr) { // error or class is unresolved // Check what we can statically. if (!have_pending_hard_failure_) { @@ -3500,9 +3477,8 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst, is_range, res_method); } -mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst, - RegisterLine* reg_line, bool is_range, - bool allow_failure) { +ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line, + bool is_range, bool allow_failure) { if (is_range) { DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_RANGE_QUICK); } else { @@ -3532,13 +3508,15 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst return nullptr; } uint16_t vtable_index = is_range ? 
inst->VRegB_3rc() : inst->VRegB_35c(); + auto* cl = Runtime::Current()->GetClassLinker(); + auto pointer_size = cl->GetImagePointerSize(); if (static_cast<int32_t>(vtable_index) >= dispatch_class->GetVTableLength()) { FailOrAbort(this, allow_failure, "Receiver class has not enough vtable slots for quickened invoke at ", work_insn_idx_); return nullptr; } - mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index); + ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index, pointer_size); if (self_->IsExceptionPending()) { FailOrAbort(this, allow_failure, "Unexpected exception pending for quickened invoke at ", work_insn_idx_); @@ -3547,12 +3525,11 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst return res_method; } -mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst, - bool is_range) { +ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range) { DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_) << PrettyMethod(dex_method_idx_, *dex_file_, true) << "@" << work_insn_idx_; - mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(), is_range, false); + ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(), is_range, false); if (res_method == nullptr) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name(); return nullptr; @@ -4258,7 +4235,7 @@ InstructionFlags* MethodVerifier::CurrentInsnFlags() { const RegType& MethodVerifier::GetMethodReturnType() { if (return_type_ == nullptr) { - if (mirror_method_.Get() != nullptr) { + if (mirror_method_ != nullptr) { mirror::Class* return_type_class = mirror_method_->GetReturnType(can_load_classes_); if (return_type_class != nullptr) { return_type_ = ®_types_.FromClass(mirror_method_->GetReturnTypeDescriptor(), @@ -4285,7 +4262,7 @@ const RegType& MethodVerifier::GetDeclaringClass() { const DexFile::MethodId& method_id = 
dex_file_->GetMethodId(dex_method_idx_); const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_)); - if (mirror_method_.Get() != nullptr) { + if (mirror_method_ != nullptr) { mirror::Class* klass = mirror_method_->GetDeclaringClass(); declaring_class_ = ®_types_.FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()); diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h index 452d1ddad7..873b8ab094 100644 --- a/runtime/verifier/method_verifier.h +++ b/runtime/verifier/method_verifier.h @@ -152,12 +152,11 @@ class MethodVerifier { Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def, - const DexFile::CodeItem* code_item, - Handle<mirror::ArtMethod> method, + const DexFile::CodeItem* code_item, ArtMethod* method, uint32_t method_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static FailureKind VerifyMethod(mirror::ArtMethod* method, bool allow_soft_failures, + static FailureKind VerifyMethod(ArtMethod* method, bool allow_soft_failures, std::string* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint8_t EncodePcToReferenceMapData() const; @@ -185,21 +184,21 @@ class MethodVerifier { // Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding // to the locks held at 'dex_pc' in method 'm'. - static void FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc, + static void FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc, std::vector<uint32_t>* monitor_enter_dex_pcs) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the accessed field corresponding to the quick instruction's field // offset at 'dex_pc' in method 'm'. 
- static ArtField* FindAccessedFieldAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc) + static ArtField* FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the invoked method corresponding to the quick instruction's vtable // index at 'dex_pc' in method 'm'. - static mirror::ArtMethod* FindInvokedMethodAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc) + static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(mirror::ArtMethod* m) + static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -212,7 +211,7 @@ class MethodVerifier { MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def, const DexFile::CodeItem* code_item, uint32_t method_idx, - Handle<mirror::ArtMethod> method, + ArtMethod* method, uint32_t access_flags, bool can_load_classes, bool allow_soft_failures, bool need_precise_constants, bool allow_thread_suspension) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) @@ -248,7 +247,7 @@ class MethodVerifier { const RegType& ResolveCheckedClass(uint32_t class_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the method of a quick invoke or null if it cannot be found. 
- mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line, + ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line, bool is_range, bool allow_failure) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the access field of a quick field access (iget/iput-quick) or null @@ -275,7 +274,7 @@ class MethodVerifier { MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def, const DexFile::CodeItem* code_item, uint32_t method_idx, - Handle<mirror::ArtMethod> method, uint32_t access_flags, + ArtMethod* method, uint32_t access_flags, bool can_load_classes, bool allow_soft_failures, bool need_precise_constants, bool verify_to_dump, bool allow_thread_suspension) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -302,7 +301,7 @@ class MethodVerifier { Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def_idx, const DexFile::CodeItem* code_item, - Handle<mirror::ArtMethod> method, uint32_t method_access_flags, + ArtMethod* method, uint32_t method_access_flags, bool allow_soft_failures, bool need_precise_constants) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -311,7 +310,7 @@ class MethodVerifier { ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc) + ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); SafeMap<uint32_t, std::set<uint32_t>>& FindStringInitMap() @@ -573,7 +572,7 @@ class MethodVerifier { * the referrer can access the resolved method. * Does not throw exceptions. 
*/ - mirror::ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type) + ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* @@ -598,7 +597,7 @@ class MethodVerifier { * Returns the resolved method on success, null on failure (with *failure * set appropriately). */ - mirror::ArtMethod* VerifyInvocationArgs(const Instruction* inst, + ArtMethod* VerifyInvocationArgs(const Instruction* inst, MethodType method_type, bool is_range, bool is_super) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -610,12 +609,12 @@ class MethodVerifier { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); template <class T> - mirror::ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst, + ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst, MethodType method_type, bool is_range, - mirror::ArtMethod* res_method) + ArtMethod* res_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range) + ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); /* @@ -680,7 +679,7 @@ class MethodVerifier { const uint32_t dex_method_idx_; // The method we're working on. // Its object representation if known. - Handle<mirror::ArtMethod> mirror_method_ GUARDED_BY(Locks::mutator_lock_); + ArtMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_); const uint32_t method_access_flags_; // Method's access flags. const RegType* return_type_; // Lazily computed return type of the method. const DexFile* const dex_file_; // The dex file containing the method. 
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h index 39df375e1f..f7a8249f19 100644 --- a/runtime/verify_object-inl.h +++ b/runtime/verify_object-inl.h @@ -20,7 +20,6 @@ #include "verify_object.h" #include "gc/heap.h" -#include "mirror/class-inl.h" #include "mirror/object-inl.h" namespace art { diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc index 28438062df..3dbfe1b516 100644 --- a/runtime/well_known_classes.cc +++ b/runtime/well_known_classes.cc @@ -22,6 +22,7 @@ #include "base/logging.h" #include "mirror/class.h" +#include "mirror/throwable.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "thread-inl.h" @@ -41,7 +42,6 @@ jclass WellKnownClasses::java_lang_Error; jclass WellKnownClasses::java_lang_Object; jclass WellKnownClasses::java_lang_OutOfMemoryError; jclass WellKnownClasses::java_lang_reflect_AbstractMethod; -jclass WellKnownClasses::java_lang_reflect_ArtMethod; jclass WellKnownClasses::java_lang_reflect_Constructor; jclass WellKnownClasses::java_lang_reflect_Field; jclass WellKnownClasses::java_lang_reflect_Method; @@ -165,11 +165,13 @@ static jclass CacheClass(JNIEnv* env, const char* jni_class_name) { static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature) { - jfieldID fid = (is_static ? - env->GetStaticFieldID(c, name, signature) : - env->GetFieldID(c, name, signature)); + jfieldID fid = is_static ? 
env->GetStaticFieldID(c, name, signature) : + env->GetFieldID(c, name, signature); if (fid == nullptr) { ScopedObjectAccess soa(env); + if (soa.Self()->IsExceptionPending()) { + LOG(INTERNAL_FATAL) << soa.Self()->GetException()->Dump() << '\n'; + } std::ostringstream os; WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail); LOG(FATAL) << "Couldn't find field \"" << name << "\" with signature \"" << signature << "\": " @@ -180,11 +182,13 @@ static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static, jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature) { - jmethodID mid = (is_static ? - env->GetStaticMethodID(c, name, signature) : - env->GetMethodID(c, name, signature)); + jmethodID mid = is_static ? env->GetStaticMethodID(c, name, signature) : + env->GetMethodID(c, name, signature); if (mid == nullptr) { ScopedObjectAccess soa(env); + if (soa.Self()->IsExceptionPending()) { + LOG(INTERNAL_FATAL) << soa.Self()->GetException()->Dump() << '\n'; + } std::ostringstream os; WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail); LOG(FATAL) << "Couldn't find method \"" << name << "\" with signature \"" << signature << "\": " @@ -213,7 +217,6 @@ void WellKnownClasses::Init(JNIEnv* env) { java_lang_OutOfMemoryError = CacheClass(env, "java/lang/OutOfMemoryError"); java_lang_Error = CacheClass(env, "java/lang/Error"); java_lang_reflect_AbstractMethod = CacheClass(env, "java/lang/reflect/AbstractMethod"); - java_lang_reflect_ArtMethod = CacheClass(env, "java/lang/reflect/ArtMethod"); java_lang_reflect_Constructor = CacheClass(env, "java/lang/reflect/Constructor"); java_lang_reflect_Field = CacheClass(env, "java/lang/reflect/Field"); java_lang_reflect_Method = CacheClass(env, "java/lang/reflect/Method"); @@ -334,7 +337,7 @@ void WellKnownClasses::Init(JNIEnv* env) { java_lang_Throwable_stackTrace = CacheField(env, java_lang_Throwable, false, "stackTrace", 
"[Ljava/lang/StackTraceElement;"); java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "stackState", "Ljava/lang/Object;"); java_lang_Throwable_suppressedExceptions = CacheField(env, java_lang_Throwable, false, "suppressedExceptions", "Ljava/util/List;"); - java_lang_reflect_AbstractMethod_artMethod = CacheField(env, java_lang_reflect_AbstractMethod, false, "artMethod", "Ljava/lang/reflect/ArtMethod;"); + java_lang_reflect_AbstractMethod_artMethod = CacheField(env, java_lang_reflect_AbstractMethod, false, "artMethod", "J"); java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;"); java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I"); java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "effectiveDirectAddress", "J"); diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h index acb26560ec..d25d1c3f7d 100644 --- a/runtime/well_known_classes.h +++ b/runtime/well_known_classes.h @@ -53,7 +53,6 @@ struct WellKnownClasses { static jclass java_lang_Object; static jclass java_lang_OutOfMemoryError; static jclass java_lang_reflect_AbstractMethod; - static jclass java_lang_reflect_ArtMethod; static jclass java_lang_reflect_Constructor; static jclass java_lang_reflect_Field; static jclass java_lang_reflect_Method; diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc index 76ef4a9d67..e626e48be9 100644 --- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc @@ -36,7 +36,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor { if (CheckReferenceMapVisitor::VisitFrame()) { return true; } - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); // Given the method name and the number of times the method has been 
called, diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc index c40de7e247..f66b715128 100644 --- a/test/004-StackWalk/stack_walk_jni.cc +++ b/test/004-StackWalk/stack_walk_jni.cc @@ -36,7 +36,7 @@ class TestReferenceMapVisitor : public CheckReferenceMapVisitor { if (CheckReferenceMapVisitor::VisitFrame()) { return true; } - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); StringPiece m_name(m->GetName()); // Given the method name and the number of times the method has been called, diff --git a/test/004-UnsafeTest/unsafe_test.cc b/test/004-UnsafeTest/unsafe_test.cc index ca0e39e122..3b0cf235a6 100644 --- a/test/004-UnsafeTest/unsafe_test.cc +++ b/test/004-UnsafeTest/unsafe_test.cc @@ -14,9 +14,9 @@ * limitations under the License. */ +#include "art_method-inl.h" #include "jni.h" #include "mirror/array.h" -#include "mirror/art_method-inl.h" #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc index 0ef2964e35..33bdc200db 100644 --- a/test/454-get-vreg/get_vreg_jni.cc +++ b/test/454-get-vreg/get_vreg_jni.cc @@ -15,8 +15,8 @@ */ #include "arch/context.h" +#include "art_method-inl.h" #include "jni.h" -#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" #include "stack.h" #include "thread.h" @@ -34,7 +34,7 @@ class TestVisitor : public StackVisitor { found_method_index_(0) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); if (m_name.compare("testSimpleVReg") == 0) { diff --git a/test/455-set-vreg/set_vreg_jni.cc b/test/455-set-vreg/set_vreg_jni.cc index dffbfa47d8..754118935c 100644 --- a/test/455-set-vreg/set_vreg_jni.cc +++ b/test/455-set-vreg/set_vreg_jni.cc @@ -15,8 +15,8 @@ */ #include "arch/context.h" +#include "art_method-inl.h" #include "jni.h" 
-#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" #include "stack.h" #include "thread.h" @@ -33,7 +33,7 @@ class TestVisitor : public StackVisitor { this_value_(this_value) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); if (m_name.compare("testIntVReg") == 0) { diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc index 193ab9dc4e..96f0e52995 100644 --- a/test/457-regs/regs_jni.cc +++ b/test/457-regs/regs_jni.cc @@ -15,8 +15,8 @@ */ #include "arch/context.h" +#include "art_method-inl.h" #include "jni.h" -#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" #include "stack.h" #include "thread.h" @@ -32,7 +32,7 @@ class TestVisitor : public StackVisitor { : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); if (m_name.compare("mergeOk") == 0) { diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc index a8ef684e93..23fe43d906 100644 --- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc +++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc @@ -15,8 +15,8 @@ */ #include "arch/context.h" +#include "art_method-inl.h" #include "jni.h" -#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" #include "stack.h" #include "thread.h" @@ -34,7 +34,7 @@ class TestVisitor : public StackVisitor { found_method_index_(0) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); if (m_name.compare("testThisWithInstanceCall") == 0) { diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc 
b/test/466-get-live-vreg/get_live_vreg_jni.cc index 4724e8ebe4..c4f415b3f9 100644 --- a/test/466-get-live-vreg/get_live_vreg_jni.cc +++ b/test/466-get-live-vreg/get_live_vreg_jni.cc @@ -15,8 +15,8 @@ */ #include "arch/context.h" +#include "art_method-inl.h" #include "jni.h" -#include "mirror/art_method-inl.h" #include "scoped_thread_state_change.h" #include "stack.h" #include "thread.h" @@ -31,7 +31,7 @@ class TestVisitor : public StackVisitor { : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::ArtMethod* m = GetMethod(); + ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); if (m_name.compare("testLiveArgument") == 0) { |