Diffstat (limited to 'compiler/optimizing')
24 files changed, 184 insertions, 159 deletions
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a5c6f23343..58416ee93b 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -665,9 +665,8 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
           *dex_compilation_unit_->GetDexFile())));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
-  mirror::ArtMethod* resolved_method = compiler_driver_->ResolveMethod(
-      soa, dex_cache, class_loader, dex_compilation_unit_, method_idx,
-      optimized_invoke_type);
+  ArtMethod* resolved_method = compiler_driver_->ResolveMethod(
+      soa, dex_cache, class_loader, dex_compilation_unit_, method_idx, optimized_invoke_type);
 
   if (resolved_method == nullptr) {
     MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedMethod);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0e776b31f7..a5d5305836 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -114,18 +114,24 @@ size_t CodeGenerator::GetCacheOffset(uint32_t index) {
   return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
 }
 
+size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
+  auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
+  return mirror::Array::DataOffset(pointer_size).Uint32Value() + pointer_size * index;
+}
+
 void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
   Initialize();
   if (!is_leaf) {
     MarkNotLeaf();
   }
+  const bool is_64_bit = Is64BitInstructionSet(GetInstructionSet());
   InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs()
                              + GetGraph()->GetTemporariesVRegSlots()
                              + 1 /* filler */,
                            0, /* the baseline compiler does not have live registers at slow path */
                            0, /* the baseline compiler does not have live registers at slow path */
                            GetGraph()->GetMaximumNumberOfOutVRegs()
-                             + 1 /* current method */,
+                             + (is_64_bit ? 2 : 1) /* current method */,
                            GetGraph()->GetBlocks());
   CompileInternal(allocator, /* is_baseline */ true);
 }
@@ -270,7 +276,8 @@ int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
   uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
   if (reg_number >= number_of_locals) {
     // Local is a parameter of the method. It is stored in the caller's frame.
-    return GetFrameSize() + kVRegSize  // ART method
+    // TODO: Share this logic with StackVisitor::GetVRegOffsetFromQuickCode.
+    return GetFrameSize() + InstructionSetPointerSize(GetInstructionSet())  // ART method
            + (reg_number - number_of_locals) * kVRegSize;
   } else {
     // Local is a temporary in this method. It is stored in this method's frame.
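The new GetCachePointerOffset above mirrors GetCacheOffset but scales by the target pointer size, because the dex-cache arrays for ArtMethod/ArtField now hold native pointers rather than 4-byte compressed heap references. A minimal standalone sketch of the two computations (the data-offset constant and the alignment rule are assumptions for illustration, not the real mirror::Array layout):

    #include <cstddef>
    #include <cstdint>

    // Assumed stand-ins for the real mirror::Array layout constants.
    constexpr size_t kArrayDataOffset = 12;   // hypothetical array header size
    constexpr size_t kHeapReferenceSize = 4;  // compressed heap reference

    // Old scheme: every dex-cache entry is a 4-byte heap reference.
    size_t CacheOffset(uint32_t index) {
      return kArrayDataOffset + kHeapReferenceSize * index;
    }

    // New scheme: method/field entries are native pointers, so both the
    // (pointer-aligned) data offset and the stride depend on the ISA.
    size_t CachePointerOffset(uint32_t index, size_t pointer_size) {
      const size_t data_offset = (kArrayDataOffset + pointer_size - 1) & ~(pointer_size - 1);
      return data_offset + pointer_size * index;
    }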
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index bdbd571133..c6317f18d3 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -145,7 +145,7 @@ class CodeGenerator {
   size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
     // Note that this follows the current calling convention.
     return GetFrameSize()
-        + kVRegSize  // Art method
+        + InstructionSetPointerSize(GetInstructionSet())  // Art method
         + parameter->GetIndex() * kVRegSize;
   }
 
@@ -266,6 +266,8 @@ class CodeGenerator {
   // Note: this method assumes we always have the same pointer size, regardless
   // of the architecture.
   static size_t GetCacheOffset(uint32_t index);
+  // Pointer variant for ArtMethod and ArtField arrays.
+  size_t GetCachePointerOffset(uint32_t index);
 
   void EmitParallelMoves(Location from1,
                          Location to1,
@@ -469,11 +471,13 @@ class CallingConvention {
   CallingConvention(const C* registers,
                     size_t number_of_registers,
                     const F* fpu_registers,
-                    size_t number_of_fpu_registers)
+                    size_t number_of_fpu_registers,
+                    size_t pointer_size)
       : registers_(registers),
        number_of_registers_(number_of_registers),
        fpu_registers_(fpu_registers),
-        number_of_fpu_registers_(number_of_fpu_registers) {}
+        number_of_fpu_registers_(number_of_fpu_registers),
+        pointer_size_(pointer_size) {}
 
   size_t GetNumberOfRegisters() const { return number_of_registers_; }
   size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; }
@@ -490,8 +494,8 @@ class CallingConvention {
 
   size_t GetStackOffsetOf(size_t index) const {
     // We still reserve the space for parameters passed by registers.
-    // Add one for the method pointer.
-    return (index + 1) * kVRegSize;
+    // Add space for the method pointer.
+    return pointer_size_ + index * kVRegSize;
   }
 
  private:
@@ -499,6 +503,7 @@ class CallingConvention {
   const size_t number_of_registers_;
   const F* fpu_registers_;
   const size_t number_of_fpu_registers_;
+  const size_t pointer_size_;
 
   DISALLOW_COPY_AND_ASSIGN(CallingConvention);
 };
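The CallingConvention change shifts stack-passed arguments past a pointer-sized method slot instead of a fixed vreg slot. On 32-bit targets nothing moves (pointer_size equals kVRegSize), while on 64-bit targets every argument offset grows by four bytes. A quick check, assuming kVRegSize is 4:

    #include <cassert>
    #include <cstddef>

    constexpr size_t kVRegSize = 4;

    // New computation from GetStackOffsetOf: the ArtMethod* slot precedes
    // the stack-passed arguments.
    size_t StackOffsetOf(size_t index, size_t pointer_size) {
      return pointer_size + index * kVRegSize;
    }

    int main() {
      assert(StackOffsetOf(0, 4) == 4);   // 32-bit: identical to old (index + 1) * kVRegSize
      assert(StackOffsetOf(0, 8) == 8);   // 64-bit: arguments start a full pointer in
      assert(StackOffsetOf(2, 8) == 16);  // ... and each later index still steps by 4
    }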
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 13775fed40..2b1131d65f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -17,13 +17,13 @@
 #include "code_generator_arm.h"
 
 #include "arch/arm/instruction_set_features_arm.h"
+#include "art_method.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "gc/accounting/card_table.h"
 #include "intrinsics.h"
 #include "intrinsics_arm.h"
 #include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
 #include "thread.h"
 #include "utils/arm/assembler_arm.h"
 #include "utils/arm/managed_register_arm.h"
@@ -1312,8 +1312,8 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   }
 
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
-      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+      invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
   LocationSummary* locations = invoke->GetLocations();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1326,7 +1326,7 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetMethodAt(method_offset);
-  uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
       kArmWordSize).Int32Value();
   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // LR = temp->GetEntryPoint();
@@ -1346,8 +1346,8 @@ void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
 
 void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
-      (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
   LocationSummary* locations = invoke->GetLocations();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1365,7 +1365,7 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetImtEntryAt(method_offset);
-  uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+  uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
       kArmWordSize).Int32Value();
   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   // LR = temp->GetEntryPoint();
@@ -3796,12 +3796,12 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
     DCHECK(!cls->CanCallRuntime());
     DCHECK(!cls->MustGenerateClinitCheck());
     codegen_->LoadCurrentMethod(out);
-    __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+    __ LoadFromOffset(kLoadWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
   } else {
     DCHECK(cls->CanCallRuntime());
     codegen_->LoadCurrentMethod(out);
     __ LoadFromOffset(
-        kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
+        kLoadWord, out, out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
     __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
 
     SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
@@ -3858,7 +3858,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
 
   Register out = load->GetLocations()->Out().AsRegister<Register>();
   codegen_->LoadCurrentMethod(out);
-  __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+  __ LoadFromOffset(kLoadWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
   __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
   __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
   __ cmp(out, ShifterOperand(0));
@@ -4081,7 +4081,7 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
     __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
     // LR = temp[offset_of_quick_compiled_code]
     __ LoadFromOffset(kLoadWord, LR, temp,
-                      mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+                      ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                           kArmWordSize).Int32Value());
     // LR()
     __ blx(LR);
@@ -4091,14 +4091,13 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
     if (!invoke->IsRecursive()) {
       // temp = temp->dex_cache_resolved_methods_;
       __ LoadFromOffset(
-          kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+          kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
       // temp = temp[index_in_cache]
       __ LoadFromOffset(
           kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
       // LR = temp[offset_of_quick_compiled_code]
-      __ LoadFromOffset(kLoadWord, LR, temp,
-                        mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-                            kArmWordSize).Int32Value());
+      __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+          kArmWordSize).Int32Value());
       // LR()
       __ blx(LR);
     } else {
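The ARM hunks above swap open-coded "embedded-table base + index * sizeof(entry)" arithmetic for mirror::Class helpers parameterized on pointer size, since vtable and IMT entries are now raw ArtMethod* slots whose width varies by ISA. Roughly what such a helper folds together (the base offset here is a hypothetical placeholder, not the real mirror::Class layout):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kAssumedEmbeddedVTableOffset = 208;  // placeholder base offset

    // One call site replaces base-plus-scaled-index arithmetic repeated per backend.
    uint32_t EmbeddedVTableEntryOffset(uint32_t index, size_t pointer_size) {
      return static_cast<uint32_t>(kAssumedEmbeddedVTableOffset + index * pointer_size);
    }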
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 1a498e1148..c410fa80ba 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -54,7 +54,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegis
       : CallingConvention(kRuntimeParameterCoreRegisters,
                           kRuntimeParameterCoreRegistersLength,
                           kRuntimeParameterFpuRegisters,
-                          kRuntimeParameterFpuRegistersLength) {}
+                          kRuntimeParameterFpuRegistersLength,
+                          kArmPointerSize) {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
@@ -72,7 +73,8 @@ class InvokeDexCallingConvention : public CallingConvention<Register, SRegister>
       : CallingConvention(kParameterCoreRegisters,
                           kParameterCoreRegistersLength,
                           kParameterFpuRegisters,
-                          kParameterFpuRegistersLength) {}
+                          kParameterFpuRegistersLength,
+                          kArmPointerSize) {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0222f93da4..55ef66fa99 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -17,6 +17,7 @@
 #include "code_generator_arm64.h"
 
 #include "arch/arm64/instruction_set_features_arm64.h"
+#include "art_method.h"
 #include "common_arm64.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -24,8 +25,7 @@
 #include "intrinsics.h"
 #include "intrinsics_arm64.h"
 #include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
 #include "offsets.h"
 #include "thread.h"
 #include "utils/arm64/assembler_arm64.h"
@@ -65,7 +65,6 @@ using helpers::WRegisterFrom;
 using helpers::XRegisterFrom;
 using helpers::ARM64EncodableConstantOrRegister;
 
-static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
 static constexpr int kCurrentMethodStackOffset = 0;
 
 inline Condition ARM64Condition(IfCondition cond) {
@@ -968,7 +967,7 @@ void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
 
 void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
   DCHECK(RequiresCurrentMethod());
-  DCHECK(current_method.IsW());
+  CHECK(current_method.IsX());
   __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
 }
 
@@ -1940,12 +1939,12 @@ void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
 
 void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
-  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
-  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
-      (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+  Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
   Location receiver = invoke->GetLocations()->InAt(0);
   Offset class_offset = mirror::Object::ClassOffset();
-  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
 
   // The register ip1 is required to be used for the hidden argument in
   // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
@@ -1957,16 +1956,16 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
 
   // temp = object->GetClass();
   if (receiver.IsStackSlot()) {
-    __ Ldr(temp, StackOperandFrom(receiver));
-    __ Ldr(temp, HeapOperand(temp, class_offset));
+    __ Ldr(temp.W(), StackOperandFrom(receiver));
+    __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
   } else {
-    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
+    __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetImtEntryAt(method_offset);
-  __ Ldr(temp, HeapOperand(temp, method_offset));
+  __ Ldr(temp, MemOperand(temp, method_offset));
   // lr = temp->GetEntryPoint();
-  __ Ldr(lr, HeapOperand(temp, entry_point));
+  __ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
   // lr();
   __ Blr(lr);
   DCHECK(!codegen_->IsLeafMethod());
@@ -2007,8 +2006,7 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codege
 
 void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
   // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
   DCHECK(temp.Is(kArtMethodRegister));
-  size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
-      invoke->GetDexMethodIndex() * kHeapRefSize;
+  size_t index_in_cache = GetCachePointerOffset(invoke->GetDexMethodIndex());
 
   // TODO: Implement all kinds of calls:
   // 1) boot -> boot
@@ -2019,23 +2017,24 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
 
   if (invoke->IsStringInit()) {
     // temp = thread->string_init_entrypoint
-    __ Ldr(temp, HeapOperand(tr, invoke->GetStringInitOffset()));
+    __ Ldr(temp.X(), MemOperand(tr, invoke->GetStringInitOffset()));
     // LR = temp->entry_point_from_quick_compiled_code_;
-    __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-        kArm64WordSize)));
+    __ Ldr(lr, MemOperand(
+        temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
     // lr()
     __ Blr(lr);
   } else {
     // temp = method;
-    LoadCurrentMethod(temp);
+    LoadCurrentMethod(temp.X());
     if (!invoke->IsRecursive()) {
       // temp = temp->dex_cache_resolved_methods_;
-      __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
+      __ Ldr(temp.W(), MemOperand(temp.X(),
+                                  ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
       // temp = temp[index_in_cache];
-      __ Ldr(temp, HeapOperand(temp, index_in_cache));
+      __ Ldr(temp.X(), MemOperand(temp, index_in_cache));
       // lr = temp->entry_point_from_quick_compiled_code_;
-      __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-          kArm64WordSize)));
+      __ Ldr(lr, MemOperand(temp.X(), ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+          kArm64WordSize).Int32Value()));
       // lr();
       __ Blr(lr);
     } else {
@@ -2056,7 +2055,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
   }
 
   BlockPoolsScope block_pools(GetVIXLAssembler());
-  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
+  Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
   codegen_->GenerateStaticOrDirectCall(invoke, temp);
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
@@ -2068,27 +2067,27 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
 
   LocationSummary* locations = invoke->GetLocations();
   Location receiver = locations->InAt(0);
-  Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
-  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
-      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+      invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
   Offset class_offset = mirror::Object::ClassOffset();
-  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
 
   BlockPoolsScope block_pools(GetVIXLAssembler());
 
   // temp = object->GetClass();
   if (receiver.IsStackSlot()) {
-    __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
-    __ Ldr(temp, HeapOperand(temp, class_offset));
+    __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
+    __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
   } else {
     DCHECK(receiver.IsRegister());
-    __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
+    __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetMethodAt(method_offset);
-  __ Ldr(temp, HeapOperand(temp, method_offset));
+  __ Ldr(temp, MemOperand(temp, method_offset));
   // lr = temp->GetEntryPoint();
-  __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
+  __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
   // lr();
   __ Blr(lr);
   DCHECK(!codegen_->IsLeafMethod());
@@ -2107,12 +2106,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
   if (cls->IsReferrersClass()) {
     DCHECK(!cls->CanCallRuntime());
     DCHECK(!cls->MustGenerateClinitCheck());
-    codegen_->LoadCurrentMethod(out);
-    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
+    codegen_->LoadCurrentMethod(out.X());
+    __ Ldr(out, MemOperand(out.X(), ArtMethod::DeclaringClassOffset().Int32Value()));
   } else {
     DCHECK(cls->CanCallRuntime());
-    codegen_->LoadCurrentMethod(out);
-    __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
+    codegen_->LoadCurrentMethod(out.X());
+    __ Ldr(out, MemOperand(out.X(), ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
     __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
 
     SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
@@ -2159,8 +2158,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
   codegen_->AddSlowPath(slow_path);
 
   Register out = OutputRegister(load);
-  codegen_->LoadCurrentMethod(out);
-  __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
+  codegen_->LoadCurrentMethod(out.X());
+  __ Ldr(out, MemOperand(out.X(), ArtMethod::DeclaringClassOffset().Int32Value()));
   __ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
   __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
   __ Cbz(out, slow_path->GetEntryLabel());
@@ -2288,7 +2287,7 @@ void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
   locations->SetOut(LocationFrom(x0));
   locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
   CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
-                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
+                       void*, uint32_t, int32_t, ArtMethod*>();
 }
 
 void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
@@ -2296,17 +2295,16 @@ void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
   InvokeRuntimeCallingConvention calling_convention;
   Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
   DCHECK(type_index.Is(w0));
-  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
-  DCHECK(current_method.Is(w2));
-  codegen_->LoadCurrentMethod(current_method);
+  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimLong);
+  DCHECK(current_method.Is(x2));
+  codegen_->LoadCurrentMethod(current_method.X());
   __ Mov(type_index, instruction->GetTypeIndex());
   codegen_->InvokeRuntime(
       GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
       instruction,
       instruction->GetDexPc(),
       nullptr);
-  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
-                       void*, uint32_t, int32_t, mirror::ArtMethod*>();
+  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
 }
 
 void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -2316,7 +2314,7 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
   locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
   locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
-  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
+  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
 }
 
 void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -2325,14 +2323,14 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
   DCHECK(type_index.Is(w0));
   Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
   DCHECK(current_method.Is(w1));
-  codegen_->LoadCurrentMethod(current_method);
+  codegen_->LoadCurrentMethod(current_method.X());
   __ Mov(type_index, instruction->GetTypeIndex());
   codegen_->InvokeRuntime(
       GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
       instruction,
       instruction->GetDexPc(),
       nullptr);
-  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
+  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
 }
 
 void LocationsBuilderARM64::VisitNot(HNot* instruction) {
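The recurring temp vs. temp.W()/temp.X() edits in the arm64 backend track two different data widths: compressed heap references (class pointers, dex-cache arrays) stay 32-bit and are loaded through W views, while native ArtMethod* values need full 64-bit X loads. Loading an 8-byte slot with a 4-byte access silently truncates the pointer, which this small standalone check illustrates (the pointer value is hypothetical):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t method_ptr = 0x0000007F12345678ull;         // hypothetical ArtMethod*
      const uint32_t w_load = static_cast<uint32_t>(method_ptr);  // what a W-width load keeps
      assert(w_load != method_ptr);  // upper 32 bits are gone: an X-width load is required
    }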
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 8aeea5400f..3486cdebec 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -45,7 +45,7 @@ static const vixl::FPRegister kParameterFPRegisters[] = {
 static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
 
 const vixl::Register tr = vixl::x18;                        // Thread Register
-static const vixl::Register kArtMethodRegister = vixl::w0;  // Method register on invoke.
+static const vixl::Register kArtMethodRegister = vixl::x0;  // Method register on invoke.
 const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
 const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
 
@@ -94,7 +94,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<vixl::Register,
       : CallingConvention(kRuntimeParameterCoreRegisters,
                           kRuntimeParameterCoreRegistersLength,
                           kRuntimeParameterFpuRegisters,
-                          kRuntimeParameterFpuRegistersLength) {}
+                          kRuntimeParameterFpuRegistersLength,
+                          kArm64PointerSize) {}
 
   Location GetReturnLocation(Primitive::Type return_type);
 
@@ -108,7 +109,8 @@ class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl
       : CallingConvention(kParameterCoreRegisters,
                           kParameterCoreRegistersLength,
                           kParameterFPRegisters,
-                          kParameterFPRegistersLength) {}
+                          kParameterFPRegistersLength,
+                          kArm64PointerSize) {}
 
   Location GetReturnLocation(Primitive::Type return_type) {
     return ARM64ReturnLocation(return_type);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 2848a48a64..60fd29bf74 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -16,6 +16,7 @@
 
 #include "code_generator_x86.h"
 
+#include "art_method.h"
 #include "code_generator_utils.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -23,8 +24,7 @@
 #include "intrinsics.h"
 #include "intrinsics_x86.h"
 #include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/stack_checks.h"
@@ -1275,8 +1275,8 @@ void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
 
 void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
-      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+      invoke->GetVTableIndex(), kX86PointerSize).Uint32Value();
   LocationSummary* locations = invoke->GetLocations();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1292,7 +1292,7 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   __ movl(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
   __ call(Address(
-      temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+      temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
 
   DCHECK(!codegen_->IsLeafMethod());
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1307,8 +1307,8 @@ void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
 
 void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
-      (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
   LocationSummary* locations = invoke->GetLocations();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1328,7 +1328,7 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
   // temp = temp->GetImtEntryAt(method_offset);
   __ movl(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
-  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+  __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
       kX86WordSize).Int32Value()));
 
   DCHECK(!codegen_->IsLeafMethod());
@@ -3207,18 +3207,19 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
     __ fs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
     // (temp + offset_of_quick_compiled_code)()
     __ call(Address(
-        temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+        temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
   } else {
     // temp = method;
     LoadCurrentMethod(temp);
     if (!invoke->IsRecursive()) {
       // temp = temp->dex_cache_resolved_methods_;
-      __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+      __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
       // temp = temp[index_in_cache]
-      __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+      __ movl(temp, Address(temp,
+                            CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
       // (temp + offset_of_quick_compiled_code)()
       __ call(Address(temp,
-          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
     } else {
       __ call(GetFrameEntryLabel());
     }
@@ -4278,11 +4279,11 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
     DCHECK(!cls->CanCallRuntime());
     DCHECK(!cls->MustGenerateClinitCheck());
     codegen_->LoadCurrentMethod(out);
-    __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+    __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value()));
   } else {
     DCHECK(cls->CanCallRuntime());
     codegen_->LoadCurrentMethod(out);
-    __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+    __ movl(out, Address(out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
     __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
 
     SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
@@ -4337,7 +4338,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
 
   Register out = load->GetLocations()->Out().AsRegister<Register>();
   codegen_->LoadCurrentMethod(out);
-  __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+  __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value()));
   __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
   __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
   __ testl(out, out);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 5a5a37b3fe..43214fe7d5 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -52,7 +52,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmReg
       : CallingConvention(kRuntimeParameterCoreRegisters,
                           kRuntimeParameterCoreRegistersLength,
                           kRuntimeParameterFpuRegisters,
-                          kRuntimeParameterFpuRegistersLength) {}
+                          kRuntimeParameterFpuRegistersLength,
+                          kX86PointerSize) {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
@@ -64,7 +65,8 @@ class InvokeDexCallingConvention : public CallingConvention<Register, XmmRegiste
           kParameterCoreRegisters,
           kParameterCoreRegistersLength,
           kParameterFpuRegisters,
-          kParameterFpuRegistersLength) {}
+          kParameterFpuRegistersLength,
+          kX86PointerSize) {}
 
   RegisterPair GetRegisterPairAt(size_t argument_index) {
     DCHECK_LT(argument_index + 1, GetNumberOfRegisters());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e633970279..b0174b9b16 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -16,14 +16,14 @@
 
 #include "code_generator_x86_64.h"
 
+#include "art_method.h"
 #include "code_generator_utils.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "gc/accounting/card_table.h"
 #include "intrinsics.h"
 #include "intrinsics_x86_64.h"
 #include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
 #include "mirror/object_reference.h"
 #include "thread.h"
 #include "utils/assembler.h"
@@ -374,18 +374,19 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
     // temp = thread->string_init_entrypoint
     __ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
     // (temp + offset_of_quick_compiled_code)()
-    __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+    __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
         kX86_64WordSize).SizeValue()));
   } else {
     // temp = method;
     LoadCurrentMethod(temp);
     if (!invoke->IsRecursive()) {
       // temp = temp->dex_cache_resolved_methods_;
-      __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+      __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
       // temp = temp[index_in_cache]
-      __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+      __ movq(temp, Address(
+          temp, CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
       // (temp + offset_of_quick_compiled_code)()
-      __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+      __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
           kX86_64WordSize).SizeValue()));
     } else {
       __ call(&frame_entry_label_);
@@ -545,7 +546,7 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
     }
   }
 
-  __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
+  __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
 }
 
 void CodeGeneratorX86_64::GenerateFrameExit() {
@@ -585,7 +586,7 @@ void CodeGeneratorX86_64::Bind(HBasicBlock* block) {
 
 void CodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) {
   DCHECK(RequiresCurrentMethod());
-  __ movl(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
+  __ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
 }
 
 Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
@@ -1383,8 +1384,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
   }
 
   CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
-  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
-      invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+      invoke->GetVTableIndex(), kX86_64PointerSize).SizeValue();
   LocationSummary* locations = invoke->GetLocations();
   Location receiver = locations->InAt(0);
   size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -1397,9 +1398,9 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetMethodAt(method_offset);
-  __ movl(temp, Address(temp, method_offset));
+  __ movq(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
-  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+  __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
       kX86_64WordSize).SizeValue()));
 
   DCHECK(!codegen_->IsLeafMethod());
@@ -1415,8 +1416,8 @@ void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
 
 void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
-  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
-      (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
   LocationSummary* locations = invoke->GetLocations();
   Location receiver = locations->InAt(0);
   size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -1434,9 +1435,9 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
   }
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetImtEntryAt(method_offset);
-  __ movl(temp, Address(temp, method_offset));
+  __ movq(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
-  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+  __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
       kX86_64WordSize).SizeValue()));
 
   DCHECK(!codegen_->IsLeafMethod());
@@ -4125,11 +4126,11 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
     DCHECK(!cls->CanCallRuntime());
     DCHECK(!cls->MustGenerateClinitCheck());
     codegen_->LoadCurrentMethod(out);
-    __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+    __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value()));
   } else {
     DCHECK(cls->CanCallRuntime());
     codegen_->LoadCurrentMethod(out);
-    __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+    __ movl(out, Address(out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
     __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
     SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -4174,7 +4175,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
 
   CpuRegister out = load->GetLocations()->Out().AsRegister<CpuRegister>();
   codegen_->LoadCurrentMethod(CpuRegister(out));
-  __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+  __ movl(out, Address(out, ArtMethod::DeclaringClassOffset().Int32Value()));
   __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
   __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
   __ testl(out, out);
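On x86-64 the same width split shows up as movl vs. movq: 32-bit moves for heap references (movl also zero-extends into the full register), 64-bit moves for ArtMethod* slots such as dex-cache method entries, the spilled current method, and vtable/IMT entries; 32-bit x86 keeps movl throughout since its pointers are already 4 bytes. The hazard the movl-to-movq changes remove, sketched portably with memcpy standing in for the raw loads (values are hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // A movl-style 4-byte read of an 8-byte pointer slot keeps only the low half.
    uint64_t NarrowLoad(const void* slot) {
      uint32_t low;
      std::memcpy(&low, slot, sizeof(low));  // 32-bit load (movl)
      return low;
    }

    uint64_t WideLoad(const void* slot) {
      uint64_t value;
      std::memcpy(&value, slot, sizeof(value));  // 64-bit load (movq)
      return value;
    }

    int main() {
      const uint64_t art_method = 0x0000007F00C0FFEEull;  // hypothetical pointer value
      assert(NarrowLoad(&art_method) != art_method);      // truncated (little-endian)
      assert(WideLoad(&art_method) == art_method);
    }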
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 480ea6b9c9..4be401a0fa 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -50,7 +50,8 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatR
       : CallingConvention(kRuntimeParameterCoreRegisters,
                           kRuntimeParameterCoreRegistersLength,
                           kRuntimeParameterFpuRegisters,
-                          kRuntimeParameterFpuRegistersLength) {}
+                          kRuntimeParameterFpuRegistersLength,
+                          kX86_64PointerSize) {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
@@ -62,7 +63,8 @@ class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegis
           kParameterCoreRegisters,
           kParameterCoreRegistersLength,
           kParameterFloatRegisters,
-          kParameterFloatRegistersLength) {}
+          kParameterFloatRegistersLength,
+          kX86_64PointerSize) {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d88424cb5e..8253a43389 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -16,6 +16,7 @@
 
 #include "inliner.h"
 
+#include "art_method-inl.h"
 #include "builder.h"
 #include "class_linker.h"
 #include "constant_folding.h"
@@ -23,7 +24,6 @@
 #include "driver/compiler_driver-inl.h"
 #include "driver/dex_compilation_unit.h"
 #include "instruction_simplifier.h"
-#include "mirror/art_method-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
 #include "nodes.h"
@@ -81,11 +81,10 @@ bool HInliner::TryInline(HInvoke* invoke_instruction,
       hs.NewHandle(caller_compilation_unit_.GetClassLinker()->FindDexCache(caller_dex_file)));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       soa.Decode<mirror::ClassLoader*>(caller_compilation_unit_.GetClassLoader())));
-  Handle<mirror::ArtMethod> resolved_method(hs.NewHandle(
-      compiler_driver_->ResolveMethod(
-          soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type)));
+  ArtMethod* resolved_method(compiler_driver_->ResolveMethod(
+      soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type));
 
-  if (resolved_method.Get() == nullptr) {
+  if (resolved_method == nullptr) {
     VLOG(compiler) << "Method cannot be resolved " << PrettyMethod(method_index, caller_dex_file);
     return false;
   }
@@ -149,7 +148,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction,
   return true;
 }
 
-bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
+bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
                                  HInvoke* invoke_instruction,
                                  uint32_t method_index,
                                  bool can_use_dex_cache) const {
@@ -172,6 +171,7 @@ bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
       graph_->GetArena(),
       caller_dex_file,
       method_index,
+      compiler_driver_->GetInstructionSet(),
       graph_->IsDebuggable(),
       graph_->GetCurrentInstructionId());
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 1dbc7d392b..831bdf22a0 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -48,7 +48,7 @@ class HInliner : public HOptimization {
  private:
   bool TryInline(HInvoke* invoke_instruction, uint32_t method_index, InvokeType invoke_type) const;
-  bool TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
+  bool TryBuildAndInline(ArtMethod* resolved_method,
                          HInvoke* invoke_instruction,
                          uint32_t method_index,
                          bool can_use_dex_cache) const;
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index dccfe9a0ca..db35b8f767 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -17,11 +17,11 @@
 #include "intrinsics_arm.h"
 
 #include "arch/arm/instruction_set_features_arm.h"
+#include "art_method.h"
 #include "code_generator_arm.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "intrinsics.h"
 #include "mirror/array-inl.h"
-#include "mirror/art_method.h"
 #include "mirror/string.h"
 #include "thread.h"
 #include "utils/arm/assembler_arm.h"
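The inliner now passes ArtMethod* directly instead of wrapping it in a Handle<mirror::ArtMethod>: once ArtMethod is a native object in linear-alloc memory rather than a managed mirror object, a moving GC can no longer relocate it, so the GC-visible indirection a handle provides buys nothing. The before/after shape, roughly (abbreviated pseudo-code, not the exact ART API surface):

    // Before: ArtMethod was a managed object a moving collector could relocate,
    // so code held it through a handle that the GC keeps updated.
    //   Handle<mirror::ArtMethod> method(hs.NewHandle(ResolveMethod(...)));
    //   Use(method.Get());
    //
    // After: ArtMethod lives in native memory and never moves, so a plain
    // pointer remains valid across suspend points.
    //   ArtMethod* method = ResolveMethod(...);
    //   Use(method);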
#include "mirror/array-inl.h" -#include "mirror/art_method.h" #include "mirror/string.h" #include "thread.h" #include "utils/arm64/assembler_arm64.h" diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 28b7a07cf9..989dd0df30 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -19,11 +19,11 @@ #include <limits> #include "arch/x86/instruction_set_features_x86.h" +#include "art_method.h" #include "code_generator_x86.h" #include "entrypoints/quick/quick_entrypoints.h" #include "intrinsics.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" #include "mirror/string.h" #include "thread.h" #include "utils/x86/assembler_x86.h" diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 0efa714a23..c245cb646f 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -19,11 +19,11 @@ #include <limits> #include "arch/x86_64/instruction_set_features_x86_64.h" +#include "art_method-inl.h" #include "code_generator_x86_64.h" #include "entrypoints/quick/quick_entrypoints.h" #include "intrinsics.h" #include "mirror/array-inl.h" -#include "mirror/art_method.h" #include "mirror/string.h" #include "thread.h" #include "utils/x86_64/assembler_x86_64.h" diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 77b587e74f..ef60d7680b 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -120,6 +120,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { HGraph(ArenaAllocator* arena, const DexFile& dex_file, uint32_t method_idx, + InstructionSet instruction_set, bool debuggable = false, int start_instruction_id = 0) : arena_(arena), @@ -137,6 +138,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { current_instruction_id_(start_instruction_id), dex_file_(dex_file), method_idx_(method_idx), + instruction_set_(instruction_set), cached_null_constant_(nullptr), cached_int_constants_(std::less<int32_t>(), arena->Adapter()), cached_float_constants_(std::less<int32_t>(), arena->Adapter()), @@ -359,6 +361,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { // The method index in the dex file. const uint32_t method_idx_; + const InstructionSet instruction_set_; + // Cached constants. HNullConstant* cached_null_constant_; ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_; diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc index 7aea249c42..b0d1433667 100644 --- a/compiler/optimizing/optimizing_cfi_test.cc +++ b/compiler/optimizing/optimizing_cfi_test.cc @@ -31,7 +31,7 @@ namespace art { // Run the tests only on host. #ifndef HAVE_ANDROID_OS -class OptimizingCFITest : public CFITest { +class OptimizingCFITest : public CFITest { public: // Enable this flag to generate the expected outputs. 
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 7aea249c42..b0d1433667 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -31,7 +31,7 @@ namespace art {
 // Run the tests only on host.
 #ifndef HAVE_ANDROID_OS
 
-class OptimizingCFITest : public CFITest {
+class OptimizingCFITest : public CFITest {
  public:
   // Enable this flag to generate the expected outputs.
   static constexpr bool kGenerateExpected = false;
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 2125f6eb01..9ccc0113f6 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -32,7 +32,7 @@ static constexpr uint8_t expected_cfi_kThumb2[] = {
 // 0x00000012: .cfi_def_cfa_offset: 64
 
 static constexpr uint8_t expected_asm_kArm64[] = {
-    0xE0, 0x0F, 0x1C, 0xB8, 0xF3, 0xD3, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
+    0xE0, 0x0F, 0x1C, 0xF8, 0xF3, 0xD3, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
     0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF3, 0xD3, 0x42, 0xA9,
     0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
 };
@@ -41,7 +41,7 @@ static constexpr uint8_t expected_cfi_kArm64[] = {
     0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
     0x44, 0xD3, 0xD4, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
 };
-// 0x00000000: str w0, [sp, #-64]!
+// 0x00000000: str x0, [sp, #-64]!
 // 0x00000004: .cfi_def_cfa_offset: 64
 // 0x00000004: stp x19, x20, [sp, #40]
 // 0x00000008: .cfi_offset: r19 at cfa-24
@@ -99,13 +99,13 @@ static constexpr uint8_t expected_cfi_kX86[] = {
 
 static constexpr uint8_t expected_asm_kX86_64[] = {
     0x55, 0x53, 0x48, 0x83, 0xEC, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24,
-    0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x89, 0x3C, 0x24, 0xF2,
-    0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24,
-    0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
+    0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x48, 0x89, 0x3C, 0x24,
+    0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C,
+    0x24, 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
 };
 static constexpr uint8_t expected_cfi_kX86_64[] = {
     0x41, 0x0E, 0x10, 0x86, 0x04, 0x41, 0x0E, 0x18, 0x83, 0x06, 0x44, 0x0E,
-    0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x43, 0x0A, 0x47, 0xDD, 0x47,
+    0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x44, 0x0A, 0x47, 0xDD, 0x47,
     0xDE, 0x44, 0x0E, 0x18, 0x41, 0x0E, 0x10, 0xC3, 0x41, 0x0E, 0x08, 0xC6,
     0x41, 0x0B, 0x0E, 0x40,
 };
@@ -121,21 +121,20 @@ static constexpr uint8_t expected_cfi_kX86_64[] = {
 // 0x0000000d: .cfi_offset: r30 at cfa-32
 // 0x0000000d: movsd [rsp + 24], xmm12
 // 0x00000014: .cfi_offset: r29 at cfa-40
-// 0x00000014: mov [rsp], edi
-// 0x00000017: .cfi_remember_state
-// 0x00000017: movsd xmm12, [rsp + 24]
-// 0x0000001e: .cfi_restore: r29
-// 0x0000001e: movsd xmm13, [rsp + 32]
-// 0x00000025: .cfi_restore: r30
-// 0x00000025: addq rsp, 40
-// 0x00000029: .cfi_def_cfa_offset: 24
-// 0x00000029: pop rbx
-// 0x0000002a: .cfi_def_cfa_offset: 16
-// 0x0000002a: .cfi_restore: r3
-// 0x0000002a: pop rbp
-// 0x0000002b: .cfi_def_cfa_offset: 8
-// 0x0000002b: .cfi_restore: r6
-// 0x0000002b: ret
-// 0x0000002c: .cfi_restore_state
-// 0x0000002c: .cfi_def_cfa_offset: 64
-
+// 0x00000014: movq [rsp], rdi
+// 0x00000018: .cfi_remember_state
+// 0x00000018: movsd xmm12, [rsp + 24]
+// 0x0000001f: .cfi_restore: r29
+// 0x0000001f: movsd xmm13, [rsp + 32]
+// 0x00000026: .cfi_restore: r30
+// 0x00000026: addq rsp, 40
+// 0x0000002a: .cfi_def_cfa_offset: 24
+// 0x0000002a: pop rbx
+// 0x0000002b: .cfi_def_cfa_offset: 16
+// 0x0000002b: .cfi_restore: r3
+// 0x0000002b: pop rbp
+// 0x0000002c: .cfi_def_cfa_offset: 8
+// 0x0000002c: .cfi_restore: r6
+// 0x0000002c: ret
+// 0x0000002d: .cfi_restore_state
+// 0x0000002d: .cfi_def_cfa_offset: 64
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8bb5d8ebae..c7b2c67019 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -19,6 +19,7 @@
 #include <fstream>
 #include <stdint.h>
 
+#include "art_method-inl.h"
 #include "base/arena_allocator.h"
 #include "base/dumpable.h"
 #include "base/timing_logger.h"
@@ -44,7 +45,6 @@
 #include "intrinsics.h"
 #include "licm.h"
 #include "jni/quick/jni_compiler.h"
-#include "mirror/art_method-inl.h"
 #include "nodes.h"
 #include "prepare_for_register_allocation.h"
 #include "reference_type_propagation.h"
@@ -196,7 +196,7 @@ class OptimizingCompiler FINAL : public Compiler {
     return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
   }
 
-  uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
+  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
         InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
@@ -514,7 +514,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
   ArenaAllocator arena(Runtime::Current()->GetArenaPool());
   HGraph* graph = new (&arena) HGraph(
-      &arena, dex_file, method_idx, compiler_driver->GetCompilerOptions().GetDebuggable());
+      &arena, dex_file, method_idx, compiler_driver->GetInstructionSet(),
+      compiler_driver->GetCompilerOptions().GetDebuggable());
 
   // For testing purposes, we put a special marker on method names that should be compiled
   // with this compiler. This makes sure we're not regressing.
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 4f8ec65e43..3ef96faab3 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -74,7 +74,8 @@ void RemoveSuspendChecks(HGraph* graph) {
 
 inline HGraph* CreateGraph(ArenaAllocator* allocator) {
   return new (allocator) HGraph(
-      allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1);
+      allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1, kRuntimeISA,
+      false);
 }
 
 // Create a control-flow graph from Dex instructions.
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 12b1c2b9bd..e93e06118c 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -16,7 +16,7 @@
 
 #include "reference_type_propagation.h"
 
-#include "class_linker.h"
+#include "class_linker-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
 #include "scoped_thread_state_change.h"
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index f53f846326..5f439c86d9 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -71,7 +71,9 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
   physical_fp_register_intervals_.SetSize(codegen->GetNumberOfFloatingPointRegisters());
   // Always reserve for the current method and the graph's max out registers.
   // TODO: compute it instead.
-  reserved_out_slots_ = 1 + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
+  // ArtMethod* takes 2 vregs for 64 bits.
+  reserved_out_slots_ = InstructionSetPointerSize(codegen->GetInstructionSet()) / kVRegSize +
+      codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
 }
 
 bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
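The new reserved_out_slots_ expression generalizes the old hard-coded single slot: the ArtMethod* consumes pointer-size / kVRegSize vreg slots, i.e. one on 32-bit targets and two on 64-bit ones, on top of the graph's maximum outgoing vregs. Worked out with assumed inputs:

    #include <cassert>
    #include <cstddef>

    constexpr size_t kVRegSize = 4;

    size_t ReservedOutSlots(size_t pointer_size, size_t max_out_vregs) {
      // ArtMethod* occupies pointer_size / kVRegSize virtual-register slots.
      return pointer_size / kVRegSize + max_out_vregs;
    }

    int main() {
      assert(ReservedOutSlots(4, 3) == 4);  // arm/x86: 1 method slot + 3 outs
      assert(ReservedOutSlots(8, 3) == 5);  // arm64/x86-64: 2 method slots + 3 outs
    }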