author     Nicolas Geoffray <ngeoffray@google.com>   2015-03-13 16:36:36 +0000
committer  Nicolas Geoffray <ngeoffray@google.com>   2015-03-13 16:47:44 +0000
commit     a8ac9130b872c080299afacf5dcaab513d13ea87 (patch)
tree       2bd0a2a88cbb6e7a3ae79dff84c466bed9189eb5 /compiler/optimizing
parent     cc22e3946baf035c8732e9417ab132bfe663aa45 (diff)
Refactor code in preparation for correct stack maps in slow paths.

Move the logic for saving/restoring live registers in slow paths
into SlowPathCode methods. Also add a RecordPcInfo helper to
SlowPathCode that will act as the placeholder for saving correct
stack maps.
Change-Id: I25c2bc7a642ef854bbc8a3eb570e5c8c8d2d030c
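
For orientation before reading the hunks below: after this change, every
architecture-specific slow path emits its spill code through helpers it
inherits from SlowPathCode, passing the code generator explicitly, instead of
calling CodeGenerator::SaveLiveRegisters/RestoreLiveRegisters. A condensed
sketch of the pattern, based on the SuspendCheckSlowPathARM hunk in this diff
(not a standalone compilable excerpt; the `__` assembler macro and the
surrounding class come from code_generator_arm.cc):

    void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
      CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
      __ Bind(GetEntryLabel());
      // Spill the live caller-save registers; the helper now lives on
      // SlowPathCode and queries the codegen for the frame layout.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
      arm_codegen->InvokeRuntime(
          QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
      // Reload the spilled registers before rejoining the fast path.
      RestoreLiveRegisters(codegen, instruction_->GetLocations());
      __ b(GetReturnLabel());
    }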
Diffstat (limited to 'compiler/optimizing')

 compiler/optimizing/code_generator.cc        | 96
 compiler/optimizing/code_generator.h         | 19
 compiler/optimizing/code_generator_arm.cc    | 18
 compiler/optimizing/code_generator_arm64.cc  | 18
 compiler/optimizing/code_generator_x86.cc    | 24
 compiler/optimizing/code_generator_x86_64.cc | 32
 compiler/optimizing/intrinsics_arm.cc        |  5
 compiler/optimizing/intrinsics_arm64.cc      |  5
 compiler/optimizing/intrinsics_x86_64.cc     |  5

9 files changed, 118 insertions, 104 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a6ab20812e..742d83e093 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -750,78 +750,82 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
   }
 }
 
-void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
+void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
+  LocationSummary* locations = suspend_check->GetLocations();
+  HBasicBlock* block = suspend_check->GetBlock();
+  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
+  DCHECK(block->IsLoopHeader());
+
+  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+    HInstruction* current = it.Current();
+    LiveInterval* interval = current->GetLiveInterval();
+    // We only need to clear bits of loop phis containing objects and allocated in register.
+    // Loop phis allocated on stack already have the object in the stack.
+    if (current->GetType() == Primitive::kPrimNot
+        && interval->HasRegister()
+        && interval->HasSpillSlot()) {
+      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
+    }
+  }
+}
+
+void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
+  HParallelMove parallel_move(GetGraph()->GetArena());
+  parallel_move.AddMove(from1, to1, nullptr);
+  parallel_move.AddMove(from2, to2, nullptr);
+  GetMoveResolver()->EmitNativeCode(&parallel_move);
+}
+
+void SlowPathCode::RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc) {
+  codegen->RecordPcInfo(instruction, dex_pc);
+}
+
+void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   RegisterSet* register_set = locations->GetLiveRegisters();
-  size_t stack_offset = first_register_slot_in_slow_path_;
-  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
-    if (!IsCoreCalleeSaveRegister(i)) {
+  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+    if (!codegen->IsCoreCalleeSaveRegister(i)) {
       if (register_set->ContainsCoreRegister(i)) {
         // If the register holds an object, update the stack mask.
         if (locations->RegisterContainsObject(i)) {
           locations->SetStackBit(stack_offset / kVRegSize);
         }
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += SaveCoreRegister(stack_offset, i);
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->SaveCoreRegister(stack_offset, i);
       }
     }
   }
 
-  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
-    if (!IsFloatingPointCalleeSaveRegister(i)) {
+  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
       if (register_set->ContainsFloatingPointRegister(i)) {
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += SaveFloatingPointRegister(stack_offset, i);
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
       }
     }
   }
 }
 
-void CodeGenerator::RestoreLiveRegisters(LocationSummary* locations) {
+void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   RegisterSet* register_set = locations->GetLiveRegisters();
-  size_t stack_offset = first_register_slot_in_slow_path_;
-  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
-    if (!IsCoreCalleeSaveRegister(i)) {
+  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+    if (!codegen->IsCoreCalleeSaveRegister(i)) {
       if (register_set->ContainsCoreRegister(i)) {
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += RestoreCoreRegister(stack_offset, i);
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
       }
     }
   }
 
-  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
-    if (!IsFloatingPointCalleeSaveRegister(i)) {
+  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
       if (register_set->ContainsFloatingPointRegister(i)) {
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += RestoreFloatingPointRegister(stack_offset, i);
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
       }
     }
   }
 }
 
-void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
-  LocationSummary* locations = suspend_check->GetLocations();
-  HBasicBlock* block = suspend_check->GetBlock();
-  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
-  DCHECK(block->IsLoopHeader());
-
-  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
-    HInstruction* current = it.Current();
-    LiveInterval* interval = current->GetLiveInterval();
-    // We only need to clear bits of loop phis containing objects and allocated in register.
-    // Loop phis allocated on stack already have the object in the stack.
-    if (current->GetType() == Primitive::kPrimNot
-        && interval->HasRegister()
-        && interval->HasSpillSlot()) {
-      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
-    }
-  }
-}
-
-void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
-  HParallelMove parallel_move(GetGraph()->GetArena());
-  parallel_move.AddMove(from1, to1, nullptr);
-  parallel_move.AddMove(from2, to2, nullptr);
-  GetMoveResolver()->EmitNativeCode(&parallel_move);
-}
-
 }  // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b8f4572abd..81fc684ccf 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -73,6 +73,10 @@ class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
 
   virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
 
+  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+  void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
+
  private:
   DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
 };
@@ -182,8 +186,6 @@ class CodeGenerator {
   void BuildNativeGCMap(
       std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
   void BuildStackMaps(std::vector<uint8_t>* vector);
-  void SaveLiveRegisters(LocationSummary* locations);
-  void RestoreLiveRegisters(LocationSummary* locations);
 
   bool IsLeafMethod() const {
     return is_leaf_;
@@ -267,6 +269,15 @@ class CodeGenerator {
     }
   }
 
+  size_t GetFirstRegisterSlotInSlowPath() const {
+    return first_register_slot_in_slow_path_;
+  }
+
+  uint32_t FrameEntrySpillSize() const {
+    return GetFpuSpillSize() + GetCoreSpillSize();
+  }
+
+
  protected:
   CodeGenerator(HGraph* graph,
                 size_t number_of_core_registers,
@@ -326,10 +337,6 @@ class CodeGenerator {
     return POPCOUNT(core_spill_mask_) * GetWordSize();
   }
 
-  uint32_t FrameEntrySpillSize() const {
-    return GetFpuSpillSize() + GetCoreSpillSize();
-  }
-
   bool HasAllocatedCalleeSaveRegisters() const {
     // We check the core registers against 1 because it always comprises the return PC.
     return (POPCOUNT(allocated_registers_.GetCoreRegisters() & core_callee_save_mask_) != 1)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 07cc41a8d5..aed8c0672c 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -114,10 +114,10 @@ class SuspendCheckSlowPathARM : public SlowPathCodeARM {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     arm_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ b(GetReturnLabel());
     } else {
@@ -188,7 +188,7 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
@@ -204,7 +204,7 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
       DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
       arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
     }
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ b(GetExitLabel());
   }
 
@@ -235,7 +235,7 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
@@ -244,7 +244,7 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
         QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
     arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ b(GetExitLabel());
   }
 
@@ -272,7 +272,7 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -291,7 +291,7 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
       arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ b(GetExitLabel());
   }
 
@@ -1205,6 +1205,7 @@ void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirec
 
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
   codegen_->GenerateStaticOrDirectCall(invoke, temp);
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
@@ -3861,7 +3862,6 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
     __ bl(GetFrameEntryLabel());
   }
 
-  RecordPcInfo(invoke, invoke->GetDexPc());
   DCHECK(!IsLeafMethod());
 }
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c21084a6fe..93c4ce52b3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -191,7 +191,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
@@ -213,7 +213,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
       arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ B(GetExitLabel());
   }
 
@@ -244,7 +244,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
@@ -255,7 +255,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
     Primitive::Type type = instruction_->GetType();
     arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ B(GetExitLabel());
   }
 
@@ -292,11 +292,11 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     arm64_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
     CheckEntrypointTypes<kQuickTestSuspend, void, void>();
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ B(GetReturnLabel());
     } else {
@@ -338,7 +338,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
 
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -360,7 +360,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
       CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ B(GetExitLabel());
   }
 
@@ -1920,7 +1920,6 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
     __ Bl(&frame_entry_label_);
   }
 
-  RecordPcInfo(invoke, invoke->GetDexPc());
   DCHECK(!IsLeafMethod());
 }
 
@@ -1931,6 +1930,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
   Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
   codegen_->GenerateStaticOrDirectCall(invoke, temp);
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a09ecb8fef..1db16002c0 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -158,16 +158,16 @@ class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
 
 class SuspendCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
-  explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
+  SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
       : instruction_(instruction), successor_(successor) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
     codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ jmp(GetReturnLabel());
     } else {
@@ -198,15 +198,15 @@ class LoadStringSlowPathX86 : public SlowPathCodeX86 {
 
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
     __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex()));
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
     x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
 
     __ jmp(GetExitLabel());
   }
@@ -231,7 +231,7 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
     LocationSummary* locations = at_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
@@ -239,7 +239,7 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
     __ fs()->call(Address::Absolute(do_clinit_
         ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
        : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
-    codegen->RecordPcInfo(at_, dex_pc_);
+    RecordPcInfo(codegen, at_, dex_pc_);
 
     // Move the class to the desired location.
     Location out = locations->Out();
@@ -248,7 +248,7 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
       x86_codegen->Move32(out, Location::RegisterLocation(EAX));
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -287,7 +287,7 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
 
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -306,11 +306,11 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
       __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
     }
 
-    codegen->RecordPcInfo(instruction_, dex_pc_);
+    RecordPcInfo(codegen, instruction_, dex_pc_);
     if (instruction_->IsInstanceOf()) {
       x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
     }
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
 
     __ jmp(GetExitLabel());
   }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 07ba95dcfb..90d87d4b9f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -72,7 +72,7 @@ class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
   }
 
  private:
@@ -88,7 +88,7 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
   }
 
  private:
@@ -136,10 +136,10 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ jmp(GetReturnLabel());
     } else {
@@ -181,7 +181,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
     __ gs()->call(Address::Absolute(
         QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
   }
 
  private:
@@ -207,7 +207,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
@@ -215,7 +215,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
     __ gs()->call(Address::Absolute((do_clinit_
         ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
        : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true));
-    codegen->RecordPcInfo(at_, dex_pc_);
+    RecordPcInfo(codegen, at_, dex_pc_);
 
     Location out = locations->Out();
     // Move the class to the desired location.
@@ -224,7 +224,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
       x64_codegen->Move(out, Location::RegisterLocation(RAX));
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -255,7 +255,7 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
 
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
@@ -263,9 +263,9 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
             Immediate(instruction_->GetStringIndex()));
     __ gs()->call(Address::Absolute(
         QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pResolveString), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
     x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -293,7 +293,7 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
 
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -312,13 +312,13 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
       __ gs()->call(
           Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
     }
 
-    codegen->RecordPcInfo(instruction_, dex_pc_);
+    RecordPcInfo(codegen, instruction_, dex_pc_);
 
     if (instruction_->IsInstanceOf()) {
       x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -374,7 +374,6 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
   }
 
   DCHECK(!IsLeafMethod());
-  RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -1216,6 +1215,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDi
 
   codegen_->GenerateStaticOrDirectCall(
       invoke,
      invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index a82d80af13..0c9eb94172 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -114,12 +114,13 @@ class IntrinsicSlowPathARM : public SlowPathCodeARM {
     CodeGeneratorARM* codegen = down_cast<CodeGeneratorARM*>(codegen_in);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(invoke_->GetLocations());
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
 
     MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
+      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -133,7 +134,7 @@ class IntrinsicSlowPathARM : public SlowPathCodeARM {
       MoveFromReturnRegister(out, invoke_->GetType(), codegen);
     }
 
-    codegen->RestoreLiveRegisters(invoke_->GetLocations());
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
     __ b(GetExitLabel());
   }
 
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 1ddff8a125..19b04ae094 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -122,12 +122,13 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
     CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(invoke_->GetLocations());
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
 
     MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
+      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -141,7 +142,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
       MoveFromReturnRegister(out, invoke_->GetType(), codegen);
     }
 
-    codegen->RestoreLiveRegisters(invoke_->GetLocations());
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
     __ B(GetExitLabel());
   }
 
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index c73f092a61..2064b18138 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -134,12 +134,13 @@ class IntrinsicSlowPathX86_64 : public SlowPathCodeX86_64 {
     CodeGeneratorX86_64* codegen = down_cast<CodeGeneratorX86_64*>(codegen_in);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(invoke_->GetLocations());
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
 
     MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
+      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -153,7 +154,7 @@ class IntrinsicSlowPathX86_64 : public SlowPathCodeX86_64 {
       MoveFromReturnRegister(out, invoke_->GetType(), codegen);
     }
 
-    codegen->RestoreLiveRegisters(invoke_->GetLocations());
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
     __ jmp(GetExitLabel());
   }
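
One subtlety visible in the hunks above: GenerateStaticOrDirectCall no longer
records PC information itself, so each caller now does it explicitly —
instruction visitors directly through the code generator, slow paths through
the new SlowPathCode::RecordPcInfo helper, which for now simply forwards to
CodeGenerator::RecordPcInfo and is the placeholder where slow-path-aware stack
maps will later be emitted. A condensed sketch of the two call sites, taken
from the x86-64 changes in this diff:

    // Fast path (InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect):
    // the visitor records the PC right after emitting the call.
    codegen_->GenerateStaticOrDirectCall(
        invoke, invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>());
    codegen_->RecordPcInfo(invoke, invoke->GetDexPc());

    // Slow path (IntrinsicSlowPathX86_64::EmitNativeCode): the inherited
    // helper is used so the recording can later account for the registers
    // the slow path itself saved.
    codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
    RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());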