| author | Nicolas Geoffray <ngeoffray@google.com> | 2014-11-06 08:59:20 +0000 |
|---|---|---|
| committer | Nicolas Geoffray <ngeoffray@google.com> | 2014-11-07 15:32:37 +0000 |
| commit | 6f5c41f9e409bc4da53b5d7c385202255e391e72 (patch) | |
| tree | bea48b3f23fdac7d566dd3b32dde1f86014b5a02 /compiler/optimizing | |
| parent | 86fe4e41720cab85e3e40c45c0436521e56b25d5 (diff) | |
Implement instanceof in optimizing.
- Only the fast path for now: null or same class.
- Use pQuickInstanceofNonTrivial for the slow path.
Change-Id: Ic5196b94bef792f081f3cb4d15157058e1381e6b
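For orientation before reading the diff: each backend's VisitTypeCheck emits the same check sequence — return 0 for a null receiver, return 1 on an exact class match, and otherwise either return 0 (when the reference class is final) or call the pInstanceofNonTrivial runtime entry point. The C++ sketch below is not part of the commit; the `Object`/`Class` stand-ins, the superclass-walking stub, and `DoInstanceof` are hypothetical names used only to summarize that control flow.

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for mirror::Class and mirror::Object, for illustration only.
struct Class {
  const Class* super_class;  // nullptr for the root class
};
struct Object {
  const Class* klass;
};

// Stand-in for the slow path behind pQuickInstanceofNonTrivial: walk the
// superclass chain. (The real entry point also handles interfaces and arrays.)
static uint32_t InstanceofNonTrivial(const Class* obj_class, const Class* ref_class) {
  for (const Class* k = obj_class; k != nullptr; k = k->super_class) {
    if (k == ref_class) return 1;
  }
  return 0;
}

// Shape of the code the ARM/x86/x86_64 visitors emit for `obj instanceof Cls`.
static uint32_t DoInstanceof(const Object* obj, const Class* cls, bool class_is_final) {
  if (obj == nullptr) return 0;                   // null is never an instance
  if (obj->klass == cls) return 1;                // same class: fast path
  if (class_is_final) return 0;                   // final class: no subclasses to consider
  return InstanceofNonTrivial(obj->klass, cls);   // slow path
}

int main() {
  Class object_class{nullptr};
  Class string_class{&object_class};
  Object s{&string_class};
  std::printf("%u %u %u\n",
              DoInstanceof(&s, &string_class, /*class_is_final=*/true),   // 1: exact match
              DoInstanceof(&s, &object_class, /*class_is_final=*/false),  // 1: via slow path
              DoInstanceof(nullptr, &string_class, true));                // 0: null receiver
}
```

The final-class special case is why the LocationsBuilder*::VisitTypeCheck implementations in the diff only request kCallOnSlowPath when IsClassFinal() is false: a failed comparison against a final class can return 0 directly without a runtime call.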
Diffstat (limited to 'compiler/optimizing')
-rw-r--r-- | compiler/optimizing/builder.cc | 25
-rw-r--r-- | compiler/optimizing/code_generator_arm.cc | 97
-rw-r--r-- | compiler/optimizing/code_generator_arm64.cc | 1
-rw-r--r-- | compiler/optimizing/code_generator_x86.cc | 106
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 105
-rw-r--r-- | compiler/optimizing/nodes.h | 40
6 files changed, 365 insertions(+), 9 deletions(-)
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index fc7333fa25..05213a18be 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -1260,7 +1260,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32 return false; } current_block_->AddInstruction( - new (arena_) HLoadClass(instruction.VRegB_21c(), is_referrers_class, dex_offset)); + new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset)); UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction()); break; } @@ -1282,6 +1282,29 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32 break; } + case Instruction::INSTANCE_OF: { + uint16_t type_index = instruction.VRegC_22c(); + bool type_known_final; + bool type_known_abstract; + bool is_referrers_class; + bool can_access = compiler_driver_->CanAccessTypeWithoutChecks( + dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index, + &type_known_final, &type_known_abstract, &is_referrers_class); + if (!can_access) { + return false; + } + HInstruction* object = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimNot); + HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset); + current_block_->AddInstruction(cls); + // The class needs a temporary before being used by the type check. + Temporaries temps(graph_, 1); + temps.Add(cls); + current_block_->AddInstruction( + new (arena_) HTypeCheck(object, cls, type_known_final, dex_offset)); + UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction()); + break; + } + default: return false; } diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 91b28c4d99..8e6f8ea5eb 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -212,8 +212,9 @@ class LoadClassSlowPathARM : public SlowPathCodeARM { arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_); // Move the class to the desired location. - if (locations->Out().IsValid()) { - DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + Location out = locations->Out(); + if (out.IsValid()) { + DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0)); } codegen->RestoreLiveRegisters(locations); @@ -266,6 +267,49 @@ class LoadStringSlowPathARM : public SlowPathCodeARM { DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM); }; +class TypeCheckSlowPathARM : public SlowPathCodeARM { + public: + explicit TypeCheckSlowPathARM(HTypeCheck* instruction, Location object_class) + : instruction_(instruction), + object_class_(object_class) {} + + virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + LocationSummary* locations = instruction_->GetLocations(); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + + CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen); + __ Bind(GetEntryLabel()); + codegen->SaveLiveRegisters(locations); + + // We're moving two locations to locations that could overlap, so we need a parallel + // move resolver. 
+ InvokeRuntimeCallingConvention calling_convention; + MoveOperands move1(locations->InAt(1), + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + nullptr); + MoveOperands move2(object_class_, + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + nullptr); + HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + parallel_move.AddMove(&move1); + parallel_move.AddMove(&move2); + arm_codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + + arm_codegen->InvokeRuntime( + QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, instruction_->GetDexPc()); + arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0)); + + codegen->RestoreLiveRegisters(locations); + __ b(GetExitLabel()); + } + + private: + HTypeCheck* const instruction_; + const Location object_class_; + + DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM); +}; + #undef __ #undef __ @@ -2577,5 +2621,54 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) { QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc()); } +void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) { + LocationSummary::CallKind call_kind = instruction->IsClassFinal() + ? LocationSummary::kNoCall + : LocationSummary::kCallOnSlowPath; + LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister()); +} + +void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) { + LocationSummary* locations = instruction->GetLocations(); + Register obj = locations->InAt(0).As<Register>(); + Register cls = locations->InAt(1).As<Register>(); + Register out = locations->Out().As<Register>(); + uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + Label done, zero; + SlowPathCodeARM* slow_path = nullptr; + + // Return 0 if `obj` is null. + // TODO: avoid this check if we know obj is not null. + __ cmp(obj, ShifterOperand(0)); + __ b(&zero, EQ); + // Compare the class of `obj` with `cls`. + __ LoadFromOffset(kLoadWord, out, obj, class_offset); + __ cmp(out, ShifterOperand(cls)); + if (instruction->IsClassFinal()) { + // Classes must be equal for the instanceof to succeed. + __ b(&zero, NE); + __ LoadImmediate(out, 1); + __ b(&done); + } else { + // If the classes are not equal, we go into a slow path. 
+ DCHECK(locations->OnlyCallsOnSlowPath()); + slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM( + instruction, Location::RegisterLocation(out)); + codegen_->AddSlowPath(slow_path); + __ b(slow_path->GetEntryLabel(), NE); + __ LoadImmediate(out, 1); + __ b(&done); + } + __ Bind(&zero); + __ LoadImmediate(out, 0); + if (slow_path != nullptr) { + __ Bind(slow_path->GetExitLabel()); + } + __ Bind(&done); +} + } // namespace arm } // namespace art diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index e84346b28d..4fe954de00 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -550,6 +550,7 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph, M(StaticFieldGet) \ M(StaticFieldSet) \ M(Throw) \ + M(TypeCheck) \ M(TypeConversion) \ #define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 129c374947..548d6995d0 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -241,10 +241,12 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 { codegen->RecordPcInfo(at_, dex_pc_); // Move the class to the desired location. - if (locations->Out().IsValid()) { - DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); - x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); + Location out = locations->Out(); + if (out.IsValid()) { + DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); + x86_codegen->Move32(out, Location::RegisterLocation(EAX)); } + codegen->RestoreLiveRegisters(locations); __ jmp(GetExitLabel()); } @@ -266,6 +268,49 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 { DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86); }; +class TypeCheckSlowPathX86 : public SlowPathCodeX86 { + public: + TypeCheckSlowPathX86(HTypeCheck* instruction, Location object_class) + : instruction_(instruction), + object_class_(object_class) {} + + virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + LocationSummary* locations = instruction_->GetLocations(); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + + CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); + __ Bind(GetEntryLabel()); + codegen->SaveLiveRegisters(locations); + + // We're moving two locations to locations that could overlap, so we need a parallel + // move resolver. 
+ InvokeRuntimeCallingConvention calling_convention; + MoveOperands move1(locations->InAt(1), + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + nullptr); + MoveOperands move2(object_class_, + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + nullptr); + HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + parallel_move.AddMove(&move1); + parallel_move.AddMove(&move2); + x86_codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + + __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial))); + codegen->RecordPcInfo(instruction_, instruction_->GetDexPc()); + x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); + codegen->RestoreLiveRegisters(locations); + + __ jmp(GetExitLabel()); + } + + private: + HTypeCheck* const instruction_; + const Location object_class_; + + DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86); +}; + #undef __ #define __ reinterpret_cast<X86Assembler*>(GetAssembler())-> @@ -2671,5 +2716,60 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) { codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } +void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) { + LocationSummary::CallKind call_kind = instruction->IsClassFinal() + ? LocationSummary::kNoCall + : LocationSummary::kCallOnSlowPath; + LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::Any()); + locations->SetOut(Location::RequiresRegister()); +} + +void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) { + LocationSummary* locations = instruction->GetLocations(); + Register obj = locations->InAt(0).As<Register>(); + Location cls = locations->InAt(1); + Register out = locations->Out().As<Register>(); + uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + Label done, zero; + SlowPathCodeX86* slow_path = nullptr; + + // Return 0 if `obj` is null. + // TODO: avoid this check if we know obj is not null. + __ testl(obj, obj); + __ j(kEqual, &zero); + __ movl(out, Address(obj, class_offset)); + // Compare the class of `obj` with `cls`. + if (cls.IsRegister()) { + __ cmpl(out, cls.As<Register>()); + } else { + DCHECK(cls.IsStackSlot()) << cls; + __ cmpl(out, Address(ESP, cls.GetStackIndex())); + } + + if (instruction->IsClassFinal()) { + // Classes must be equal for the instanceof to succeed. + __ j(kNotEqual, &zero); + __ movl(out, Immediate(1)); + __ jmp(&done); + } else { + // If the classes are not equal, we go into a slow path. 
+ DCHECK(locations->OnlyCallsOnSlowPath()); + slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86( + instruction, Location::RegisterLocation(out)); + codegen_->AddSlowPath(slow_path); + __ j(kNotEqual, slow_path->GetEntryLabel()); + __ movl(out, Immediate(1)); + __ jmp(&done); + } + __ Bind(&zero); + __ movl(out, Immediate(0)); + if (slow_path != nullptr) { + __ Bind(slow_path->GetExitLabel()); + } + __ Bind(&done); +} + } // namespace x86 } // namespace art diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index efe1ef2155..b9891d6cd9 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -224,10 +224,11 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 { : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)) , true)); codegen->RecordPcInfo(at_, dex_pc_); + Location out = locations->Out(); // Move the class to the desired location. - if (locations->Out().IsValid()) { - DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); - x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX)); + if (out.IsValid()) { + DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); + x64_codegen->Move(out, Location::RegisterLocation(RAX)); } codegen->RestoreLiveRegisters(locations); @@ -281,6 +282,50 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 { DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64); }; +class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 { + public: + TypeCheckSlowPathX86_64(HTypeCheck* instruction, Location object_class) + : instruction_(instruction), + object_class_(object_class) {} + + virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + LocationSummary* locations = instruction_->GetLocations(); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); + + CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); + __ Bind(GetEntryLabel()); + codegen->SaveLiveRegisters(locations); + + // We're moving two locations to locations that could overlap, so we need a parallel + // move resolver. + InvokeRuntimeCallingConvention calling_convention; + MoveOperands move1(locations->InAt(1), + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + nullptr); + MoveOperands move2(object_class_, + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + nullptr); + HParallelMove parallel_move(codegen->GetGraph()->GetArena()); + parallel_move.AddMove(&move1); + parallel_move.AddMove(&move2); + x64_codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); + + __ gs()->call( + Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true)); + codegen->RecordPcInfo(instruction_, instruction_->GetDexPc()); + x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX)); + + codegen->RestoreLiveRegisters(locations); + __ jmp(GetExitLabel()); + } + + private: + HTypeCheck* const instruction_; + const Location object_class_; + + DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64); +}; + #undef __ #define __ reinterpret_cast<X86_64Assembler*>(GetAssembler())-> @@ -2661,5 +2706,59 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) { codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } +void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) { + LocationSummary::CallKind call_kind = instruction->IsClassFinal() + ? 
LocationSummary::kNoCall + : LocationSummary::kCallOnSlowPath; + LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::Any()); + locations->SetOut(Location::RequiresRegister()); +} + +void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) { + LocationSummary* locations = instruction->GetLocations(); + CpuRegister obj = locations->InAt(0).As<CpuRegister>(); + Location cls = locations->InAt(1); + CpuRegister out = locations->Out().As<CpuRegister>(); + uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + Label done, zero; + SlowPathCodeX86_64* slow_path = nullptr; + + // Return 0 if `obj` is null. + // TODO: avoid this check if we know obj is not null. + __ testl(obj, obj); + __ j(kEqual, &zero); + // Compare the class of `obj` with `cls`. + __ movl(out, Address(obj, class_offset)); + if (cls.IsRegister()) { + __ cmpl(out, cls.As<CpuRegister>()); + } else { + DCHECK(cls.IsStackSlot()) << cls; + __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex())); + } + if (instruction->IsClassFinal()) { + // Classes must be equal for the instanceof to succeed. + __ j(kNotEqual, &zero); + __ movl(out, Immediate(1)); + __ jmp(&done); + } else { + // If the classes are not equal, we go into a slow path. + DCHECK(locations->OnlyCallsOnSlowPath()); + slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64( + instruction, Location::RegisterLocation(out.AsRegister())); + codegen_->AddSlowPath(slow_path); + __ j(kNotEqual, slow_path->GetEntryLabel()); + __ movl(out, Immediate(1)); + __ jmp(&done); + } + __ Bind(&zero); + __ movl(out, Immediate(0)); + if (slow_path != nullptr) { + __ Bind(slow_path->GetExitLabel()); + } + __ Bind(&done); +} + } // namespace x86_64 } // namespace art diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 47ed8dfe88..ecf8c370f0 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -524,6 +524,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { M(SuspendCheck, Instruction) \ M(Temporary, Instruction) \ M(Throw, Instruction) \ + M(TypeCheck, Instruction) \ M(TypeConversion, Instruction) \ #define FOR_EACH_INSTRUCTION(M) \ @@ -2325,6 +2326,45 @@ class HThrow : public HTemplateInstruction<1> { DISALLOW_COPY_AND_ASSIGN(HThrow); }; +class HTypeCheck : public HExpression<2> { + public: + explicit HTypeCheck(HInstruction* object, + HLoadClass* constant, + bool class_is_final, + uint32_t dex_pc) + : HExpression(Primitive::kPrimBoolean, SideEffects::None()), + class_is_final_(class_is_final), + dex_pc_(dex_pc) { + SetRawInputAt(0, object); + SetRawInputAt(1, constant); + } + + bool CanBeMoved() const OVERRIDE { return true; } + + bool InstructionDataEquals(HInstruction* other) const OVERRIDE { + UNUSED(other); + return true; + } + + bool NeedsEnvironment() const OVERRIDE { + // TODO: Can we debug when doing a runtime instanceof check? + return false; + } + + uint32_t GetDexPc() const { return dex_pc_; } + + bool IsClassFinal() const { return class_is_final_; } + + DECLARE_INSTRUCTION(TypeCheck); + + private: + const bool class_is_final_; + const uint32_t dex_pc_; + + DISALLOW_COPY_AND_ASSIGN(HTypeCheck); +}; + + class MoveOperands : public ArenaObject<kArenaAllocMisc> { public: MoveOperands(Location source, Location destination, HInstruction* instruction) |