Diffstat (limited to 'compiler/optimizing/code_generator_arm.cc')
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 164 ++++++++++++++++++++++----
1 file changed, 146 insertions(+), 18 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 8c54cdfcf3..6218fc973a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -212,8 +212,9 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
// Move the class to the desired location.
- if (locations->Out().IsValid()) {
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
}
codegen->RestoreLiveRegisters(locations);
@@ -266,6 +267,49 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
+class TypeCheckSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit TypeCheckSlowPathARM(HTypeCheck* instruction, Location object_class)
+ : instruction_(instruction),
+ object_class_(object_class) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ MoveOperands move1(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ nullptr);
+ MoveOperands move2(object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ nullptr);
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ arm_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, instruction_->GetDexPc());
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+
+ codegen->RestoreLiveRegisters(locations);
+ __ b(GetExitLabel());
+ }
+
+ private:
+ HTypeCheck* const instruction_;
+ const Location object_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
+};
+
#undef __
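Note on the parallel move in TypeCheckSlowPathARM above: the calling-convention argument registers can themselves hold the two sources, so emitting the copies one after the other could clobber a source before it is read. An illustrative, self-contained sketch of the hazard (register assignments are hypothetical; in the patch the work is done by HParallelMove and the move resolver):

    // If `locations->InAt(1)` currently lives in R1 and `object_class_` in R0,
    // the requested moves form a cycle: R1 -> R0 and R0 -> R1.
    #include <utility>

    int main() {
      int r0 = 100;  // stand-in for: object_class_
      int r1 = 200;  // stand-in for: locations->InAt(1)
      // Naive sequential emission would lose a value:
      //   r0 = r1;  // R0's old contents (object_class_) are gone
      //   r1 = r0;  // copies 200 again, not 100
      // The move resolver detects the cycle and emits a swap instead:
      std::swap(r0, r1);
      return (r0 == 200 && r1 == 100) ? 0 : 1;  // exit code 0 == moves resolved correctly
    }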
@@ -766,6 +810,9 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr
default:
LOG(FATAL) << "Unexpected type " << instruction->GetType();
}
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ Move32(location, temp_location);
} else {
DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
switch (instruction->GetType()) {
@@ -1861,10 +1908,18 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instr
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister value = locations->InAt(1).As<SRegister>();
+ __ StoreSToOffset(value, obj, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister value = FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>());
+ __ StoreDToOffset(value, obj, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
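The FromLowSToD() helper used in the kPrimDouble cases (here and in the static-field hunks below) relies on VFP register aliasing: D<n> overlaps S<2n> and S<2n+1>, so the D register holding a double can be recovered from the low S register of the allocated pair. Its definition elsewhere in this file is along these lines (reproduced for reference, not part of this hunk):

    static DRegister FromLowSToD(SRegister reg) {
      DCHECK_EQ(reg % 2, 0);                   // must be the even, low half of the pair
      return static_cast<DRegister>(reg / 2);  // D register index is half the S index
    }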
@@ -1922,10 +1977,18 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instr
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister out = locations->Out().As<SRegister>();
+ __ LoadSFromOffset(out, obj, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister out = FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>());
+ __ LoadDFromOffset(out, obj, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2459,10 +2522,18 @@ void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instructi
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister out = locations->Out().As<SRegister>();
+ __ LoadSFromOffset(out, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister out = FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>());
+ __ LoadDFromOffset(out, cls, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2521,10 +2592,18 @@ void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister value = locations->InAt(1).As<SRegister>();
+ __ StoreSToOffset(value, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister value = FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>());
+ __ StoreDToOffset(value, cls, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -2577,5 +2656,54 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
}
+void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(1).As<Register>();
+ Register out = locations->Out().As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeARM* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ cmp(obj, ShifterOperand(0));
+ __ b(&zero, EQ);
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, out, obj, class_offset);
+ __ cmp(out, ShifterOperand(cls));
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ b(&zero, NE);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, Location::RegisterLocation(out));
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ }
+ __ Bind(&zero);
+ __ LoadImmediate(out, 0);
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
+
} // namespace arm
} // namespace art
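For intuition, the fast path generated by VisitTypeCheck for a final class amounts to a null check plus a single class-pointer comparison; only non-final classes fall back to the pInstanceofNonTrivial runtime call when the compare fails. A minimal standalone sketch of that semantics (Object and Class are stand-in types, not the real mirror:: classes):

    struct Class {};
    struct Object { Class* klass; };  // stand-in for the field read at class_offset

    // instanceof against a final class never needs a superclass walk,
    // because no other class can compare equal to a final class.
    static bool InstanceOfFinalClass(const Object* obj, const Class* cls) {
      if (obj == nullptr) return false;  // the `zero` label in the code above
      return obj->klass == cls;          // exact match => 1, otherwise 0
    }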