author     Calin Juravle <calin@google.com>    2015-01-12 18:45:46 +0000
committer  Calin Juravle <calin@google.com>    2015-01-21 14:26:35 +0000
commit     77520bca97ec44e3758510cebd0f20e3bb4584ea (patch)
tree       2e3be6fdc182e5cf5ae390019457af5e9c1ed242 /compiler
parent     4d2c611bf17ff309abfa152e56c0b98a21ec8787 (diff)
Record implicit null checks at the actual invoke time.
Implicit null checks are recorded only for instructions directly (see NB below) preceded by NullChecks in the graph. This way we avoid recording redundant safepoints and minimize the code size increase.

NB: ParallelMoves might be inserted by the register allocator between the NullChecks and their uses. These modify the environment and the correct action would be to reverse their modification. This will be addressed in a follow-up CL.

Change-Id: Ie50006e5a4bd22932dcf11348f5a655d253cd898
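Before diving into the diff, the pairing rule this change introduces can be summarized with a small standalone C++ sketch. This is not ART code: the Instr struct and the free functions below are illustrative stand-ins for HInstruction and for the new CanMoveNullCheckToUser / MaybeRecordImplicitNullCheck helpers, shown only to make the control flow concrete. The idea: a NullCheck emits no code of its own when its first non-move successor can take the fault, and that successor then records the NullCheck's dex pc at its own native pc.

#include <cassert>
#include <cstdint>

// Minimal instruction model: only the properties the pairing rule needs.
struct Instr {
  bool is_null_check = false;
  bool is_parallel_move = false;
  bool can_do_implicit_null_check = false;
  uint32_t dex_pc = 0;
  Instr* next = nullptr;
  Instr* prev = nullptr;
};

// Skip register-allocator moves, mirroring GetNextDisregardingMoves() /
// GetPreviousDisregardingMoves() from the patch.
Instr* NextDisregardingMoves(Instr* i) {
  Instr* n = i->next;
  while (n != nullptr && n->is_parallel_move) n = n->next;
  return n;
}

Instr* PrevDisregardingMoves(Instr* i) {
  Instr* p = i->prev;
  while (p != nullptr && p->is_parallel_move) p = p->prev;
  return p;
}

// The NullCheck emits nothing itself when its first real successor can fault.
bool CanMoveNullCheckToUser(Instr* null_check) {
  Instr* user = NextDisregardingMoves(null_check);
  return user != nullptr && user->can_do_implicit_null_check;
}

// The successor records the NullCheck's dex pc at its own native pc, so a
// hardware fault at that instruction maps back to the right throw point.
bool MaybeRecordImplicitNullCheck(Instr* instr, uint32_t* recorded_dex_pc) {
  if (!instr->can_do_implicit_null_check) return false;
  Instr* prev = PrevDisregardingMoves(instr);
  if (prev != nullptr && prev->is_null_check) {
    *recorded_dex_pc = prev->dex_pc;
    return true;
  }
  return false;
}

int main() {
  // NullCheck -> ParallelMove -> field get, as the NB above describes.
  Instr null_check;  null_check.is_null_check = true;  null_check.dex_pc = 42;
  Instr move;        move.is_parallel_move = true;
  Instr field_get;   field_get.can_do_implicit_null_check = true;
  null_check.next = &move;  move.prev = &null_check;
  move.next = &field_get;   field_get.prev = &move;

  assert(CanMoveNullCheckToUser(&null_check));  // no explicit check emitted
  uint32_t pc = 0;
  assert(MaybeRecordImplicitNullCheck(&field_get, &pc) && pc == 42);
  return 0;
}

The real MaybeRecordImplicitNullCheck in code_generator.cc below additionally skips static field accesses (which cannot throw NPE), honors the implicit-null-checks compiler option, and records the pc via RecordPcInfo rather than returning it.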
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/code_generator.cc        | 34
-rw-r--r--  compiler/optimizing/code_generator.h         |  2
-rw-r--r--  compiler/optimizing/code_generator_arm.cc    | 68
-rw-r--r--  compiler/optimizing/code_generator_arm.h     |  3
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  | 37
-rw-r--r--  compiler/optimizing/code_generator_arm64.h   |  2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc    | 53
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 29
-rw-r--r--  compiler/optimizing/nodes.cc                 | 23
-rw-r--r--  compiler/optimizing/nodes.h                  | 55
10 files changed, 246 insertions(+), 60 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index cdfd989bb8..8d9a7b71e9 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -638,6 +638,40 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, uint32_t dex_pc) {
}
}
+bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
+ HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
+ return (first_next_not_move != nullptr) && first_next_not_move->CanDoImplicitNullCheck();
+}
+
+void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
+ // If we are from a static path don't record the pc as we can't throw NPE.
+ // NB: having the checks here makes the code much less verbose in the arch
+ // specific code generators.
+ if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
+ return;
+ }
+
+ if (!compiler_options_.GetImplicitNullChecks()) {
+ return;
+ }
+
+ if (!instr->CanDoImplicitNullCheck()) {
+ return;
+ }
+
+ // Find the first previous instruction which is not a move.
+ HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();
+
+ // If the instruction is a null check it means that `instr` is the first user
+ // and needs to record the pc.
+ if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
+ HNullCheck* null_check = first_prev_not_move->AsNullCheck();
+ // TODO: The parallel moves modify the environment. Their changes need to be reverted
+ // otherwise the stack maps at the throw point will not be correct.
+ RecordPcInfo(null_check, null_check->GetDexPc());
+ }
+}
+
void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
RegisterSet* register_set = locations->GetLiveRegisters();
size_t stack_offset = first_register_slot_in_slow_path_;
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 682c260585..f66aed912a 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -152,6 +152,8 @@ class CodeGenerator {
virtual bool NeedsTwoRegisters(Primitive::Type type) const = 0;
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc);
+ bool CanMoveNullCheckToUser(HNullCheck* null_check);
+ void MaybeRecordImplicitNullCheck(HInstruction* instruction);
void AddSlowPath(SlowPathCode* slow_path) {
slow_paths_.Add(slow_path);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 07c84bcc01..980de040fa 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1226,6 +1226,7 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
} else {
__ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
}
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmWordSize).Int32Value();
@@ -1264,6 +1265,7 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
} else {
__ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
}
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmWordSize).Int32Value();
@@ -2579,7 +2581,8 @@ void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
Register value_lo,
Register value_hi,
Register temp1,
- Register temp2) {
+ Register temp2,
+ HInstruction* instruction) {
Label fail;
if (offset != 0) {
__ LoadImmediate(temp1, offset);
@@ -2590,6 +2593,7 @@ void InstructionCodeGeneratorARM::GenerateWideAtomicStore(Register addr,
// We need a load followed by store. (The address used in a STREX instruction must
// be the same as the address in the most recently executed LDREX instruction.)
__ ldrexd(temp1, temp2, addr);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ strexd(temp1, value_lo, value_hi, addr);
__ cmp(temp1, ShifterOperand(0));
__ b(&fail, NE);
@@ -2664,13 +2668,7 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register value_reg = value.AsRegister<Register>();
- __ StoreToOffset(kStoreWord, value_reg, base, offset);
- if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- Register card = locations->GetTemp(1).AsRegister<Register>();
- codegen_->MarkGCCard(temp, card, base, value_reg);
- }
+ __ StoreToOffset(kStoreWord, value.AsRegister<Register>(), base, offset);
break;
}
@@ -2680,9 +2678,11 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
value.AsRegisterPairLow<Register>(),
value.AsRegisterPairHigh<Register>(),
locations->GetTemp(0).AsRegister<Register>(),
- locations->GetTemp(1).AsRegister<Register>());
+ locations->GetTemp(1).AsRegister<Register>(),
+ instruction);
} else {
__ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), base, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
break;
}
@@ -2704,9 +2704,11 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
value_reg_lo,
value_reg_hi,
locations->GetTemp(2).AsRegister<Register>(),
- locations->GetTemp(3).AsRegister<Register>());
+ locations->GetTemp(3).AsRegister<Register>(),
+ instruction);
} else {
__ StoreDToOffset(value_reg, base, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
break;
}
@@ -2716,6 +2718,17 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
UNREACHABLE();
}
+ // Longs and doubles are handled in the switch.
+ if (field_type != Primitive::kPrimLong && field_type != Primitive::kPrimDouble) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register card = locations->GetTemp(1).AsRegister<Register>();
+ codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>());
+ }
+
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
@@ -2804,9 +2817,11 @@ void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
Register lo = locations->GetTemp(0).AsRegister<Register>();
Register hi = locations->GetTemp(1).AsRegister<Register>();
GenerateWideAtomicLoad(base, offset, lo, hi);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ vmovdrr(out_reg, lo, hi);
} else {
__ LoadDFromOffset(out_reg, base, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
break;
}
@@ -2816,6 +2831,11 @@ void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
UNREACHABLE();
}
+ // Doubles are handled in the switch.
+ if (field_type != Primitive::kPrimDouble) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -2856,17 +2876,18 @@ void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
- ? Location::RequiresRegister()
- : Location::RegisterOrConstant(instruction->InputAt(0));
- locations->SetInAt(0, loc);
+ locations->SetInAt(0, Location::RequiresRegister());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
}
void InstructionCodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
+ if (codegen_->CanMoveNullCheckToUser(instruction)) {
+ return;
+ }
Location obj = instruction->GetLocations()->InAt(0);
+
__ LoadFromOffset(kLoadWord, IP, obj.AsRegister<Register>(), 0);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
@@ -2878,14 +2899,8 @@ void InstructionCodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruct
LocationSummary* locations = instruction->GetLocations();
Location obj = locations->InAt(0);
- if (obj.IsRegister()) {
- __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
- __ b(slow_path->GetEntryLabel(), EQ);
- } else {
- DCHECK(obj.IsConstant()) << obj;
- DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
- __ b(slow_path->GetEntryLabel());
- }
+ __ cmp(obj.AsRegister<Register>(), ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
}
void InstructionCodeGeneratorARM::VisitNullCheck(HNullCheck* instruction) {
@@ -3028,6 +3043,7 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
@@ -3111,6 +3127,7 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
__ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
__ StoreToOffset(kStoreWord, value, IP, data_offset);
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
if (needs_write_barrier) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
Register temp = locations->GetTemp(0).AsRegister<Register>();
@@ -3165,6 +3182,7 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
__ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
__ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
}
+
break;
}
@@ -3172,6 +3190,11 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
LOG(FATAL) << "Unreachable type " << value_type;
UNREACHABLE();
}
+
+ // Ints and objects are handled in the switch.
+ if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
}
void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
@@ -3187,6 +3210,7 @@ void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
Register obj = locations->InAt(0).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index fe373d54e3..0de6669aa7 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -139,7 +139,8 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateWideAtomicStore(Register addr, uint32_t offset,
Register value_lo, Register value_hi,
- Register temp1, Register temp2);
+ Register temp1, Register temp2,
+ HInstruction* instruction);
void GenerateWideAtomicLoad(Register addr, uint32_t offset,
Register out_lo, Register out_hi);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 306845beb8..0109363062 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -997,11 +997,12 @@ void CodeGeneratorARM64::Load(Primitive::Type type,
}
}
-void CodeGeneratorARM64::LoadAcquire(Primitive::Type type,
+void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
CPURegister dst,
const MemOperand& src) {
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp_base = temps.AcquireX();
+ Primitive::Type type = instruction->GetType();
DCHECK(!src.IsRegisterOffset());
DCHECK(!src.IsPreIndex());
@@ -1013,16 +1014,20 @@ void CodeGeneratorARM64::LoadAcquire(Primitive::Type type,
switch (type) {
case Primitive::kPrimBoolean:
__ Ldarb(Register(dst), base);
+ MaybeRecordImplicitNullCheck(instruction);
break;
case Primitive::kPrimByte:
__ Ldarb(Register(dst), base);
+ MaybeRecordImplicitNullCheck(instruction);
__ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
break;
case Primitive::kPrimChar:
__ Ldarh(Register(dst), base);
+ MaybeRecordImplicitNullCheck(instruction);
break;
case Primitive::kPrimShort:
__ Ldarh(Register(dst), base);
+ MaybeRecordImplicitNullCheck(instruction);
__ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
break;
case Primitive::kPrimInt:
@@ -1030,6 +1035,7 @@ void CodeGeneratorARM64::LoadAcquire(Primitive::Type type,
case Primitive::kPrimLong:
DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
__ Ldar(Register(dst), base);
+ MaybeRecordImplicitNullCheck(instruction);
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
@@ -1038,6 +1044,7 @@ void CodeGeneratorARM64::LoadAcquire(Primitive::Type type,
Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
__ Ldar(temp, base);
+ MaybeRecordImplicitNullCheck(instruction);
__ Fmov(FPRegister(dst), temp);
break;
}
@@ -1399,6 +1406,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
}
codegen_->Load(type, OutputCPURegister(instruction), source);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
@@ -1410,6 +1418,7 @@ void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
__ Ldr(OutputRegister(instruction),
HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
@@ -1454,6 +1463,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
}
codegen_->Store(value_type, value, destination);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
}
@@ -1816,14 +1826,17 @@ void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* ins
if (instruction->IsVolatile()) {
if (kUseAcquireRelease) {
- codegen_->LoadAcquire(instruction->GetType(), OutputCPURegister(instruction), field);
+ // NB: LoadAcquire will record the pc info if needed.
+ codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
} else {
codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
// For IRIW sequential consistency kLoadAny is not sufficient.
GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
} else {
codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
}
@@ -1843,13 +1856,16 @@ void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* ins
if (instruction->IsVolatile()) {
if (kUseAcquireRelease) {
codegen_->StoreRelease(field_type, value, HeapOperand(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
codegen_->Store(field_type, value, HeapOperand(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
} else {
codegen_->Store(field_type, value, HeapOperand(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
@@ -1953,6 +1969,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
} else {
__ Ldr(temp, HeapOperandFrom(receiver, class_offset));
}
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
__ Ldr(temp, HeapOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
@@ -2018,6 +2035,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
DCHECK(receiver.IsRegister());
__ Ldr(temp, HeapOperandFrom(receiver, class_offset));
}
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
__ Ldr(temp, HeapOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
@@ -2293,6 +2311,9 @@ void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
}
void InstructionCodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
+ if (codegen_->CanMoveNullCheckToUser(instruction)) {
+ return;
+ }
Location obj = instruction->GetLocations()->InAt(0);
__ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
@@ -2305,13 +2326,8 @@ void InstructionCodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instru
LocationSummary* locations = instruction->GetLocations();
Location obj = locations->InAt(0);
- if (obj.IsRegister()) {
- __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
- } else {
- DCHECK(obj.IsConstant()) << obj;
- DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
- __ B(slow_path->GetEntryLabel());
- }
+
+ __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
}
void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
@@ -2512,7 +2528,8 @@ void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruc
if (instruction->IsVolatile()) {
if (kUseAcquireRelease) {
- codegen_->LoadAcquire(instruction->GetType(), OutputCPURegister(instruction), field);
+ // NB: LoadAcquire will record the pc info if needed.
+ codegen_->LoadAcquire(instruction, OutputCPURegister(instruction), field);
} else {
codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
// For IRIW sequential consistency kLoadAny is not sufficient.
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index e20d02ee2d..27c6fbdbf4 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -267,7 +267,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
void LoadCurrentMethod(vixl::Register current_method);
- void LoadAcquire(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
+ void LoadAcquire(HInstruction* instruction, vixl::CPURegister dst, const vixl::MemOperand& src);
void StoreRelease(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
// Generate code to invoke a runtime entry point.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 57f01e8e16..ac6fdbcfe9 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1202,6 +1202,7 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
} else {
__ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
}
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
@@ -1238,6 +1239,7 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
} else {
__ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
}
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
@@ -2808,11 +2810,13 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
if (is_volatile) {
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
__ movsd(temp, Address(base, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movd(out.AsRegisterPairLow<Register>(), temp);
__ psrlq(temp, Immediate(32));
__ movd(out.AsRegisterPairHigh<Register>(), temp);
} else {
__ movl(out.AsRegisterPairLow<Register>(), Address(base, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movl(out.AsRegisterPairHigh<Register>(), Address(base, kX86WordSize + offset));
}
break;
@@ -2833,6 +2837,11 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
UNREACHABLE();
}
+ // Longs are handled in the switch.
+ if (field_type != Primitive::kPrimLong) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -2904,12 +2913,6 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
__ movl(Address(base, offset), value.AsRegister<Register>());
-
- if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- Register card = locations->GetTemp(1).AsRegister<Register>();
- codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>());
- }
break;
}
@@ -2921,8 +2924,10 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
__ movd(temp2, value.AsRegisterPairHigh<Register>());
__ punpckldq(temp1, temp2);
__ movsd(Address(base, offset), temp1);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
__ movl(Address(base, offset), value.AsRegisterPairLow<Register>());
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movl(Address(base, kX86WordSize + offset), value.AsRegisterPairHigh<Register>());
}
break;
@@ -2943,6 +2948,17 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
UNREACHABLE();
}
+ // Longs are handled in the switch.
+ if (field_type != Primitive::kPrimLong) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Register card = locations->GetTemp(1).AsRegister<Register>();
+ codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>());
+ }
+
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
@@ -2993,8 +3009,12 @@ void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
}
void InstructionCodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) {
+ if (codegen_->CanMoveNullCheckToUser(instruction)) {
+ return;
+ }
LocationSummary* locations = instruction->GetLocations();
Location obj = locations->InAt(0);
+
__ testl(EAX, Address(obj.AsRegister<Register>(), 0));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
@@ -3040,7 +3060,8 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
- switch (instruction->GetType()) {
+ Primitive::Type type = instruction->GetType();
+ switch (type) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
Register out = locations->Out().AsRegister<Register>();
@@ -3108,10 +3129,12 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ movl(out.AsRegisterPairLow<Register>(), Address(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movl(out.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
} else {
__ movl(out.AsRegisterPairLow<Register>(),
Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movl(out.AsRegisterPairHigh<Register>(),
Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize));
}
@@ -3120,12 +3143,16 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
+ LOG(FATAL) << "Unimplemented register type " << type;
UNREACHABLE();
case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
}
+
+ if (type != Primitive::kPrimLong) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
}
void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
@@ -3202,6 +3229,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -3225,6 +3253,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -3253,6 +3282,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
if (needs_write_barrier) {
Register temp = locations->GetTemp(0).AsRegister<Register>();
@@ -3274,17 +3304,20 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
if (value.IsRegisterPair()) {
__ movl(Address(obj, offset), value.AsRegisterPairLow<Register>());
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movl(Address(obj, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
} else {
DCHECK(value.IsConstant());
int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
__ movl(Address(obj, offset), Immediate(Low32Bits(val)));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movl(Address(obj, offset + kX86WordSize), Immediate(High32Bits(val)));
}
} else {
if (value.IsRegisterPair()) {
__ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
value.AsRegisterPairLow<Register>());
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
value.AsRegisterPairHigh<Register>());
} else {
@@ -3292,6 +3325,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
__ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
Immediate(Low32Bits(val)));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
__ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
Immediate(High32Bits(val)));
}
@@ -3322,6 +3356,7 @@ void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
Register obj = locations->InAt(0).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
__ movl(out, Address(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index dd6861f67b..350392fbf4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1244,6 +1244,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
} else {
__ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
}
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
@@ -1280,6 +1281,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
} else {
__ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
}
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
@@ -2589,6 +2591,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
UNREACHABLE();
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -2643,11 +2647,6 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
__ movl(Address(base, offset), value.AsRegister<CpuRegister>());
- if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
- CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
- CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
- codegen_->MarkGCCard(temp, card, base, value.AsRegister<CpuRegister>());
- }
break;
}
@@ -2671,6 +2670,14 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
UNREACHABLE();
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
+ codegen_->MarkGCCard(temp, card, base, value.AsRegister<CpuRegister>());
+ }
+
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
@@ -2721,6 +2728,9 @@ void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
}
void InstructionCodeGeneratorX86_64::GenerateImplicitNullCheck(HNullCheck* instruction) {
+ if (codegen_->CanMoveNullCheckToUser(instruction)) {
+ return;
+ }
LocationSummary* locations = instruction->GetLocations();
Location obj = locations->InAt(0);
@@ -2873,6 +2883,7 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
@@ -2941,6 +2952,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -2967,6 +2979,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -2995,7 +3008,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
-
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
if (needs_write_barrier) {
DCHECK_EQ(value_type, Primitive::kPrimNot);
CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
@@ -3023,6 +3036,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
__ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
value.AsRegister<CpuRegister>());
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -3037,6 +3051,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
__ movss(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
value.AsFpuRegister<XmmRegister>());
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -3051,6 +3066,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
__ movsd(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
value.AsFpuRegister<XmmRegister>());
}
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -3073,6 +3089,7 @@ void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction)
CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movl(out, Address(obj, offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
}
void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index ade31380ec..39ec22bea1 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -15,6 +15,7 @@
*/
#include "nodes.h"
+
#include "ssa_builder.h"
#include "utils/growable_array.h"
@@ -456,6 +457,22 @@ static void RemoveFromUseList(T* user,
}
}
+HInstruction* HInstruction::GetNextDisregardingMoves() const {
+ HInstruction* next = GetNext();
+ while (next != nullptr && next->IsParallelMove()) {
+ next = next->GetNext();
+ }
+ return next;
+}
+
+HInstruction* HInstruction::GetPreviousDisregardingMoves() const {
+ HInstruction* previous = GetPrevious();
+ while (previous != nullptr && previous->IsParallelMove()) {
+ previous = previous->GetPrevious();
+ }
+ return previous;
+}
+
void HInstruction::RemoveUser(HInstruction* user, size_t input_index) {
RemoveFromUseList(user, input_index, &uses_);
}
@@ -654,11 +671,7 @@ HConstant* HBinaryOperation::TryStaticEvaluation() const {
}
bool HCondition::IsBeforeWhenDisregardMoves(HIf* if_) const {
- HInstruction* previous = if_->GetPrevious();
- while (previous != nullptr && previous->IsParallelMove()) {
- previous = previous->GetPrevious();
- }
- return previous == this;
+ return this == if_->GetPreviousDisregardingMoves();
}
bool HInstruction::Equals(HInstruction* other) const {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fa51f27f0a..0bd571a1fb 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -696,6 +696,9 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
HInstruction* GetNext() const { return next_; }
HInstruction* GetPrevious() const { return previous_; }
+ HInstruction* GetNextDisregardingMoves() const;
+ HInstruction* GetPreviousDisregardingMoves() const;
+
HBasicBlock* GetBlock() const { return block_; }
void SetBlock(HBasicBlock* block) { block_ = block; }
bool IsInBlock() const { return block_ != nullptr; }
@@ -716,6 +719,8 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
virtual bool CanThrow() const { return false; }
bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
+ virtual bool CanDoImplicitNullCheck() const { return false; }
+
void AddUseAt(HInstruction* user, size_t index) {
uses_ = new (block_->GetGraph()->GetArena()) HUseListNode<HInstruction>(user, index, uses_);
}
@@ -1597,7 +1602,7 @@ class HInvoke : public HInstruction {
// Runtime needs to walk the stack, so Dex -> Dex calls need to
// know their environment.
- virtual bool NeedsEnvironment() const { return true; }
+ bool NeedsEnvironment() const OVERRIDE { return true; }
void SetArgumentAt(size_t index, HInstruction* argument) {
SetRawInputAt(index, argument);
@@ -1659,6 +1664,12 @@ class HInvokeStaticOrDirect : public HInvoke {
: HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index),
invoke_type_(invoke_type) {}
+ bool CanDoImplicitNullCheck() const OVERRIDE {
+ // We access the method via the dex cache so we can't do an implicit null check.
+ // TODO: for intrinsics we can generate implicit null checks.
+ return false;
+ }
+
InvokeType GetInvokeType() const { return invoke_type_; }
DECLARE_INSTRUCTION(InvokeStaticOrDirect);
@@ -1680,6 +1691,11 @@ class HInvokeVirtual : public HInvoke {
: HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index),
vtable_index_(vtable_index) {}
+ bool CanDoImplicitNullCheck() const OVERRIDE {
+ // TODO: Add implicit null checks in intrinsics.
+ return !GetLocations()->Intrinsified();
+ }
+
uint32_t GetVTableIndex() const { return vtable_index_; }
DECLARE_INSTRUCTION(InvokeVirtual);
@@ -1701,6 +1717,11 @@ class HInvokeInterface : public HInvoke {
: HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index),
imt_index_(imt_index) {}
+ bool CanDoImplicitNullCheck() const OVERRIDE {
+ // TODO: Add implicit null checks in intrinsics.
+ return !GetLocations()->Intrinsified();
+ }
+
uint32_t GetImtIndex() const { return imt_index_; }
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
@@ -2180,7 +2201,11 @@ class HInstanceFieldGet : public HExpression<1> {
return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
}
- virtual size_t ComputeHashCode() const {
+ bool CanDoImplicitNullCheck() const OVERRIDE {
+ return GetFieldOffset().Uint32Value() < kPageSize;
+ }
+
+ size_t ComputeHashCode() const OVERRIDE {
return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
}
@@ -2210,11 +2235,14 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
SetRawInputAt(1, value);
}
+ bool CanDoImplicitNullCheck() const OVERRIDE {
+ return GetFieldOffset().Uint32Value() < kPageSize;
+ }
+
const FieldInfo& GetFieldInfo() const { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
-
HInstruction* GetValue() const { return InputAt(1); }
DECLARE_INSTRUCTION(InstanceFieldSet);
@@ -2238,6 +2266,15 @@ class HArrayGet : public HExpression<2> {
UNUSED(other);
return true;
}
+ bool CanDoImplicitNullCheck() const OVERRIDE {
+ // TODO: We can be smarter here.
+ // Currently, the array access is always preceded by an ArrayLength or a NullCheck
+ // which generates the implicit null check. There are cases when these can be removed
+ // to produce better code. If we ever add optimizations to do so we should allow an
+ // implicit check here (as long as the address falls in the first page).
+ return false;
+ }
+
void SetType(Primitive::Type type) { type_ = type; }
HInstruction* GetArray() const { return InputAt(0); }
@@ -2265,12 +2302,17 @@ class HArraySet : public HTemplateInstruction<3> {
SetRawInputAt(2, value);
}
- bool NeedsEnvironment() const {
+ bool NeedsEnvironment() const OVERRIDE {
// We currently always call a runtime method to catch array store
// exceptions.
return needs_type_check_;
}
+ bool CanDoImplicitNullCheck() const OVERRIDE {
+ // TODO: Same as for ArrayGet.
+ return false;
+ }
+
void ClearNeedsTypeCheck() {
needs_type_check_ = false;
}
@@ -2313,11 +2355,12 @@ class HArrayLength : public HExpression<1> {
SetRawInputAt(0, array);
}
- virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const {
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
UNUSED(other);
return true;
}
+ bool CanDoImplicitNullCheck() const OVERRIDE { return true; }
DECLARE_INSTRUCTION(ArrayLength);