Diffstat (limited to 'compiler/dex/quick')
27 files changed, 127 insertions, 127 deletions
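This change is a mechanical cleanup: every use of the NULL macro under compiler/dex/quick is replaced with C++11's nullptr, and comments are reworded to say "null" where they refer to the runtime value rather than the literal. The benefit of nullptr is type safety: NULL is an integer constant, so it participates in integer overload resolution, whereas nullptr has the dedicated type std::nullptr_t and converts only to pointer types. A minimal standalone sketch of the difference (the Report overloads are hypothetical, not ART code):

#include <cstddef>
#include <iostream>

// Hypothetical overloads, for illustration only.
void Report(int value)     { std::cout << "int: " << value << "\n"; }
void Report(const void* p) { std::cout << "pointer: " << p << "\n"; }

int main() {
  Report(nullptr);  // Unambiguous: std::nullptr_t converts only to pointer types.
  Report(0);        // Picks the int overload, even if a null pointer was intended.
  // Report(NULL);  // Implementation-defined: NULL may be 0, 0L, or __null, so this
                    // can pick the int overload or fail to compile as ambiguous.
  return 0;
}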
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index c5ac4c1508..df4a9f2048 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1055,7 +1055,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
 // new_lir replaces orig_lir in the pcrel_fixup list.
 void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     prev_lir->u.a.pcrel_next = new_lir;
@@ -1066,7 +1066,7 @@ void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
 // new_lir is inserted before orig_lir in the pcrel_fixup list.
 void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -1084,7 +1084,7 @@ void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
 uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
   uint8_t* const write_buffer = write_pos;
-  for (; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (; lir != nullptr; lir = NEXT_LIR(lir)) {
     lir->offset = (write_pos - write_buffer);
     if (!lir->flags.is_nop) {
       int opcode = lir->opcode;
@@ -1258,8 +1258,8 @@ void ArmMir2Lir::AssembleLIR() {
     generation ^= 1;
     // Note: nodes requiring possible fixup linked in ascending order.
     lir = first_fixup_;
-    prev_lir = NULL;
-    while (lir != NULL) {
+    prev_lir = nullptr;
+    while (lir != nullptr) {
       /*
        * NOTE: the lir being considered here will be encoded following the switch (so long as
        * we're not in a retry situation).  However, any new non-pc_rel instructions inserted
@@ -1506,7 +1506,7 @@ void ArmMir2Lir::AssembleLIR() {
       case kFixupAdr: {
         const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
         LIR* target = lir->target;
-        int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
+        int32_t target_disp = (tab_rec != nullptr) ? tab_rec->offset + offset_adjustment
             : target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
             offset_adjustment);
         int32_t disp = target_disp - ((lir->offset + 4) & ~3);
@@ -1642,7 +1642,7 @@ size_t ArmMir2Lir::GetInsnSize(LIR* lir) {
 uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
   LIR* end_lir = tail_lir->next;

-  LIR* last_fixup = NULL;
+  LIR* last_fixup = nullptr;
   for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
     if (!lir->flags.is_nop) {
       if (lir->flags.fixup != kFixupNone) {
@@ -1658,8 +1658,8 @@ uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offse
       }
       // Link into the fixup chain.
       lir->flags.use_def_invalid = true;
-      lir->u.a.pcrel_next = NULL;
-      if (first_fixup_ == NULL) {
+      lir->u.a.pcrel_next = nullptr;
+      if (first_fixup_ == nullptr) {
         first_fixup_ = lir;
       } else {
         last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 3d18af6169..6ba4016260 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -124,7 +124,7 @@ void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocati
   }
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size-1);
-  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+  LIR* branch_over = OpCondBranch(kCondHi, nullptr);

   // Load the displacement from the switch table
   RegStorage disp_reg = AllocTemp();
@@ -156,7 +156,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
   } else {
     // If the null-check fails it's handled by the slow-path to reduce exception related meta-data.
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
     }
   }
   Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
@@ -165,12 +165,12 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
   MarkPossibleNullPointerException(opt_flags);
   // Zero out the read barrier bits.
   OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
-  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, NULL);
+  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
   // r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
   OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
   NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
           mirror::Object::MonitorOffset().Int32Value() >> 2);
-  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
+  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);

   LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);

@@ -238,7 +238,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
   } else {
     // If the null-check fails it's handled by the slow-path to reduce exception related meta-data.
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
     }
   }
   if (!kUseReadBarrier) {
@@ -252,16 +252,16 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
     OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
     // Zero out except the read barrier bits.
     OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
-    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, NULL);
+    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
     GenMemBarrier(kAnyStore);
     LIR* unlock_success_branch;
     if (!kUseReadBarrier) {
       Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
-      unlock_success_branch = OpUnconditionalBranch(NULL);
+      unlock_success_branch = OpUnconditionalBranch(nullptr);
     } else {
       NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
               mirror::Object::MonitorOffset().Int32Value() >> 2);
-      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, NULL);
+      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
     }
     LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
     slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 62903afbbc..8d20f1b37e 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -138,10 +138,10 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocatio
   RegStorage t_reg = AllocTemp();
   LoadConstant(t_reg, -1);
   OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
-  LIR* branch1 = OpCondBranch(kCondLt, NULL);
-  LIR* branch2 = OpCondBranch(kCondGt, NULL);
+  LIR* branch1 = OpCondBranch(kCondLt, nullptr);
+  LIR* branch2 = OpCondBranch(kCondGt, nullptr);
   OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
-  LIR* branch3 = OpCondBranch(kCondEq, NULL);
+  LIR* branch3 = OpCondBranch(kCondEq, nullptr);

   LIR* it = OpIT(kCondHi, "E");
   NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
@@ -389,7 +389,7 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_va
    * generate the long form in an attempt to avoid an extra assembly pass.
    * TODO: consider interspersing slowpaths in code following unconditional branches.
    */
-  bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
+  bool skip = ((target != nullptr) && (target->opcode == kPseudoThrowTarget));
   skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
   if (!skip && reg.Low8() && (check_value == 0)) {
     if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
@@ -1159,12 +1159,12 @@ void ArmMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
 LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
 #ifdef ARM_R4_SUSPEND_FLAG
   NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
-  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+  return OpCondBranch((target == nullptr) ? kCondEq : kCondNe, target);
 #else
   RegStorage t_reg = AllocTemp();
   LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
                t_reg, kUnsignedHalf, kNotVolatile);
-  LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
+  LIR* cmp_branch = OpCmpImmBranch((target == nullptr) ? kCondNe : kCondEq, t_reg,
                                    0, target);
   FreeTemp(t_reg);
   return cmp_branch;
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 25ea6941c0..2ef92f851b 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -90,7 +90,7 @@ LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
     }
   }
   LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWordData(&literal_list_, value);
   }
   ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -411,7 +411,7 @@ LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_s
     return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
   } else {
     LOG(FATAL) << "Unexpected encoding operand count";
-    return NULL;
+    return nullptr;
   }
 }

@@ -695,7 +695,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
 }

 LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
-  LIR* res = NULL;
+  LIR* res = nullptr;
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
   if (r_dest.IsFloat()) {
@@ -721,10 +721,10 @@ LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
       LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
     }
   }
-  if (res == NULL) {
+  if (res == nullptr) {
     // No short form - load from the literal pool.
     LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-    if (data_target == NULL) {
+    if (data_target == nullptr) {
       data_target = AddWideData(&literal_list_, val_lo, val_hi);
     }
     ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -814,7 +814,7 @@ LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
 LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) {
   bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
-  LIR* store = NULL;
+  LIR* store = nullptr;
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (all_low_regs && (scale == 0));
   RegStorage reg_ptr;
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 2f1ae66bfc..b78fb80aa0 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -663,7 +663,7 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
 // new_lir replaces orig_lir in the pcrel_fixup list.
 void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     prev_lir->u.a.pcrel_next = new_lir;
@@ -674,7 +674,7 @@ void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
 // new_lir is inserted before orig_lir in the pcrel_fixup list.
 void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -889,8 +889,8 @@ void Arm64Mir2Lir::AssembleLIR() {
     generation ^= 1;
     // Note: nodes requiring possible fixup linked in ascending order.
     lir = first_fixup_;
-    prev_lir = NULL;
-    while (lir != NULL) {
+    prev_lir = nullptr;
+    while (lir != nullptr) {
       // NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
       // the time of insertion.  Note that inserted instructions don't need use/def flags, but do
       // need size and pc-rel status properly updated.
@@ -1037,7 +1037,7 @@ void Arm64Mir2Lir::AssembleLIR() {
           // Check that the instruction preceding the multiply-accumulate is a load or store.
           if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
             // insert a NOP between the load/store and the multiply-accumulate.
-            LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+            LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, nullptr);
             new_lir->offset = lir->offset;
             new_lir->flags.fixup = kFixupNone;
             new_lir->flags.size = EncodingMap[kA64Nop0].size;
@@ -1108,7 +1108,7 @@ size_t Arm64Mir2Lir::GetInsnSize(LIR* lir) {
 uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
   LIR* end_lir = tail_lir->next;

-  LIR* last_fixup = NULL;
+  LIR* last_fixup = nullptr;
   for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
     A64Opcode opcode = UNWIDE(lir->opcode);
     if (!lir->flags.is_nop) {
@@ -1123,8 +1123,8 @@ uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t off
       }
       // Link into the fixup chain.
       lir->flags.use_def_invalid = true;
-      lir->u.a.pcrel_next = NULL;
-      if (first_fixup_ == NULL) {
+      lir->u.a.pcrel_next = nullptr;
+      if (first_fixup_ == nullptr) {
         first_fixup_ = lir;
       } else {
         last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 4abbd77d88..9a7c2ade18 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -127,7 +127,7 @@ void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLoca
   }
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, key_reg, size - 1);
-  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+  LIR* branch_over = OpCondBranch(kCondHi, nullptr);

   // Load the displacement from the switch table
   RegStorage disp_reg = AllocTemp();
@@ -167,7 +167,7 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
   } else {
     // If the null-check fails it's handled by the slow-path to reduce exception related meta-data.
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
     }
   }
   Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -176,12 +176,12 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
   MarkPossibleNullPointerException(opt_flags);
   // Zero out the read barrier bits.
   OpRegRegImm(kOpAnd, rs_w2, rs_w3, LockWord::kReadBarrierStateMaskShiftedToggled);
-  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, NULL);
+  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
   // w3 is zero except for the rb bits here. Copy the read barrier bits into w1.
   OpRegRegReg(kOpOr, rs_w1, rs_w1, rs_w3);
   OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
   NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
-  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
+  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr);

   LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
   not_unlocked_branch->target = slow_path_target;
@@ -220,7 +220,7 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
   } else {
     // If the null-check fails it's handled by the slow-path to reduce exception related meta-data.
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
     }
   }
   Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -235,16 +235,16 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
     OpRegRegImm(kOpAnd, rs_w3, rs_w2, LockWord::kReadBarrierStateMaskShiftedToggled);
     // Zero out except the read barrier bits.
     OpRegRegImm(kOpAnd, rs_w2, rs_w2, LockWord::kReadBarrierStateMaskShifted);
-    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, NULL);
+    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, nullptr);
     GenMemBarrier(kAnyStore);
     LIR* unlock_success_branch;
     if (!kUseReadBarrier) {
       Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
-      unlock_success_branch = OpUnconditionalBranch(NULL);
+      unlock_success_branch = OpUnconditionalBranch(nullptr);
     } else {
       OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
       NewLIR3(kA64Stxr3wrX, rw1, rw2, rx3);
-      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, NULL);
+      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, nullptr);
     }
     LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
     slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b7dbd0a97d..9340d01640 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -803,7 +803,7 @@ bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
   NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
   OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
   DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-  LIR* early_exit = OpCondBranch(kCondNe, NULL);
+  LIR* early_exit = OpCondBranch(kCondNe, nullptr);
   NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
   NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
   DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index e9ad8ba175..483231f931 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -121,7 +121,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
   }

   LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     // Wide, as we need 8B alignment.
     data_target = AddWideData(&literal_list_, value, 0);
   }

@@ -148,7 +148,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
   LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWideData(&literal_list_, val_lo, val_hi);
   }

@@ -525,7 +525,7 @@ LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
   LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWideData(&literal_list_, val_lo, val_hi);
   }

@@ -624,7 +624,7 @@ LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r
   }

   LOG(FATAL) << "Unexpected encoding operand count";
-  return NULL;
+  return nullptr;
 }

 LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
@@ -658,7 +658,7 @@ LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage
   }

   LOG(FATAL) << "Unexpected encoding operand count";
-  return NULL;
+  return nullptr;
 }

 LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
@@ -1190,7 +1190,7 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
  */
 LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                     OpSize size) {
-  LIR* load = NULL;
+  LIR* load = nullptr;
   A64Opcode opcode = kA64Brk1d;
   A64Opcode alt_opcode = kA64Brk1d;
   int scale = 0;
@@ -1286,7 +1286,7 @@ LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage

 LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                      OpSize size) {
-  LIR* store = NULL;
+  LIR* store = nullptr;
   A64Opcode opcode = kA64Brk1d;
   A64Opcode alt_opcode = kA64Brk1d;
   int scale = 0;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f4a318cc2..fb68335e6e 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1080,7 +1080,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
   reginfo_map_.reserve(RegStorage::kMaxRegs);
   pointer_storage_.reserve(128);
   slow_paths_.reserve(32);
-  // Reserve pointer id 0 for nullptr.
+  // Reserve pointer id 0 for null.
   size_t null_idx = WrapPointer<void>(nullptr);
   DCHECK_EQ(null_idx, 0U);
 }
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index ca31dbfde5..f5e6c09dba 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -392,7 +392,7 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods

 DexFileMethodInliner::DexFileMethodInliner()
     : lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
-      dex_file_(NULL) {
+      dex_file_(nullptr) {
   static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
   static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
                 "bad arraysize for kClassCacheNames");
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 1a72cd7c71..de5e0410fb 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -87,7 +87,7 @@ void Mir2Lir::GenIfNullUseHelperImmMethod(
     const RegStorage r_result_;
   };

-  LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, NULL);
+  LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
   LIR* cont = NewLIR0(kPseudoTargetLabel);

   AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
@@ -113,10 +113,10 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
     int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
     LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
   }
-  // r_base now points at static storage (Class*) or nullptr if the type is not yet resolved.
+  // r_base now points at static storage (Class*) or null if the type is not yet resolved.
   LIR* unresolved_branch = nullptr;
   if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
-    // Check if r_base is nullptr.
+    // Check if r_base is null.
     unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
   }
   LIR* uninit_branch = nullptr;
@@ -136,8 +136,8 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
     class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
      public:
       // There are up to two branches to the static field slow path, the "unresolved" when the type
-      // entry in the dex cache is nullptr, and the "uninit" when the class is not yet initialized.
-      // At least one will be non-nullptr here, otherwise we wouldn't generate the slow path.
+      // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
+      // At least one will be non-null here, otherwise we wouldn't generate the slow path.
       StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                           RegStorage r_base_in, RegStorage r_method_in)
           : LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
@@ -165,7 +165,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
       }

      private:
-      // Second branch to the slow path, or nullptr if there's only one branch.
+      // Second branch to the slow path, or null if there's only one branch.
       LIR* const second_branch_;

       const int storage_index_;
@@ -173,7 +173,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
       RegStorage r_method_;
     };

-    // The slow path is invoked if the r_base is nullptr or the class pointed
+    // The slow path is invoked if the r_base is null or the class pointed
     // to by it is not initialized.
     LIR* cont = NewLIR0(kPseudoTargetLabel);

     AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -319,7 +319,7 @@ LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
 /* Perform an explicit null-check on a register.  */
 LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
   if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-    return NULL;
+    return nullptr;
   }
   return GenNullCheck(m_reg);
 }
@@ -1188,7 +1188,7 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
     DCHECK(!IsSameReg(result_reg, object.reg));
   }
   LoadConstant(result_reg, 0);     // assume false
-  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);

   RegStorage check_class = AllocTypedTemp(false, kRefReg);
   RegStorage object_class = AllocTypedTemp(false, kRefReg);
@@ -1287,7 +1287,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
     // On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
     LoadConstant(rl_result.reg, 0);
   }
-  LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
+  LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);

   /* load object->klass_ */
   RegStorage ref_class_reg = TargetReg(kArg1, kRef);  // kArg1 will hold the Class* of ref.
@@ -1295,7 +1295,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
   LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(), ref_class_reg, kNotVolatile);

   /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
-  LIR* branchover = NULL;
+  LIR* branchover = nullptr;
   if (type_known_final) {
     // rl_result == ref == class.
     GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
@@ -1320,7 +1320,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
       if (!type_known_abstract) {
         /* Uses branchovers */
         LoadConstant(rl_result.reg, 1);     // assume true
-        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
+        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
       }
       OpRegCopy(TargetReg(kArg0, kRef), class_reg);    // .ne case - arg0 <= class
@@ -2129,7 +2129,7 @@ void Mir2Lir::GenSuspendTest(int opt_flags) {
   }
   if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
     FlushAllRegs();
-    LIR* branch = OpTestSuspend(NULL);
+    LIR* branch = OpTestSuspend(nullptr);
     LIR* cont = NewLIR0(kPseudoTargetLabel);
     AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
   } else {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 54e5742837..4215e8bc50 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -46,7 +46,7 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
   if (rl_src.location == kLocPhysReg) {
     OpRegCopy(r_dest, rl_src.reg);
   } else if (IsInexpensiveConstant(rl_src)) {
-    // On 64-bit targets, will sign extend.  Make sure constant reference is always NULL.
+    // On 64-bit targets, will sign extend.  Make sure constant reference is always null.
     DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
     LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
   } else {
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 936ff42c8c..f9b9684284 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -613,7 +613,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
       LOG(FATAL) << "Unexpected branch kind " << opcode;
       UNREACHABLE();
   }
-  LIR* hop_target = NULL;
+  LIR* hop_target = nullptr;
   if (!unconditional) {
     hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
     LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
@@ -650,7 +650,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success.

-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     if (lir->opcode < 0) {
       continue;
     }
@@ -668,7 +668,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
          * (label2 - label1), where label1 is a standard
          * kPseudoTargetLabel and is stored in operands[2].
          * If operands[3] is null, then label2 is a kPseudoTargetLabel
-         * and is found in lir->target.  If operands[3] is non-NULL,
+         * and is found in lir->target.  If operands[3] is non-nullptr,
          * then it is a Switch/Data table.
          */
         int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
@@ -863,7 +863,7 @@ int MipsMir2Lir::AssignInsnOffsets() {
   LIR* lir;
   int offset = 0;

-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     lir->offset = offset;
     if (LIKELY(lir->opcode >= 0)) {
       if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 05570e4bde..39b9cc7056 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -112,7 +112,7 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
   // Test loop.
   RegStorage r_key = AllocTemp();
   LIR* loop_label = NewLIR0(kPseudoTargetLabel);
-  LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+  LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, nullptr);
   Load32Disp(r_base, 0, r_key);
   OpRegImm(kOpAdd, r_base, 8);
   OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
@@ -188,7 +188,7 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
   tab_rec->anchor = base_label;

   // Bounds check - if < 0 or >= size continue following switch.
-  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, nullptr);

   // Materialize the table base pointer.
   RegStorage r_base = AllocPtrSizeTemp();
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1ca8bb618b..9319c64784 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -68,7 +68,7 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocati
   NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
   NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
   NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
-  LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+  LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
   NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
   NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
   NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
@@ -128,7 +128,7 @@ LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage sr
       break;
     default:
       LOG(FATAL) << "No support for ConditionCode: " << cond;
-      return NULL;
+      return nullptr;
   }
   if (cmp_zero) {
     branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
@@ -278,7 +278,7 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
   // Implement as a branch-over.
   // TODO: Conditional move?
   LoadConstant(rs_dest, true_val);
-  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, nullptr);
   LoadConstant(rs_dest, false_val);
   LIR* target_label = NewLIR0(kPseudoTargetLabel);
   ne_branchover->target = target_label;
@@ -447,7 +447,7 @@ void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
 // Test suspend flag, return target of taken suspend branch.
 LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
   OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
-  return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
+  return OpCmpImmBranch((target == nullptr) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
 }

 // Decrement register and branch on condition.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8ab542270d..95c61cd4ed 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -566,7 +566,7 @@ LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {

 /* Load value from base + scaled index. */
 LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                   int scale, OpSize size) {
-  LIR *first = NULL;
+  LIR *first = nullptr;
   LIR *res;
   MipsOpCode opcode = kMipsNop;
   bool is64bit = cu_->target64 && r_dest.Is64Bit();
@@ -640,7 +640,7 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
 // Store value to base + scaled index.
 LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
-  LIR *first = NULL;
+  LIR *first = nullptr;
   MipsOpCode opcode = kMipsNop;
   RegStorage t_reg = AllocTemp();
@@ -696,8 +696,8 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
    * rlp and then restore.
    */
   LIR *res;
-  LIR *load = NULL;
-  LIR *load2 = NULL;
+  LIR *load = nullptr;
+  LIR *load2 = nullptr;
   MipsOpCode opcode = kMipsNop;
   bool short_form = IS_SIMM16(displacement);
   bool is64bit = false;
@@ -857,8 +857,8 @@ LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r
 LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                     OpSize size) {
   LIR *res;
-  LIR *store = NULL;
-  LIR *store2 = NULL;
+  LIR *store = nullptr;
+  LIR *store2 = nullptr;
   MipsOpCode opcode = kMipsNop;
   bool short_form = IS_SIMM16(displacement);
   bool is64bit = false;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 2deb72722f..e9e9161a1c 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1219,7 +1219,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
   block_label_list_[block_id].flags.fixup = kFixupLabel;
   AppendLIR(&block_label_list_[block_id]);

-  LIR* head_lir = NULL;
+  LIR* head_lir = nullptr;

   // If this is a catch block, export the start address.
   if (bb->catch_entry) {
@@ -1245,7 +1245,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
     DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
   }

-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     ResetRegPool();
     if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
       ClobberAllTemps();
@@ -1269,7 +1269,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
     GenPrintLabel(mir);

     // Remember the first LIR for this block.
-    if (head_lir == NULL) {
+    if (head_lir == nullptr) {
       head_lir = &block_label_list_[bb->id];
       // Set the first label as a scheduling barrier.
       DCHECK(!head_lir->flags.use_def_invalid);
@@ -1309,7 +1309,7 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
   cu_->NewTimingSplit("SpecialMIR2LIR");
   // Find the first DalvikByteCode block.
   DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
-  BasicBlock*bb = NULL;
+  BasicBlock*bb = nullptr;
   for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
     BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
     if (candidate->block_type == kDalvikByteCode) {
@@ -1317,11 +1317,11 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
       break;
     }
   }
-  if (bb == NULL) {
+  if (bb == nullptr) {
     return false;
   }
   DCHECK_EQ(bb->start_offset, 0);
-  DCHECK(bb->first_mir_insn != NULL);
+  DCHECK(bb->first_mir_insn != nullptr);

   // Get the first instruction.
   MIR* mir = bb->first_mir_insn;
@@ -1343,17 +1343,17 @@ void Mir2Lir::MethodMIR2LIR() {
   PreOrderDfsIterator iter(mir_graph_);
   BasicBlock* curr_bb = iter.Next();
   BasicBlock* next_bb = iter.Next();
-  while (curr_bb != NULL) {
+  while (curr_bb != nullptr) {
     MethodBlockCodeGen(curr_bb);
     // If the fall_through block is no longer laid out consecutively, drop in a branch.
     BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
-    if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+    if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
       OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
     }
     curr_bb = next_bb;
     do {
       next_bb = iter.Next();
-    } while ((next_bb != NULL) && (next_bb->block_type == kDead));
+    } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
   }
   HandleSlowPaths();
 }
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f9efe37cc9..8f08a51e95 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -388,7 +388,7 @@ class Mir2Lir {
     LIR* DefEnd() { return def_end_; }
     void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
     void ResetDefBody() { def_start_ = def_end_ = nullptr; }
-    // Find member of aliased set matching storage_used; return nullptr if none.
+    // Find member of aliased set matching storage_used; return null if none.
     RegisterInfo* FindMatchingView(uint32_t storage_used) {
       RegisterInfo* res = Master();
       for (; res != nullptr; res = res->GetAliasChain()) {
@@ -605,7 +605,7 @@ class Mir2Lir {
     char* ArenaStrdup(const char* str) {
       size_t len = strlen(str) + 1;
       char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
-      if (res != NULL) {
+      if (res != nullptr) {
         strncpy(res, str, len);
       }
       return res;
@@ -650,7 +650,7 @@ class Mir2Lir {
     void DumpPromotionMap();
     void CodegenDump();
     LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
-                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
     LIR* NewLIR0(int opcode);
     LIR* NewLIR1(int opcode, int dest);
     LIR* NewLIR2(int opcode, int dest, int src1);
@@ -1120,8 +1120,8 @@ class Mir2Lir {
      * @param base_reg The register holding the base address.
      * @param offset The offset from the base.
      * @param check_value The immediate to compare to.
-     * @param target branch target (or nullptr)
-     * @param compare output for getting LIR for comparison (or nullptr)
+     * @param target branch target (or null)
+     * @param compare output for getting LIR for comparison (or null)
      * @returns The branch instruction that was generated.
      */
     virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
@@ -1854,7 +1854,7 @@ class Mir2Lir {
     // to deduplicate the masks.
     ResourceMaskCache mask_cache_;

-    // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+    // Record the MIR that generated a given safepoint (null for prologue safepoints).
     ArenaVector<std::pair<LIR*, MIR*>> safepoints_;

     // The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
@@ -1869,7 +1869,7 @@ class Mir2Lir {
     // For architectures that don't have true PC-relative addressing (see pc_rel_temp_
     // above) and also have a limited range of offsets for loads, it's useful to
     // know the minimum offset into the dex cache arrays, so we calculate that as well
-    // if pc_rel_temp_ isn't nullptr.
+    // if pc_rel_temp_ isn't null.
     uint32_t dex_cache_arrays_min_offset_;

     dwarf::LazyDebugFrameOpCodeWriter cfi_;
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 555d5b9cf3..b3c73557a7 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -100,7 +100,7 @@ class QuickCFITest : public CFITest {
       }
     }
     m2l->AdjustSpillMask();
-    m2l->GenEntrySequence(NULL, m2l->LocCReturnRef());
+    m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef());
     m2l->GenExitSequence();
    m2l->HandleSlowPaths();
     m2l->AssembleLIR();
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fc3e687469..39eb117e9c 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -102,7 +102,7 @@ static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
 static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
               "kDisabledOpts unexpected");

-// Supported shorty types per instruction set. nullptr means that all are available.
+// Supported shorty types per instruction set. null means that all are available.
 // Z : boolean
 // B : byte
 // S : short
@@ -422,7 +422,7 @@ static int kInvokeOpcodes[] = {
   Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
 };

-// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
+// Unsupported opcodes. null can be used when everything is supported. Size of the lists is
 // recorded below.
 static const int* kUnsupportedOpcodes[] = {
   // 0 = kNone.
@@ -515,7 +515,7 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
   for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
     BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
-    if (bb == NULL) continue;
+    if (bb == nullptr) continue;
     if (bb->block_type == kDead) continue;
     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       int opcode = mir->dalvikInsn.opcode;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e779479780..8ec86fa56c 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -935,7 +935,7 @@ bool Mir2Lir::CheckCorePoolSanity() {
       RegStorage my_reg = info->GetReg();
       RegStorage partner_reg = info->Partner();
       RegisterInfo* partner = GetRegInfo(partner_reg);
-      DCHECK(partner != NULL);
+      DCHECK(partner != nullptr);
       DCHECK(partner->IsWide());
       DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
       DCHECK(partner->IsLive());
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index af19f5eaed..eb3335798e 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1633,7 +1633,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
   AssemblerStatus res = kSuccess;  // Assume success
   const bool kVerbosePcFixup = false;

-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     if (IsPseudoLirOp(lir->opcode)) {
       continue;
     }
@@ -1646,7 +1646,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
     switch (lir->opcode) {
       case kX86Jcc8: {
         LIR *target_lir = lir->target;
-        DCHECK(target_lir != NULL);
+        DCHECK(target_lir != nullptr);
         int delta = 0;
         CodeOffset pc;
         if (IS_SIMM8(lir->operands[0])) {
@@ -1679,7 +1679,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
       }
       case kX86Jcc32: {
         LIR *target_lir = lir->target;
-        DCHECK(target_lir != NULL);
+        DCHECK(target_lir != nullptr);
         CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
         CodeOffset target = target_lir->offset;
         int delta = target - pc;
@@ -1695,7 +1695,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
       }
       case kX86Jecxz8: {
         LIR *target_lir = lir->target;
-        DCHECK(target_lir != NULL);
+        DCHECK(target_lir != nullptr);
         CodeOffset pc;
         pc = lir->offset + 2;  // opcode + rel8
         CodeOffset target = target_lir->offset;
@@ -1706,7 +1706,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
       }
       case kX86Jmp8: {
         LIR *target_lir = lir->target;
-        DCHECK(target_lir != NULL);
+        DCHECK(target_lir != nullptr);
         int delta = 0;
         CodeOffset pc;
         if (IS_SIMM8(lir->operands[0])) {
@@ -1738,7 +1738,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
       }
       case kX86Jmp32: {
         LIR *target_lir = lir->target;
-        DCHECK(target_lir != NULL);
+        DCHECK(target_lir != nullptr);
         CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
         CodeOffset target = target_lir->offset;
         int delta = target - pc;
@@ -1748,7 +1748,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
       default:
         if (lir->flags.fixup == kFixupLoad) {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           CodeOffset target = target_lir->offset;
           // Handle 64 bit RIP addressing.
           if (lir->operands[1] == kRIPReg) {
@@ -1950,7 +1950,7 @@ int X86Mir2Lir::AssignInsnOffsets() {
   LIR* lir;
   int offset = 0;

-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     lir->offset = offset;
     if (LIKELY(!IsPseudoLirOp(lir->opcode))) {
       if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d7a5eb04db..e2364d8548 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -80,7 +80,7 @@ void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocat

   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size - 1);
-  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+  LIR* branch_over = OpCondBranch(kCondHi, nullptr);

   RegStorage addr_for_jump;
   if (cu_->target64) {
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 10af31a0c9..8e81746db5 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -484,13 +484,13 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
   } else {
     NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   }
-  LIR* branch = NULL;
+  LIR* branch = nullptr;
   if (unordered_gt) {
     branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
   }
   // If the result reg can't be byte accessed, use a jump and move instead of a set.
   if (!IsByteRegister(rl_result.reg)) {
-    LIR* branch2 = NULL;
+    LIR* branch2 = nullptr;
     if (unordered_gt) {
       branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
       NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
@@ -513,7 +513,7 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double) {
   LIR* taken = &block_label_list_[bb->taken];
   LIR* not_taken = &block_label_list_[bb->fall_through];
-  LIR* branch = NULL;
+  LIR* branch = nullptr;
   RegLocation rl_src1;
   RegLocation rl_src2;
   if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 2c13b6173f..943bfc0300 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1569,7 +1569,7 @@ LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
   } else {
     OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
   }
-  return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+  return OpCondBranch((target == nullptr) ? kCondNe : kCondEq, target);
 }

 // Decrement register and branch on condition
@@ -3005,7 +3005,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,

   // Assume that there is no match.
   LoadConstant(result_reg, 0);
-  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);

   // We will use this register to compare to memory below.
   // References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index a16e242d08..b4603793b4 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1281,7 +1281,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
   RegLocation rl_return = GetReturn(kCoreReg);
   RegLocation rl_dest = InlineTarget(info);

-  // Is the string non-NULL?
+  // Is the string non-null?
   LoadValueDirectFixed(rl_obj, rs_rDX);
   GenNullCheck(rs_rDX, info->opt_flags);
   info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index efcb9eefb5..61a1becac1 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -578,7 +578,7 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
     } else if (pc_rel_base_reg_.Valid() || cu_->target64) {
       // We will load the value from the literal area.
       LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-      if (data_target == NULL) {
+      if (data_target == nullptr) {
        data_target = AddWideData(&literal_list_, val_lo, val_hi);
       }

@@ -642,8 +642,8 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {

 LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_dest, OpSize size) {
-  LIR *load = NULL;
-  LIR *load2 = NULL;
+  LIR *load = nullptr;
+  LIR *load2 = nullptr;
   bool is_array = r_index.Valid();
   bool pair = r_dest.IsPair();
   bool is64bit = ((size == k64) || (size == kDouble));
@@ -763,7 +763,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
     }
   }

-  // Always return first load generated as this might cause a fault if base is nullptr.
+  // Always return first load generated as this might cause a fault if base is null.
   return load;
 }

@@ -791,8 +791,8 @@ LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_

 LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                       int displacement, RegStorage r_src, OpSize size,
                                       int opt_flags) {
-  LIR *store = NULL;
-  LIR *store2 = NULL;
+  LIR *store = nullptr;
+  LIR *store2 = nullptr;
   bool is_array = r_index.Valid();
   bool pair = r_src.IsPair();
   bool is64bit = (size == k64) || (size == kDouble);