Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk  1
-rw-r--r--  compiler/common_compiler_test.cc  2
-rw-r--r--  compiler/compiled_method.cc  2
-rw-r--r--  compiler/dex/dataflow_iterator-inl.h  8
-rw-r--r--  compiler/dex/dataflow_iterator.h  2
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc  2
-rw-r--r--  compiler/dex/global_value_numbering_test.cc  3
-rw-r--r--  compiler/dex/gvn_dead_code_elimination.cc  10
-rw-r--r--  compiler/dex/gvn_dead_code_elimination.h  2
-rw-r--r--  compiler/dex/local_value_numbering_test.cc  2
-rw-r--r--  compiler/dex/mir_analysis.cc  6
-rw-r--r--  compiler/dex/mir_dataflow.cc  43
-rw-r--r--  compiler/dex/mir_field_info.h  4
-rw-r--r--  compiler/dex/mir_graph.cc  61
-rw-r--r--  compiler/dex/mir_graph.h  69
-rw-r--r--  compiler/dex/mir_method_info.h  5
-rw-r--r--  compiler/dex/mir_optimization.cc  120
-rw-r--r--  compiler/dex/pass_driver.h  2
-rw-r--r--  compiler/dex/pass_driver_me.h  7
-rw-r--r--  compiler/dex/pass_driver_me_post_opt.cc  2
-rw-r--r--  compiler/dex/post_opt_passes.h  20
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc  18
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc  16
-rw-r--r--  compiler/dex/quick/arm/fp_arm.cc  6
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc  16
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc  12
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc  16
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc  16
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc  2
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc  14
-rw-r--r--  compiler/dex/quick/codegen_util.cc  2
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc  10
-rw-r--r--  compiler/dex/quick/gen_common.cc  34
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc  16
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc  2
-rw-r--r--  compiler/dex/quick/mips/assemble_mips.cc  8
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc  4
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc  8
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc  12
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc  44
-rw-r--r--  compiler/dex/quick/mir_to_lir.h  20
-rw-r--r--  compiler/dex/quick/quick_cfi_test.cc  2
-rw-r--r--  compiler/dex/quick/quick_compiler.cc  6
-rw-r--r--  compiler/dex/quick/ralloc_util.cc  2
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc  16
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc  2
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc  12
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc  6
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc  2
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc  12
-rw-r--r--  compiler/dex/ssa_transformation.cc  40
-rw-r--r--  compiler/dex/type_inference.cc  1067
-rw-r--r--  compiler/dex/type_inference.h  443
-rw-r--r--  compiler/dex/type_inference_test.cc  2044
-rw-r--r--  compiler/dex/verification_results.cc  2
-rw-r--r--  compiler/dex/verified_method.cc  4
-rw-r--r--  compiler/dex/verified_method.h  2
-rw-r--r--  compiler/dex/vreg_analysis.cc  452
-rw-r--r--  compiler/driver/compiler_driver-inl.h  6
-rw-r--r--  compiler/driver/compiler_driver.cc  8
-rw-r--r--  compiler/driver/compiler_driver.h  14
-rw-r--r--  compiler/driver/compiler_driver_test.cc  31
-rw-r--r--  compiler/driver/dex_compilation_unit.h  3
-rw-r--r--  compiler/dwarf/register.h  1
-rw-r--r--  compiler/elf_builder.h  6
-rw-r--r--  compiler/elf_writer_debug.cc  8
-rw-r--r--  compiler/elf_writer_quick.cc  2
-rw-r--r--  compiler/elf_writer_test.cc  8
-rw-r--r--  compiler/image_test.cc  16
-rw-r--r--  compiler/image_writer.cc  24
-rw-r--r--  compiler/jni/quick/calling_convention.cc  4
-rw-r--r--  compiler/jni/quick/jni_compiler.cc  12
-rw-r--r--  compiler/oat_writer.cc  18
-rw-r--r--  compiler/oat_writer.h  8
-rw-r--r--  compiler/optimizing/gvn.cc  2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc  13
-rw-r--r--  compiler/optimizing/nodes.cc  4
-rw-r--r--  compiler/optimizing/nodes.h  8
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h  7
-rw-r--r--  compiler/output_stream_test.cc  6
-rw-r--r--  compiler/utils/arm/assembler_arm.h  14
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h  6
-rw-r--r--  compiler/utils/assembler.cc  8
-rw-r--r--  compiler/utils/assembler.h  18
-rw-r--r--  compiler/utils/dedupe_set.h  4
-rw-r--r--  compiler/utils/mips/assembler_mips.h  14
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h  6
-rw-r--r--  compiler/utils/test_dex_file_builder.h  372
-rw-r--r--  compiler/utils/test_dex_file_builder_test.cc  84
-rw-r--r--  compiler/utils/x86/assembler_x86.h  14
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc  2
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h  14
92 files changed, 4540 insertions(+), 988 deletions(-)
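Summary of the patch: the old in-MIRGraph type/size inference (InferTypeAndSize() and the SetFp/SetCore/SetRef/SetWide/SetHigh helpers, backed by the per-MIR fp_use/fp_def arrays in SSARepresentation) is replaced by a standalone TypeInference class in dex/type_inference.{h,cc}, hooked into the post-opt pass through new MIRGraph::InferTypesStart()/InferTypes()/InferTypesEnd() methods, with new DF_CHK_CAST and DF_SAME_TYPE_AB data-flow attributes feeding it. A minimal sketch of the resulting pass lifecycle, assuming the repeating pre-order traversal that kRepeatingPreOrderDFSTraversal requests and the usual ART compiler headers; the driver loop itself is illustrative, only the MIRGraph methods come from this patch:

  void RunTypeInference(CompilationUnit* c_unit) {
    MIRGraph* mir_graph = c_unit->mir_graph.get();
    mir_graph->InferTypesStart();                // Allocates temp_.ssa.ti (TypeInference).
    bool changed = true;
    while (changed) {                            // Repeat until a full pass changes nothing.
      changed = false;
      PreOrderDfsIterator iter(mir_graph);
      for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
        changed |= mir_graph->InferTypes(bb);    // TypeInference::Apply(bb) per block.
      }
    }
    mir_graph->InferTypesEnd();                  // TypeInference::Finish() and tear-down.
  }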
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 3458784de4..3f5271d31f 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -23,6 +23,7 @@ LIBART_COMPILER_SRC_FILES := \
dex/global_value_numbering.cc \
dex/gvn_dead_code_elimination.cc \
dex/local_value_numbering.cc \
+ dex/type_inference.cc \
dex/quick/arm/assemble_arm.cc \
dex/quick/arm/call_arm.cc \
dex/quick/arm/fp_arm.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 05cb8b458e..5a9e04f5dd 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -263,7 +263,7 @@ void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
- CHECK(method != NULL) << "Virtual method not found: "
+ CHECK(method != nullptr) << "Virtual method not found: "
<< class_name << "." << method_name << signature;
CompileMethod(method);
}
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 4f7a970fdd..d1acada6dd 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -108,7 +108,7 @@ const void* CompiledCode::CodePointer(const void* code_pointer,
}
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 6e25db6f04..83dfc28844 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -23,7 +23,7 @@ namespace art {
// Single forward pass over the nodes.
inline BasicBlock* DataflowIterator::ForwardSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ < end_idx_) {
@@ -38,7 +38,7 @@ inline BasicBlock* DataflowIterator::ForwardSingleNext() {
// Repeat full forward passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we at the end and have we changed something?
if ((idx_ >= end_idx_) && changed_ == true) {
@@ -61,7 +61,7 @@ inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
// Single reverse pass over the nodes.
inline BasicBlock* DataflowIterator::ReverseSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ >= 0) {
@@ -76,7 +76,7 @@ inline BasicBlock* DataflowIterator::ReverseSingleNext() {
// Repeat full backwards passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we done and we changed something during the last iteration?
if ((idx_ < 0) && changed_) {
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 2a06cec9a0..097c2a40b4 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -72,7 +72,7 @@ namespace art {
: mir_graph_(mir_graph),
start_idx_(start_idx),
end_idx_(end_idx),
- block_id_list_(NULL),
+ block_id_list_(nullptr),
idx_(0),
repeats_(0),
changed_(false) {}
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ef94d8b66f..d1ddfda545 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -301,7 +301,7 @@ extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::C
art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
UNUSED(invoke_type);
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
- art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
+ art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
driver.GetVerifiedMethod(&dex_file, method_idx));
art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level);
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index b4559ef375..c538d0beee 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -15,7 +15,6 @@
*/
#include "base/logging.h"
-#include "dataflow_iterator.h"
#include "dataflow_iterator-inl.h"
#include "dex/mir_field_info.h"
#include "global_value_numbering.h"
@@ -260,10 +259,8 @@ class GlobalValueNumberingTest : public testing::Test {
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by LVN.
- mir->ssa_rep->fp_use = nullptr; // Not used by LVN.
mir->ssa_rep->num_defs = def->num_defs;
mir->ssa_rep->defs = const_cast<int32_t*>(def->defs); // Not modified by LVN.
- mir->ssa_rep->fp_def = nullptr; // Not used by LVN.
mir->dalvikInsn.opcode = def->opcode;
mir->offset = i; // LVN uses offset only for debug output
mir->optimization_flags = 0u;
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index ec12221f3c..d7f36f787e 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -478,7 +478,7 @@ void GvnDeadCodeElimination::ChangeBinOp2AddrToPlainBinOp(MIR* mir) {
mir->dalvikInsn.opcode - Instruction::ADD_INT_2ADDR + Instruction::ADD_INT);
}
-MIR* GvnDeadCodeElimination::CreatePhi(int s_reg, bool fp) {
+MIR* GvnDeadCodeElimination::CreatePhi(int s_reg) {
int v_reg = mir_graph_->SRegToVReg(s_reg);
MIR* phi = mir_graph_->NewMIR();
phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
@@ -491,11 +491,9 @@ MIR* GvnDeadCodeElimination::CreatePhi(int s_reg, bool fp) {
mir_graph_->AllocateSSADefData(phi, 1);
phi->ssa_rep->defs[0] = s_reg;
- phi->ssa_rep->fp_def[0] = fp;
size_t num_uses = bb_->predecessors.size();
mir_graph_->AllocateSSAUseData(phi, num_uses);
- std::fill_n(phi->ssa_rep->fp_use, num_uses, fp);
size_t idx = 0u;
for (BasicBlockId pred_id : bb_->predecessors) {
BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
@@ -523,14 +521,12 @@ MIR* GvnDeadCodeElimination::RenameSRegDefOrCreatePhi(uint16_t def_change, uint1
// defining MIR for that dalvik reg, the preserved valus must come from its predecessors
// and we need to create a new Phi (a degenerate Phi if there's only a single predecessor).
if (def_change == kNPos) {
- bool fp = mir_to_kill->ssa_rep->fp_def[0];
if (wide) {
DCHECK_EQ(new_s_reg + 1, mir_to_kill->ssa_rep->defs[1]);
- DCHECK_EQ(fp, mir_to_kill->ssa_rep->fp_def[1]);
DCHECK_EQ(mir_graph_->SRegToVReg(new_s_reg) + 1, mir_graph_->SRegToVReg(new_s_reg + 1));
- CreatePhi(new_s_reg + 1, fp); // High word Phi.
+ CreatePhi(new_s_reg + 1); // High word Phi.
}
- return CreatePhi(new_s_reg, fp);
+ return CreatePhi(new_s_reg);
} else {
DCHECK_LT(def_change, last_change);
DCHECK_LE(last_change, vreg_chains_.NumMIRs());
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
index 9a19f29970..f2378f2ced 100644
--- a/compiler/dex/gvn_dead_code_elimination.h
+++ b/compiler/dex/gvn_dead_code_elimination.h
@@ -128,7 +128,7 @@ class GvnDeadCodeElimination : public DeletableArenaObject<kArenaAllocMisc> {
void KillMIR(MIRData* data);
static void KillMIR(MIR* mir);
static void ChangeBinOp2AddrToPlainBinOp(MIR* mir);
- MIR* CreatePhi(int s_reg, bool fp);
+ MIR* CreatePhi(int s_reg);
MIR* RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change, MIR* mir_to_kill);
// Update state variables going backwards through a MIR.
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 566527ad42..0393410867 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -158,10 +158,8 @@ class LocalValueNumberingTest : public testing::Test {
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by LVN.
- mir->ssa_rep->fp_use = nullptr; // Not used by LVN.
mir->ssa_rep->num_defs = def->num_defs;
mir->ssa_rep->defs = const_cast<int32_t*>(def->defs); // Not modified by LVN.
- mir->ssa_rep->fp_def = nullptr; // Not used by LVN.
mir->dalvikInsn.opcode = def->opcode;
mir->offset = i; // LVN uses offset only for debug output
mir->optimization_flags = 0u;
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 3d7a640ce3..9099e8a54d 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -968,7 +968,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
* edges until we reach an explicit branch or return.
*/
BasicBlock* ending_bb = bb;
- if (ending_bb->last_mir_insn != NULL) {
+ if (ending_bb->last_mir_insn != nullptr) {
uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
while ((ending_flags & kAnBranch) == 0) {
ending_bb = GetBasicBlock(ending_bb->fall_through);
@@ -998,7 +998,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
bool done = false;
while (!done) {
tbb->visited = true;
- for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
// Skip any MIR pseudo-op.
continue;
@@ -1195,7 +1195,7 @@ bool MIRGraph::SkipCompilation(std::string* skip_message) {
ClearAllVisitedFlags();
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
AnalyzeBlock(bb, &stats);
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 2a920a4e29..b4aec98e01 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -123,7 +123,7 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_UA | DF_NULL_CHK_A | DF_REF_A,
// 1F CHK_CAST vAA, type@BBBB
- DF_UA | DF_REF_A | DF_UMS,
+ DF_UA | DF_REF_A | DF_CHK_CAST | DF_UMS,
// 20 INSTANCE_OF vA, vB, type@CCCC
DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
@@ -159,10 +159,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_NOP,
// 2B PACKED_SWITCH vAA, +BBBBBBBB
- DF_UA,
+ DF_UA | DF_CORE_A,
// 2C SPARSE_SWITCH vAA, +BBBBBBBB
- DF_UA,
+ DF_UA | DF_CORE_A,
// 2D CMPL_FLOAT vAA, vBB, vCC
DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
@@ -180,22 +180,22 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
// 32 IF_EQ vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 33 IF_NE vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 34 IF_LT vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 35 IF_GE vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 36 IF_GT vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 37 IF_LE vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 38 IF_EQZ vAA, +BBBB
DF_UA,
@@ -989,7 +989,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
MIR* mir;
ArenaBitVector *use_v, *def_v, *live_in_v;
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
use_v = bb->data_flow_info->use_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
@@ -998,7 +998,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
live_in_v = bb->data_flow_info->live_in_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
@@ -1080,8 +1080,6 @@ void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
if (mir->ssa_rep->num_uses_allocated < num_uses) {
mir->ssa_rep->uses = arena_->AllocArray<int32_t>(num_uses, kArenaAllocDFInfo);
- // NOTE: will be filled in during type & size inference pass
- mir->ssa_rep->fp_use = arena_->AllocArray<bool>(num_uses, kArenaAllocDFInfo);
}
}
@@ -1090,7 +1088,6 @@ void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
if (mir->ssa_rep->num_defs_allocated < num_defs) {
mir->ssa_rep->defs = arena_->AllocArray<int32_t>(num_defs, kArenaAllocDFInfo);
- mir->ssa_rep->fp_def = arena_->AllocArray<bool>(num_defs, kArenaAllocDFInfo);
}
}
@@ -1191,7 +1188,7 @@ void MIRGraph::DataFlowSSAFormatExtended(MIR* mir) {
/* Entry function to convert a block into SSA representation */
bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
/*
* Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
@@ -1214,7 +1211,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
}
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->ssa_rep =
static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
kArenaAllocDFInfo));
@@ -1287,35 +1284,27 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
if (df_attributes & DF_HAS_USES) {
num_uses = 0;
if (df_attributes & DF_UA) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
if (df_attributes & DF_A_WIDE) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
}
}
if (df_attributes & DF_UB) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
if (df_attributes & DF_B_WIDE) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
}
}
if (df_attributes & DF_UC) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
if (df_attributes & DF_C_WIDE) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
}
}
}
if (df_attributes & DF_HAS_DEFS) {
- mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
if (df_attributes & DF_A_WIDE) {
- mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
HandleSSADef(mir->ssa_rep->defs, d_insn->vA+1, 1);
}
}
@@ -1413,8 +1402,8 @@ void MIRGraph::CountUses(BasicBlock* bb) {
return;
}
uint32_t weight = GetUseCountWeight(bb);
- for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
@@ -1459,7 +1448,7 @@ bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
void MIRGraph::VerifyDataflow() {
/* Verify if all blocks are connected as claimed */
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
VerifyPredInfo(bb);
}
}
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index ca5695893e..e4570fd8d3 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -124,7 +124,7 @@ class MirFieldInfo {
uint16_t declaring_field_idx_;
// The type index of the class declaring the field, 0 if unresolved.
uint16_t declaring_class_idx_;
- // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ // The dex file that defines the class containing the field and the field, null if unresolved.
const DexFile* declaring_dex_file_;
};
@@ -179,6 +179,7 @@ class MirIFieldLoweringInfo : public MirFieldInfo {
friend class GlobalValueNumberingTest;
friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
+ friend class TypeInferenceTest;
};
class MirSFieldLoweringInfo : public MirFieldInfo {
@@ -254,6 +255,7 @@ class MirSFieldLoweringInfo : public MirFieldInfo {
friend class GlobalValueNumberingTest;
friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
+ friend class TypeInferenceTest;
};
} // namespace art
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 4d340387f2..b5c42f11ac 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -81,15 +81,15 @@ const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
};
MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
- : reg_location_(NULL),
+ : reg_location_(nullptr),
block_id_map_(std::less<unsigned int>(), arena->Adapter()),
cu_(cu),
ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
- vreg_to_ssa_map_(NULL),
- ssa_last_defs_(NULL),
- is_constant_v_(NULL),
- constant_values_(NULL),
+ vreg_to_ssa_map_(nullptr),
+ ssa_last_defs_(nullptr),
+ is_constant_v_(nullptr),
+ constant_values_(nullptr),
use_counts_(arena->Adapter()),
raw_use_counts_(arena->Adapter()),
num_reachable_blocks_(0),
@@ -106,24 +106,24 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
max_nested_loops_(0u),
- i_dom_list_(NULL),
+ i_dom_list_(nullptr),
temp_scoped_alloc_(),
block_list_(arena->Adapter(kArenaAllocBBList)),
- try_block_addr_(NULL),
- entry_block_(NULL),
- exit_block_(NULL),
- current_code_item_(NULL),
+ try_block_addr_(nullptr),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
+ current_code_item_(nullptr),
m_units_(arena->Adapter()),
method_stack_(arena->Adapter()),
current_method_(kInvalidEntry),
current_offset_(kInvalidEntry),
def_count_(0),
- opcode_count_(NULL),
+ opcode_count_(nullptr),
num_ssa_regs_(0),
extended_basic_blocks_(arena->Adapter()),
method_sreg_(0),
attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke.
- checkstats_(NULL),
+ checkstats_(nullptr),
arena_(arena),
backward_branches_(0),
forward_branches_(0),
@@ -185,13 +185,13 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
DCHECK_GT(code_offset, orig_block->start_offset);
MIR* insn = orig_block->first_mir_insn;
- MIR* prev = NULL; // Will be set to instruction before split.
+ MIR* prev = nullptr; // Will be set to instruction before split.
while (insn) {
if (insn->offset == code_offset) break;
prev = insn;
insn = insn->next;
}
- if (insn == NULL) {
+ if (insn == nullptr) {
LOG(FATAL) << "Break split failed";
}
// Now insn is at the instruction where we want to split, namely
@@ -530,7 +530,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
size = switch_data[1];
first_key = switch_data[2] | (switch_data[3] << 16);
target_table = reinterpret_cast<const int*>(&switch_data[4]);
- keyTable = NULL; // Make the compiler happy.
+ keyTable = nullptr; // Make the compiler happy.
/*
* Sparse switch data format:
* ushort ident = 0x0200 magic value
@@ -695,9 +695,10 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
current_method_ = m_units_.size();
current_offset_ = 0;
// TODO: will need to snapshot stack image and use that as the mir context identification.
- m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
- dex_file, current_code_item_, class_def_idx, method_idx, access_flags,
- cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
+ m_units_.push_back(new (arena_) DexCompilationUnit(
+ cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file,
+ current_code_item_, class_def_idx, method_idx, access_flags,
+ cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
const uint16_t* code_ptr = current_code_item_->insns_;
const uint16_t* code_end =
current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
@@ -717,8 +718,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
// If this is the first method, set up default entry and exit blocks.
if (current_method_ == 0) {
- DCHECK(entry_block_ == NULL);
- DCHECK(exit_block_ == NULL);
+ DCHECK(entry_block_ == nullptr);
+ DCHECK(exit_block_ == nullptr);
DCHECK_EQ(GetNumBlocks(), 0U);
// Use id 0 to represent a null block.
BasicBlock* null_block = CreateNewBB(kNullBlock);
@@ -754,7 +755,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
insn->m_unit_index = current_method_;
int width = ParseInsn(code_ptr, &insn->dalvikInsn);
Instruction::Code opcode = insn->dalvikInsn.opcode;
- if (opcode_count_ != NULL) {
+ if (opcode_count_ != nullptr) {
opcode_count_[static_cast<int>(opcode)]++;
}
@@ -878,7 +879,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
void MIRGraph::ShowOpcodeStats() {
- DCHECK(opcode_count_ != NULL);
+ DCHECK(opcode_count_ != nullptr);
LOG(INFO) << "Opcode Count";
for (int i = 0; i < kNumPackedOpcodes; i++) {
if (opcode_count_[i] != 0) {
@@ -946,7 +947,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
return;
}
file = fopen(fpath.c_str(), "w");
- if (file == NULL) {
+ if (file == nullptr) {
PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG.";
return;
}
@@ -960,7 +961,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
for (idx = 0; idx < num_blocks; idx++) {
int block_idx = all_blocks ? idx : dfs_order_[idx];
BasicBlock* bb = GetBasicBlock(block_idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
if (bb->hidden) continue;
if (bb->block_type == kEntryBlock) {
@@ -1500,8 +1501,8 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
}
nop = true;
}
- int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
- int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+ int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
+ int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
// Note that this does not check the MIR's opcode in all cases. In cases where it
@@ -1529,7 +1530,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
for (int i = 0; i < uses; i++) {
str.append(" ");
str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
- if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
+ if (!show_singles && (reg_location_ != nullptr) && reg_location_[i].wide) {
// For the listing, skip the high sreg.
i++;
}
@@ -1622,7 +1623,7 @@ std::string MIRGraph::GetSSAName(int ssa_reg) {
// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
- if (reg_location_ == NULL) {
+ if (reg_location_ == nullptr) {
// Pre-SSA - just use the standard name.
return GetSSAName(ssa_reg);
}
@@ -1715,7 +1716,7 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bo
CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
kArenaAllocMisc));
MIR* move_result_mir = FindMoveResult(bb, mir);
- if (move_result_mir == NULL) {
+ if (move_result_mir == nullptr) {
info->result.location = kLocInvalid;
} else {
info->result = GetRawDest(move_result_mir);
@@ -2293,7 +2294,7 @@ bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const
void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
// Reset flags for all MIRs in bb.
- for (MIR* mir = first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= (~reset_flags);
}
}
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 85b13448da..0db54bf23c 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -39,6 +39,7 @@ class DexFileMethodInliner;
class GlobalValueNumbering;
class GvnDeadCodeElimination;
class PassManager;
+class TypeInference;
// Forward declaration.
class MIRGraph;
@@ -64,6 +65,7 @@ enum DataFlowAttributePos {
kNullTransferSrc0, // Object copy src[0] -> dst.
kNullTransferSrcN, // Phi null check state transfer.
kRangeCheckC, // Range check of C.
+ kCheckCastA, // Check cast of A.
kFPA,
kFPB,
kFPC,
@@ -73,6 +75,7 @@ enum DataFlowAttributePos {
kRefA,
kRefB,
kRefC,
+ kSameTypeAB, // A and B have the same type but it can be core/ref/fp (IF_cc).
kUsesMethodStar, // Implicit use of Method*.
kUsesIField, // Accesses an instance field (IGET/IPUT).
kUsesSField, // Accesses a static field (SGET/SPUT).
@@ -101,6 +104,7 @@ enum DataFlowAttributePos {
#define DF_NULL_TRANSFER_0 (UINT64_C(1) << kNullTransferSrc0)
#define DF_NULL_TRANSFER_N (UINT64_C(1) << kNullTransferSrcN)
#define DF_RANGE_CHK_C (UINT64_C(1) << kRangeCheckC)
+#define DF_CHK_CAST (UINT64_C(1) << kCheckCastA)
#define DF_FP_A (UINT64_C(1) << kFPA)
#define DF_FP_B (UINT64_C(1) << kFPB)
#define DF_FP_C (UINT64_C(1) << kFPC)
@@ -110,6 +114,7 @@ enum DataFlowAttributePos {
#define DF_REF_A (UINT64_C(1) << kRefA)
#define DF_REF_B (UINT64_C(1) << kRefB)
#define DF_REF_C (UINT64_C(1) << kRefC)
+#define DF_SAME_TYPE_AB (UINT64_C(1) << kSameTypeAB)
#define DF_UMS (UINT64_C(1) << kUsesMethodStar)
#define DF_IFIELD (UINT64_C(1) << kUsesIField)
#define DF_SFIELD (UINT64_C(1) << kUsesSField)
@@ -217,13 +222,11 @@ struct BasicBlockDataFlow {
*/
struct SSARepresentation {
int32_t* uses;
- bool* fp_use;
int32_t* defs;
- bool* fp_def;
- int16_t num_uses_allocated;
- int16_t num_defs_allocated;
- int16_t num_uses;
- int16_t num_defs;
+ uint16_t num_uses_allocated;
+ uint16_t num_defs_allocated;
+ uint16_t num_uses;
+ uint16_t num_defs;
static uint32_t GetStartUseIndex(Instruction::Code opcode);
};
@@ -334,7 +337,8 @@ class MIR : public ArenaObject<kArenaAllocMIR> {
// SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
// the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
uint32_t sfield_lowering_info;
- // INVOKE data index, points to MIRGraph::method_lowering_infos_.
+ // INVOKE data index, points to MIRGraph::method_lowering_infos_. Also used for inlined
+ // CONST and MOVE insn (with MIR_CALLEE) to remember the invoke for type inference.
uint32_t method_lowering_info;
} meta;
@@ -598,7 +602,7 @@ class MIRGraph {
BasicBlock* GetBasicBlock(unsigned int block_id) const {
DCHECK_LT(block_id, block_list_.size()); // NOTE: NullBasicBlockId is 0.
- return (block_id == NullBasicBlockId) ? NULL : block_list_[block_id];
+ return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
}
size_t GetBasicBlockListCount() const {
@@ -647,6 +651,10 @@ class MIRGraph {
*/
void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
+ bool HasCheckCast() const {
+ return (merged_df_flags_ & DF_CHK_CAST) != 0u;
+ }
+
bool HasFieldAccess() const {
return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
}
@@ -691,8 +699,16 @@ class MIRGraph {
void DoCacheMethodLoweringInfo();
const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
- DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
- return method_lowering_infos_[mir->meta.method_lowering_info];
+ return GetMethodLoweringInfo(mir->meta.method_lowering_info);
+ }
+
+ const MirMethodLoweringInfo& GetMethodLoweringInfo(uint32_t lowering_info) const {
+ DCHECK_LT(lowering_info, method_lowering_infos_.size());
+ return method_lowering_infos_[lowering_info];
+ }
+
+ size_t GetMethodLoweringInfoCount() const {
+ return method_lowering_infos_.size();
}
void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
@@ -1073,7 +1089,9 @@ class MIRGraph {
bool EliminateNullChecksGate();
bool EliminateNullChecks(BasicBlock* bb);
void EliminateNullChecksEnd();
+ void InferTypesStart();
bool InferTypes(BasicBlock* bb);
+ void InferTypesEnd();
bool EliminateClassInitChecksGate();
bool EliminateClassInitChecks(BasicBlock* bb);
void EliminateClassInitChecksEnd();
@@ -1100,34 +1118,6 @@ class MIRGraph {
return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
}
- /*
- * Type inference handling helpers. Because Dalvik's bytecode is not fully typed,
- * we have to do some work to figure out the sreg type. For some operations it is
- * clear based on the opcode (i.e. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
- * may never know the "real" type.
- *
- * We perform the type inference operation by using an iterative walk over
- * the graph, propagating types "defined" by typed opcodes to uses and defs in
- * non-typed opcodes (such as MOVE). The Setxx(index) helpers are used to set defined
- * types on typed opcodes (such as ADD_INT). The Setxx(index, is_xx) form is used to
- * propagate types through non-typed opcodes such as PHI and MOVE. The is_xx flag
- * tells whether our guess of the type is based on a previously typed definition.
- * If so, the defined type takes precedence. Note that it's possible to have the same sreg
- * show multiple defined types because dx treats constants as untyped bit patterns.
- * The return value of the Setxx() helpers says whether or not the Setxx() action changed
- * the current guess, and is used to know when to terminate the iterative walk.
- */
- bool SetFp(int index, bool is_fp);
- bool SetFp(int index);
- bool SetCore(int index, bool is_core);
- bool SetCore(int index);
- bool SetRef(int index, bool is_ref);
- bool SetRef(int index);
- bool SetWide(int index, bool is_wide);
- bool SetWide(int index);
- bool SetHigh(int index, bool is_high);
- bool SetHigh(int index);
-
bool PuntToInterpreter() {
return punt_to_interpreter_;
}
@@ -1252,7 +1242,6 @@ class MIRGraph {
static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
- bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
protected:
int FindCommonParent(int block1, int block2);
@@ -1399,6 +1388,7 @@ class MIRGraph {
ArenaBitVector* work_live_vregs;
ArenaBitVector** def_block_matrix; // num_vregs x num_blocks_.
ArenaBitVector** phi_node_blocks; // num_vregs x num_blocks_.
+ TypeInference* ti;
} ssa;
// Global value numbering.
struct {
@@ -1458,6 +1448,7 @@ class MIRGraph {
friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
friend class TopologicalSortOrderTest;
+ friend class TypeInferenceTest;
friend class QuickCFITest;
};
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 7230c462cd..946c74becf 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -88,7 +88,7 @@ class MirMethodInfo {
// The type index of the class declaring the method, 0 if unresolved.
uint16_t declaring_class_idx_;
// The dex file that defines the class containing the method and the method,
- // nullptr if unresolved.
+ // null if unresolved.
const DexFile* declaring_dex_file_;
};
@@ -223,7 +223,7 @@ class MirMethodLoweringInfo : public MirMethodInfo {
uintptr_t direct_code_;
uintptr_t direct_method_;
// Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
- // devirtualized invoke target if available, nullptr and 0u otherwise.
+ // devirtualized invoke target if available, null and 0u otherwise.
// After Resolve() they hold the actual target method that will be called; it will be either
// a devirtualized target method or the compilation's unit's dex file and MethodIndex().
const DexFile* target_dex_file_;
@@ -232,6 +232,7 @@ class MirMethodLoweringInfo : public MirMethodInfo {
int stats_flags_;
friend class MirOptimizationTest;
+ friend class TypeInferenceTest;
};
} // namespace art
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 9d7b4b4dfd..467c14ed55 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -25,6 +25,7 @@
#include "gvn_dead_code_elimination.h"
#include "local_value_numbering.h"
#include "mir_field_info.h"
+#include "type_inference.h"
#include "quick/dex_file_method_inliner.h"
#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
@@ -54,7 +55,7 @@ void MIRGraph::SetConstantWide(int32_t ssa_reg, int64_t value) {
void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
MIR* mir;
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// Skip pass if BB has MIR without SSA representation.
if (mir->ssa_rep == nullptr) {
return;
@@ -115,11 +116,11 @@ void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
/* Advance to next strictly dominated MIR node in an extended basic block */
MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
BasicBlock* bb = *p_bb;
- if (mir != NULL) {
+ if (mir != nullptr) {
mir = mir->next;
- while (mir == NULL) {
+ while (mir == nullptr) {
bb = GetBasicBlock(bb->fall_through);
- if ((bb == NULL) || Predecessors(bb) != 1) {
+ if ((bb == nullptr) || Predecessors(bb) != 1) {
// mir is null and we cannot proceed further.
break;
} else {
@@ -133,7 +134,7 @@ MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
/*
* To be used at an invoke mir. If the logically next mir node represents
- * a move-result, return it. Else, return NULL. If a move-result exists,
+ * a move-result, return it. Else, return nullptr. If a move-result exists,
* it is required to immediately follow the invoke with no intervening
* opcodes or incoming arcs. However, if the result of the invoke is not
* used, a move-result may not be present.
@@ -141,7 +142,7 @@ MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
BasicBlock* tbb = bb;
mir = AdvanceMIR(&tbb, mir);
- while (mir != NULL) {
+ while (mir != nullptr) {
if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
@@ -151,7 +152,7 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
mir = AdvanceMIR(&tbb, mir);
} else {
- mir = NULL;
+ mir = nullptr;
}
}
return mir;
@@ -159,29 +160,29 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
if (bb->block_type == kDead) {
- return NULL;
+ return nullptr;
}
DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock));
BasicBlock* bb_taken = GetBasicBlock(bb->taken);
BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
- if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
+ if (((bb_fall_through == nullptr) && (bb_taken != nullptr)) &&
((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
// Follow simple unconditional branches.
bb = bb_taken;
} else {
// Follow simple fallthrough
- bb = (bb_taken != NULL) ? NULL : bb_fall_through;
+ bb = (bb_taken != nullptr) ? nullptr : bb_fall_through;
}
- if (bb == NULL || (Predecessors(bb) != 1)) {
- return NULL;
+ if (bb == nullptr || (Predecessors(bb) != 1)) {
+ return nullptr;
}
DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
return bb;
}
static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
if (mir->ssa_rep->uses[i] == ssa_name) {
@@ -190,11 +191,11 @@ static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
}
}
}
- return NULL;
+ return nullptr;
}
static SelectInstructionKind SelectKind(MIR* mir) {
- // Work with the case when mir is nullptr.
+ // Work with the case when mir is null.
if (mir == nullptr) {
return kSelectNone;
}
@@ -255,7 +256,8 @@ size_t MIRGraph::GetNumAvailableVRTemps() {
}
// Calculate remaining ME temps available.
- size_t remaining_me_temps = max_available_non_special_compiler_temps_ - reserved_temps_for_backend_;
+ size_t remaining_me_temps = max_available_non_special_compiler_temps_ -
+ reserved_temps_for_backend_;
if (num_non_special_compiler_temps_ >= remaining_me_temps) {
return 0;
@@ -346,7 +348,8 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
size_t available_temps = GetNumAvailableVRTemps();
if (available_temps <= 0 || (available_temps <= 1 && wide)) {
if (verbose) {
- LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str << " are available.";
+ LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
+ << " are available.";
}
return nullptr;
}
@@ -364,8 +367,8 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
if (verbose) {
- LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v" << compiler_temp->v_reg
- << " and s" << compiler_temp->s_reg_low << " has been created.";
+ LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v"
+ << compiler_temp->v_reg << " and s" << compiler_temp->s_reg_low << " has been created.";
}
if (wide) {
@@ -477,8 +480,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
allocator.get()));
}
- while (bb != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ while (bb != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// TUNING: use the returned value number for CSE.
if (use_lvn) {
local_valnum->GetValueNumber(mir);
@@ -537,7 +540,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
// Bitcode doesn't allow this optimization.
break;
}
- if (mir->next != NULL) {
+ if (mir->next != nullptr) {
MIR* mir_next = mir->next;
// Make sure result of cmp is used by next insn and nowhere else
if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
@@ -574,7 +577,6 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
// Copy the SSA information that is relevant.
mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
mir_next->ssa_rep->uses = mir->ssa_rep->uses;
- mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
mir_next->ssa_rep->num_defs = 0;
mir->ssa_rep->num_uses = 0;
mir->ssa_rep->num_defs = 0;
@@ -594,12 +596,12 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
BasicBlock* ft = GetBasicBlock(bb->fall_through);
- DCHECK(ft != NULL);
+ DCHECK(ft != nullptr);
BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
BasicBlock* ft_tk = GetBasicBlock(ft->taken);
BasicBlock* tk = GetBasicBlock(bb->taken);
- DCHECK(tk != NULL);
+ DCHECK(tk != nullptr);
BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
BasicBlock* tk_tk = GetBasicBlock(tk->taken);
@@ -608,7 +610,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
* transfers to the rejoin block and the fall_through edge goes to a block that
* unconditionally falls through to the rejoin block.
*/
- if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+ if ((tk_ft == nullptr) && (ft_tk == nullptr) && (tk_tk == ft_ft) &&
(Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
/*
* Okay - we have the basic diamond shape.
@@ -628,7 +630,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
MIR* if_false = ft->first_mir_insn;
// It's possible that the target of the select isn't used - skip those (rare) cases.
MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
- if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+ if ((phi != nullptr) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
/*
* We'll convert the IF_EQZ/IF_NEZ to a SELECT. We need to find the
* Phi node in the merge block and delete it (while using the SSA name
@@ -668,16 +670,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
mir->ssa_rep->uses = src_ssa;
mir->ssa_rep->num_uses = 3;
}
- mir->ssa_rep->num_defs = 1;
- mir->ssa_rep->defs = arena_->AllocArray<int32_t>(1, kArenaAllocDFInfo);
- mir->ssa_rep->fp_def = arena_->AllocArray<bool>(1, kArenaAllocDFInfo);
- mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
- // Match type of uses to def.
- mir->ssa_rep->fp_use = arena_->AllocArray<bool>(mir->ssa_rep->num_uses,
- kArenaAllocDFInfo);
- for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
- mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
- }
+ AllocateSSADefData(mir, 1);
/*
* There is usually a Phi node in the join block for our two cases. If the
* Phi node only contains our two cases as input, we will use the result
@@ -721,7 +714,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
}
}
- bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
+ bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) :
+ nullptr;
}
if (use_lvn && UNLIKELY(!global_valnum->Good())) {
LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -732,9 +726,9 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(class BasicBlock* bb) {
- if (bb->data_flow_info != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ if (bb->data_flow_info != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -935,7 +929,7 @@ bool MIRGraph::EliminateNullChecksGate() {
// reset MIR_MARK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= ~MIR_MARK;
}
}
@@ -1010,7 +1004,7 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
// no intervening uses.
// Walk through the instruction in the block, updating as necessary
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
if ((df_attributes & DF_NULL_TRANSFER_N) != 0u) {
@@ -1121,7 +1115,7 @@ void MIRGraph::EliminateNullChecksEnd() {
// converge MIR_MARK with MIR_IGNORE_NULL_CHECK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
uint16_t mirMarkAdjustedToIgnoreNullCheck =
@@ -1131,23 +1125,26 @@ void MIRGraph::EliminateNullChecksEnd() {
}
}
+void MIRGraph::InferTypesStart() {
+ DCHECK(temp_scoped_alloc_ != nullptr);
+ temp_.ssa.ti = new (temp_scoped_alloc_.get()) TypeInference(this, temp_scoped_alloc_.get());
+}
+
/*
* Perform type and size inference for a basic block.
*/
bool MIRGraph::InferTypes(BasicBlock* bb) {
if (bb->data_flow_info == nullptr) return false;
- bool infer_changed = false;
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- if (mir->ssa_rep == NULL) {
- continue;
- }
-
- // Propagate type info.
- infer_changed = InferTypeAndSize(bb, mir, infer_changed);
- }
+ DCHECK(temp_.ssa.ti != nullptr);
+ return temp_.ssa.ti->Apply(bb);
+}
- return infer_changed;
+void MIRGraph::InferTypesEnd() {
+ DCHECK(temp_.ssa.ti != nullptr);
+ temp_.ssa.ti->Finish();
+ delete temp_.ssa.ti;
+ temp_.ssa.ti = nullptr;
}
bool MIRGraph::EliminateClassInitChecksGate() {
@@ -1509,7 +1506,7 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
if (bb->block_type != kDalvikByteCode) {
return;
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
continue;
}
@@ -1540,7 +1537,8 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
->GenInline(this, bb, mir, target.dex_method_index)) {
if (cu_->verbose || cu_->print_pass) {
LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
- << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index, *target.dex_file)
+ << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index,
+ *target.dex_file)
<< "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
<< "\" @0x" << std::hex << mir->offset;
}
@@ -1564,7 +1562,7 @@ void MIRGraph::DumpCheckStats() {
static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
checkstats_ = stats;
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
CountChecks(bb);
}
if (stats->null_checks > 0) {
@@ -1597,7 +1595,7 @@ bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
bool terminated_by_return = false;
bool do_local_value_numbering = false;
// Visit blocks strictly dominated by this head.
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->visited = true;
terminated_by_return |= bb->terminated_by_return;
do_local_value_numbering |= bb->use_lvn;
@@ -1606,7 +1604,7 @@ bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
if (terminated_by_return || do_local_value_numbering) {
// Do lvn for all blocks in this extended set.
bb = start_bb;
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->use_lvn = do_local_value_numbering;
bb->dominates_return = terminated_by_return;
bb = NextDominatedBlock(bb);
@@ -1629,7 +1627,7 @@ void MIRGraph::BasicBlockOptimization() {
if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
ClearAllVisitedFlags();
PreOrderDfsIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
BuildExtendedBBList(bb);
}
// Perform extended basic block optimizations.
@@ -1638,7 +1636,7 @@ void MIRGraph::BasicBlockOptimization() {
}
} else {
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
BasicBlockOpt(bb);
}
}
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 671bcecfba..8762b53af4 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -68,7 +68,7 @@ class PassDriver {
* @return whether the pass was applied.
*/
virtual bool RunPass(const char* pass_name) {
- // Paranoid: c_unit cannot be nullptr and we need a pass name.
+ // Paranoid: c_unit cannot be null and we need a pass name.
DCHECK(pass_name != nullptr);
DCHECK_NE(pass_name[0], 0);
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 94eef225ee..cbe4a02edb 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -88,7 +88,7 @@ class PassDriverME: public PassDriver {
}
bool RunPass(const Pass* pass, bool time_split) OVERRIDE {
- // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name
+ // Paranoid: c_unit and pass cannot be null, and the pass should have a name.
DCHECK(pass != nullptr);
DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
@@ -211,8 +211,9 @@ class PassDriverME: public PassDriver {
* @param settings_to_fill Fills the options to contain the mapping of name of option to the new
* configuration.
*/
- static void FillOverriddenPassSettings(const PassManagerOptions* options, const char* pass_name,
- SafeMap<const std::string, const OptionContent>& settings_to_fill) {
+ static void FillOverriddenPassSettings(
+ const PassManagerOptions* options, const char* pass_name,
+ SafeMap<const std::string, const OptionContent>& settings_to_fill) {
const std::string& settings = options->GetOverriddenPassOptions();
const size_t settings_len = settings.size();
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
index a8b8a54033..b35bc3d7d3 100644
--- a/compiler/dex/pass_driver_me_post_opt.cc
+++ b/compiler/dex/pass_driver_me_post_opt.cc
@@ -41,7 +41,7 @@ void PassDriverMEPostOpt::SetupPasses(PassManager* pass_manager) {
pass_manager->AddPass(new SSAConversion);
pass_manager->AddPass(new PhiNodeOperands);
pass_manager->AddPass(new PerformInitRegLocations);
- pass_manager->AddPass(new TypeInference);
+ pass_manager->AddPass(new TypeInferencePass);
pass_manager->AddPass(new FinishSSATransformation);
}
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
index 1ab862503b..e9fa0eb578 100644
--- a/compiler/dex/post_opt_passes.h
+++ b/compiler/dex/post_opt_passes.h
@@ -263,12 +263,19 @@ class PerformInitRegLocations : public PassMEMirSsaRep {
};
/**
- * @class TypeInference
+ * @class TypeInferencePass
* @brief Type inference pass.
*/
-class TypeInference : public PassMEMirSsaRep {
+class TypeInferencePass : public PassMEMirSsaRep {
public:
- TypeInference() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+ TypeInferencePass() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+ }
+
+ void Start(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->InferTypesStart();
}
bool Worker(PassDataHolder* data) const {
@@ -280,6 +287,13 @@ class TypeInference : public PassMEMirSsaRep {
DCHECK(bb != nullptr);
return c_unit->mir_graph->InferTypes(bb);
}
+
+ void End(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph.get()->InferTypesEnd();
+ }
};
/**
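Note on the rename above: the pass keeps the registered name "TypeInference" but the class becomes TypeInferencePass, presumably so it no longer collides with the new art::TypeInference analysis class that mir_graph.h now forward-declares; the added Start()/End() hooks bracket the per-block Worker() calls. An abbreviated, illustrative view of the split:

  class TypeInference;                                // New analysis class (dex/type_inference.h).
  class TypeInferencePass : public PassMEMirSsaRep {  // Post-opt pass driving it:
    // Start(data)  -> mir_graph->InferTypesStart()   (creates the TypeInference instance)
    // Worker(data) -> mir_graph->InferTypes(bb)      (repeating pre-order DFS traversal)
    // End(data)    -> mir_graph->InferTypesEnd()     (Finish() and tear-down)
  };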
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index c5ac4c1508..df4a9f2048 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1055,7 +1055,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
// new_lir replaces orig_lir in the pcrel_fixup list.
void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -1066,7 +1066,7 @@ void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -1084,7 +1084,7 @@ void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
uint8_t* const write_buffer = write_pos;
- for (; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = (write_pos - write_buffer);
if (!lir->flags.is_nop) {
int opcode = lir->opcode;
@@ -1258,8 +1258,8 @@ void ArmMir2Lir::AssembleLIR() {
generation ^= 1;
// Note: nodes requiring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
/*
* NOTE: the lir being considered here will be encoded following the switch (so long as
* we're not in a retry situation). However, any new non-pc_rel instructions inserted
@@ -1506,7 +1506,7 @@ void ArmMir2Lir::AssembleLIR() {
case kFixupAdr: {
const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
LIR* target = lir->target;
- int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
+ int32_t target_disp = (tab_rec != nullptr) ? tab_rec->offset + offset_adjustment
: target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
offset_adjustment);
int32_t disp = target_disp - ((lir->offset + 4) & ~3);
@@ -1642,7 +1642,7 @@ size_t ArmMir2Lir::GetInsnSize(LIR* lir) {
uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
if (!lir->flags.is_nop) {
if (lir->flags.fixup != kFixupNone) {
@@ -1658,8 +1658,8 @@ uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offse
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 3d18af6169..6ba4016260 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -124,7 +124,7 @@ void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocati
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -156,7 +156,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
} else {
      // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
@@ -165,12 +165,12 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
// r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -238,7 +238,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
      // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
if (!kUseReadBarrier) {
@@ -252,16 +252,16 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index eb1383fcff..94fc4743a4 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -187,7 +187,8 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, Re
return;
}
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(kQuickF2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
return;
case Instruction::LONG_TO_FLOAT: {
rl_src = LoadValueWide(rl_src, kFPReg);
@@ -217,7 +218,8 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, Re
return;
}
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(kQuickD2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 47669db979..8d20f1b37e 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -138,10 +138,10 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocatio
RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, -1);
OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
- LIR* branch1 = OpCondBranch(kCondLt, NULL);
- LIR* branch2 = OpCondBranch(kCondGt, NULL);
+ LIR* branch1 = OpCondBranch(kCondLt, nullptr);
+ LIR* branch2 = OpCondBranch(kCondGt, nullptr);
OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
- LIR* branch3 = OpCondBranch(kCondEq, NULL);
+ LIR* branch3 = OpCondBranch(kCondEq, nullptr);
LIR* it = OpIT(kCondHi, "E");
NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
@@ -389,7 +389,7 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_va
* generate the long form in an attempt to avoid an extra assembly pass.
* TODO: consider interspersing slowpaths in code following unconditional branches.
*/
- bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
+ bool skip = ((target != nullptr) && (target->opcode == kPseudoThrowTarget));
skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
if (!skip && reg.Low8() && (check_value == 0)) {
if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
@@ -882,7 +882,7 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
RegLocation rl_new_value;
if (!is_long) {
- rl_new_value = LoadValue(rl_src_new_value, LocToRegClass(rl_src_new_value));
+ rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
} else if (load_early) {
rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
}
@@ -905,7 +905,7 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
RegLocation rl_expected;
if (!is_long) {
- rl_expected = LoadValue(rl_src_expected, LocToRegClass(rl_src_new_value));
+ rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
} else if (load_early) {
rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
} else {
@@ -1159,12 +1159,12 @@ void ArmMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
#ifdef ARM_R4_SUSPEND_FLAG
NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
- return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+ return OpCondBranch((target == nullptr) ? kCondEq : kCondNe, target);
#else
RegStorage t_reg = AllocTemp();
LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
t_reg, kUnsignedHalf, kNotVolatile);
- LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
+ LIR* cmp_branch = OpCmpImmBranch((target == nullptr) ? kCondNe : kCondEq, t_reg,
0, target);
FreeTemp(t_reg);
return cmp_branch;
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 25ea6941c0..2ef92f851b 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -90,7 +90,7 @@ LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
}
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&literal_list_, value);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -411,7 +411,7 @@ LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_s
return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
} else {
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
}
@@ -695,7 +695,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
}
LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
- LIR* res = NULL;
+ LIR* res = nullptr;
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
if (r_dest.IsFloat()) {
@@ -721,10 +721,10 @@ LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
}
}
- if (res == NULL) {
+ if (res == nullptr) {
// No short form - load from the literal pool.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -814,7 +814,7 @@ LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
- LIR* store = NULL;
+ LIR* store = nullptr;
ArmOpcode opcode = kThumbBkpt;
bool thumb_form = (all_low_regs && (scale == 0));
RegStorage reg_ptr;
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 2f1ae66bfc..b78fb80aa0 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -663,7 +663,7 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
// new_lir replaces orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -674,7 +674,7 @@ void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -889,8 +889,8 @@ void Arm64Mir2Lir::AssembleLIR() {
generation ^= 1;
// Note: nodes requiring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
// NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
// the time of insertion. Note that inserted instructions don't need use/def flags, but do
// need size and pc-rel status properly updated.
@@ -1037,7 +1037,7 @@ void Arm64Mir2Lir::AssembleLIR() {
// Check that the instruction preceding the multiply-accumulate is a load or store.
if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
// insert a NOP between the load/store and the multiply-accumulate.
- LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+ LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, nullptr);
new_lir->offset = lir->offset;
new_lir->flags.fixup = kFixupNone;
new_lir->flags.size = EncodingMap[kA64Nop0].size;
@@ -1108,7 +1108,7 @@ size_t Arm64Mir2Lir::GetInsnSize(LIR* lir) {
uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
A64Opcode opcode = UNWIDE(lir->opcode);
if (!lir->flags.is_nop) {
@@ -1123,8 +1123,8 @@ uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t off
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 4abbd77d88..9a7c2ade18 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -127,7 +127,7 @@ void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLoca
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, key_reg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -167,7 +167,7 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
} else {
      // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -176,12 +176,12 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w3, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
// w3 is zero except for the rb bits here. Copy the read barrier bits into w1.
OpRegRegReg(kOpOr, rs_w1, rs_w1, rs_w3);
OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
not_unlocked_branch->target = slow_path_target;
@@ -220,7 +220,7 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
      // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -235,16 +235,16 @@ void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
OpRegRegImm(kOpAnd, rs_w3, rs_w2, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w2, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw1, rw2, rx3);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b7dbd0a97d..9340d01640 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -803,7 +803,7 @@ bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
- LIR* early_exit = OpCondBranch(kCondNe, NULL);
+ LIR* early_exit = OpCondBranch(kCondNe, nullptr);
NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index e9ad8ba175..483231f931 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -121,7 +121,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
// Wide, as we need 8B alignment.
data_target = AddWideData(&literal_list_, value, 0);
}
@@ -148,7 +148,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -525,7 +525,7 @@ LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -624,7 +624,7 @@ LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
@@ -658,7 +658,7 @@ LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
@@ -1190,7 +1190,7 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
*/
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) {
- LIR* load = NULL;
+ LIR* load = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
@@ -1286,7 +1286,7 @@ LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
- LIR* store = NULL;
+ LIR* store = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f4a318cc2..fb68335e6e 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1080,7 +1080,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
reginfo_map_.reserve(RegStorage::kMaxRegs);
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
- // Reserve pointer id 0 for nullptr.
+ // Reserve pointer id 0 for null.
size_t null_idx = WrapPointer<void>(nullptr);
DCHECK_EQ(null_idx, 0U);
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 4ac6c0c5b5..f5e6c09dba 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -368,9 +368,9 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
#define UNSAFE_GET_PUT(type, code, type_flags) \
INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- type_flags & ~kIntrinsicFlagIsObject), \
+ type_flags), \
INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- (type_flags | kIntrinsicFlagIsVolatile) & ~kIntrinsicFlagIsObject), \
+ type_flags | kIntrinsicFlagIsVolatile), \
INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
type_flags), \
INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
@@ -392,7 +392,7 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
DexFileMethodInliner::DexFileMethodInliner()
: lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
- dex_file_(NULL) {
+ dex_file_(nullptr) {
static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
"bad arraysize for kClassCacheNames");
@@ -507,6 +507,7 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
intrinsic.d.data & kIntrinsicFlagIsObject);
case kIntrinsicUnsafeGet:
return backend->GenInlinedUnsafeGet(info, intrinsic.d.data & kIntrinsicFlagIsLong,
+ intrinsic.d.data & kIntrinsicFlagIsObject,
intrinsic.d.data & kIntrinsicFlagIsVolatile);
case kIntrinsicUnsafePut:
return backend->GenInlinedUnsafePut(info, intrinsic.d.data & kIntrinsicFlagIsLong,
@@ -752,6 +753,7 @@ bool DexFileMethodInliner::GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, M
insn->dalvikInsn.opcode = Instruction::CONST;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = method.d.data;
+ insn->meta.method_lowering_info = invoke->meta.method_lowering_info; // Preserve type info.
bb->InsertMIRAfter(move_result, insn);
return true;
}
@@ -790,6 +792,7 @@ bool DexFileMethodInliner::GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* b
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = arg;
+ insn->meta.method_lowering_info = invoke->meta.method_lowering_info; // Preserve type info.
bb->InsertMIRAfter(move_result, insn);
return true;
}
@@ -912,6 +915,7 @@ bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MI
}
move->dalvikInsn.vA = move_result->dalvikInsn.vA;
move->dalvikInsn.vB = return_reg;
+ move->meta.method_lowering_info = invoke->meta.method_lowering_info; // Preserve type info.
bb->InsertMIRAfter(insn, move);
}
return true;
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index b132c4cc54..de5e0410fb 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -87,7 +87,7 @@ void Mir2Lir::GenIfNullUseHelperImmMethod(
const RegStorage r_result_;
};
- LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
@@ -113,10 +113,10 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
}
- // r_base now points at static storage (Class*) or nullptr if the type is not yet resolved.
+ // r_base now points at static storage (Class*) or null if the type is not yet resolved.
LIR* unresolved_branch = nullptr;
if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
- // Check if r_base is nullptr.
+ // Check if r_base is null.
unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
}
LIR* uninit_branch = nullptr;
@@ -136,8 +136,8 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
public:
// There are up to two branches to the static field slow path, the "unresolved" when the type
- // entry in the dex cache is nullptr, and the "uninit" when the class is not yet initialized.
- // At least one will be non-nullptr here, otherwise we wouldn't generate the slow path.
+ // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
+ // At least one will be non-null here, otherwise we wouldn't generate the slow path.
StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
RegStorage r_base_in, RegStorage r_method_in)
: LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
@@ -165,7 +165,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
}
private:
- // Second branch to the slow path, or nullptr if there's only one branch.
+ // Second branch to the slow path, or null if there's only one branch.
LIR* const second_branch_;
const int storage_index_;
@@ -173,7 +173,7 @@ RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& fiel
RegStorage r_method_;
};
- // The slow path is invoked if the r_base is nullptr or the class pointed
+ // The slow path is invoked if the r_base is null or the class pointed
// to by it is not initialized.
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -319,7 +319,7 @@ LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
- return NULL;
+ return nullptr;
}
return GenNullCheck(m_reg);
}
@@ -1188,7 +1188,7 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
DCHECK(!IsSameReg(result_reg, object.reg));
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
RegStorage check_class = AllocTypedTemp(false, kRefReg);
RegStorage object_class = AllocTypedTemp(false, kRefReg);
@@ -1287,7 +1287,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
// On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
LoadConstant(rl_result.reg, 0);
}
- LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);
/* load object->klass_ */
RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg1 will hold the Class* of ref.
@@ -1295,7 +1295,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
ref_class_reg, kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
- LIR* branchover = NULL;
+ LIR* branchover = nullptr;
if (type_known_final) {
// rl_result == ref == class.
GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
@@ -1320,7 +1320,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
if (!type_known_abstract) {
/* Uses branchovers */
LoadConstant(rl_result.reg, 1); // assume true
- branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
+ branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
}
OpRegCopy(TargetReg(kArg0, kRef), class_reg); // .ne case - arg0 <= class
@@ -2088,7 +2088,7 @@ void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
}
void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
- RegLocation rl_src) {
+ RegLocation rl_src, RegisterClass return_reg_class) {
/*
* Don't optimize the register usage since it calls out to support
* functions
@@ -2097,12 +2097,10 @@ void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_d
FlushAllRegs(); /* Send everything to home location */
CallRuntimeHelperRegLocation(trampoline, rl_src, false);
if (rl_dest.wide) {
- RegLocation rl_result;
- rl_result = GetReturnWide(LocToRegClass(rl_dest));
+ RegLocation rl_result = GetReturnWide(return_reg_class);
StoreValueWide(rl_dest, rl_result);
} else {
- RegLocation rl_result;
- rl_result = GetReturn(LocToRegClass(rl_dest));
+ RegLocation rl_result = GetReturn(return_reg_class);
StoreValue(rl_dest, rl_result);
}
}
@@ -2131,7 +2129,7 @@ void Mir2Lir::GenSuspendTest(int opt_flags) {
}
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
FlushAllRegs();
- LIR* branch = OpTestSuspend(NULL);
+ LIR* branch = OpTestSuspend(nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
} else {
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index db7095dafb..1eb3a5f1b5 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -882,8 +882,6 @@ RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
- DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -896,8 +894,6 @@ RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
- DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -1338,7 +1334,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
}
bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
- bool is_long, bool is_volatile) {
+ bool is_long, bool is_object, bool is_volatile) {
if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
// TODO: add Mips and Mips64 implementations.
return false;
@@ -1351,7 +1347,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
+ RegLocation rl_result = EvalLoc(rl_dest, is_object ? kRefReg : kCoreReg, true);
if (is_long) {
if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
|| cu_->instruction_set == kArm64) {
@@ -1411,7 +1407,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
FreeTemp(rl_temp_offset);
}
} else {
- rl_value = LoadValue(rl_src_value, LocToRegClass(rl_src_value));
+ rl_value = LoadValue(rl_src_value, is_object ? kRefReg : kCoreReg);
if (rl_value.ref) {
StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
} else {
@@ -1499,11 +1495,13 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
FreeCallTemps();
if (info->result.location != kLocInvalid) {
// We have a following MOVE_RESULT - do it now.
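+      // Pick the register class from the method's return shorty.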
+ RegisterClass reg_class =
+ ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]);
if (info->result.wide) {
- RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
+ RegLocation ret_loc = GetReturnWide(reg_class);
StoreValueWide(info->result, ret_loc);
} else {
- RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
+ RegLocation ret_loc = GetReturn(reg_class);
StoreValue(info->result, ret_loc);
}
}
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 54e5742837..4215e8bc50 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -46,7 +46,7 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
if (rl_src.location == kLocPhysReg) {
OpRegCopy(r_dest, rl_src.reg);
} else if (IsInexpensiveConstant(rl_src)) {
- // On 64-bit targets, will sign extend. Make sure constant reference is always NULL.
+ // On 64-bit targets, will sign extend. Make sure constant reference is always null.
DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 936ff42c8c..f9b9684284 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -613,7 +613,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
LOG(FATAL) << "Unexpected branch kind " << opcode;
UNREACHABLE();
}
- LIR* hop_target = NULL;
+ LIR* hop_target = nullptr;
if (!unconditional) {
hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
@@ -650,7 +650,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success.
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
continue;
}
@@ -668,7 +668,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
* (label2 - label1), where label1 is a standard
* kPseudoTargetLabel and is stored in operands[2].
* If operands[3] is null, then label2 is a kPseudoTargetLabel
- * and is found in lir->target. If operands[3] is non-NULL,
+ * and is found in lir->target. If operands[3] is non-nullptr,
* then it is a Switch/Data table.
*/
int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
@@ -863,7 +863,7 @@ int MipsMir2Lir::AssignInsnOffsets() {
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(lir->opcode >= 0)) {
if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 05570e4bde..39b9cc7056 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -112,7 +112,7 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
// Test loop.
RegStorage r_key = AllocTemp();
LIR* loop_label = NewLIR0(kPseudoTargetLabel);
- LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+ LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, nullptr);
Load32Disp(r_base, 0, r_key);
OpRegImm(kOpAdd, r_base, 8);
OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
@@ -188,7 +188,7 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
tab_rec->anchor = base_label;
// Bounds check - if < 0 or >= size continue following switch.
- LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+ LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, nullptr);
// Materialize the table base pointer.
RegStorage r_base = AllocPtrSizeTemp();
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1ca8bb618b..9319c64784 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -68,7 +68,7 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocati
NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
@@ -128,7 +128,7 @@ LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage sr
break;
default:
LOG(FATAL) << "No support for ConditionCode: " << cond;
- return NULL;
+ return nullptr;
}
if (cmp_zero) {
branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
@@ -278,7 +278,7 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
- LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, nullptr);
LoadConstant(rs_dest, false_val);
LIR* target_label = NewLIR0(kPseudoTargetLabel);
ne_branchover->target = target_label;
@@ -447,7 +447,7 @@ void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
// Test suspend flag, return target of taken suspend branch.
LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
+ return OpCmpImmBranch((target == nullptr) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
}
// Decrement register and branch on condition.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8ab542270d..95c61cd4ed 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -566,7 +566,7 @@ LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
/* Load value from base + scaled index. */
LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
LIR *res;
MipsOpCode opcode = kMipsNop;
bool is64bit = cu_->target64 && r_dest.Is64Bit();
@@ -640,7 +640,7 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
 // Store value to base + scaled index.
LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
MipsOpCode opcode = kMipsNop;
RegStorage t_reg = AllocTemp();
@@ -696,8 +696,8 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
* rlp and then restore.
*/
LIR *res;
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
@@ -857,8 +857,8 @@ LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
LIR *res;
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 961cd4f06b..e9e9161a1c 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -104,19 +104,6 @@ RegisterClass Mir2Lir::ShortyToRegClass(char shorty_type) {
return res;
}
-RegisterClass Mir2Lir::LocToRegClass(RegLocation loc) {
- RegisterClass res;
- if (loc.fp) {
- DCHECK(!loc.ref) << "At most, one of ref/fp may be set";
- res = kFPReg;
- } else if (loc.ref) {
- res = kRefReg;
- } else {
- res = kCoreReg;
- }
- return res;
-}
-
void Mir2Lir::LockArg(size_t in_position) {
RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
@@ -560,25 +547,20 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
GenSuspendTest(opt_flags);
}
- DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
- StoreValue(GetReturn(LocToRegClass(rl_src[0])), rl_src[0]);
+ StoreValue(GetReturn(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
break;
case Instruction::RETURN_WIDE:
if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
GenSuspendTest(opt_flags);
}
- DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
- StoreValueWide(GetReturnWide(LocToRegClass(rl_src[0])), rl_src[0]);
- break;
-
- case Instruction::MOVE_RESULT_WIDE:
- StoreValueWide(rl_dest, GetReturnWide(LocToRegClass(rl_dest)));
+ StoreValueWide(GetReturnWide(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
break;
case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_WIDE:
case Instruction::MOVE_RESULT_OBJECT:
- StoreValue(rl_dest, GetReturn(LocToRegClass(rl_dest)));
+ // Already processed with invoke or filled-new-array.
break;
case Instruction::MOVE:
@@ -1237,7 +1219,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
block_label_list_[block_id].flags.fixup = kFixupLabel;
AppendLIR(&block_label_list_[block_id]);
- LIR* head_lir = NULL;
+ LIR* head_lir = nullptr;
// If this is a catch block, export the start address.
if (bb->catch_entry) {
@@ -1263,7 +1245,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
}
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
ResetRegPool();
if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
ClobberAllTemps();
@@ -1287,7 +1269,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
GenPrintLabel(mir);
// Remember the first LIR for this block.
- if (head_lir == NULL) {
+ if (head_lir == nullptr) {
head_lir = &block_label_list_[bb->id];
// Set the first label as a scheduling barrier.
DCHECK(!head_lir->flags.use_def_invalid);
@@ -1327,7 +1309,7 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
cu_->NewTimingSplit("SpecialMIR2LIR");
// Find the first DalvikByteCode block.
DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
- BasicBlock*bb = NULL;
+ BasicBlock*bb = nullptr;
for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
if (candidate->block_type == kDalvikByteCode) {
@@ -1335,11 +1317,11 @@ bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
break;
}
}
- if (bb == NULL) {
+ if (bb == nullptr) {
return false;
}
DCHECK_EQ(bb->start_offset, 0);
- DCHECK(bb->first_mir_insn != NULL);
+ DCHECK(bb->first_mir_insn != nullptr);
// Get the first instruction.
MIR* mir = bb->first_mir_insn;
@@ -1361,17 +1343,17 @@ void Mir2Lir::MethodMIR2LIR() {
PreOrderDfsIterator iter(mir_graph_);
BasicBlock* curr_bb = iter.Next();
BasicBlock* next_bb = iter.Next();
- while (curr_bb != NULL) {
+ while (curr_bb != nullptr) {
MethodBlockCodeGen(curr_bb);
// If the fall_through block is no longer laid out consecutively, drop in a branch.
BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
- if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+ if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
}
curr_bb = next_bb;
do {
next_bb = iter.Next();
- } while ((next_bb != NULL) && (next_bb->block_type == kDead));
+ } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
}
HandleSlowPaths();
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index db59714742..8f08a51e95 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -388,7 +388,7 @@ class Mir2Lir {
LIR* DefEnd() { return def_end_; }
void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
void ResetDefBody() { def_start_ = def_end_ = nullptr; }
- // Find member of aliased set matching storage_used; return nullptr if none.
+ // Find member of aliased set matching storage_used; return null if none.
RegisterInfo* FindMatchingView(uint32_t storage_used) {
RegisterInfo* res = Master();
for (; res != nullptr; res = res->GetAliasChain()) {
@@ -605,7 +605,7 @@ class Mir2Lir {
char* ArenaStrdup(const char* str) {
size_t len = strlen(str) + 1;
char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
- if (res != NULL) {
+ if (res != nullptr) {
strncpy(res, str, len);
}
return res;
@@ -634,7 +634,6 @@ class Mir2Lir {
}
RegisterClass ShortyToRegClass(char shorty_type);
- RegisterClass LocToRegClass(RegLocation loc);
int ComputeFrameSize();
void Materialize();
virtual CompiledMethod* GetCompiledMethod();
@@ -651,7 +650,7 @@ class Mir2Lir {
void DumpPromotionMap();
void CodegenDump();
LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
- int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+ int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
LIR* NewLIR0(int opcode);
LIR* NewLIR1(int opcode, int dest);
LIR* NewLIR2(int opcode, int dest, int src1);
@@ -846,7 +845,8 @@ class Mir2Lir {
RegLocation rl_src, int lit);
virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2, int flags);
- void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
+ void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+ RegisterClass return_reg_class);
void GenSuspendTest(int opt_flags);
void GenSuspendTestAndBranch(int opt_flags, LIR* target);
@@ -954,7 +954,7 @@ class Mir2Lir {
virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
bool GenInlinedStringCompareTo(CallInfo* info);
virtual bool GenInlinedCurrentThread(CallInfo* info);
- bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
+ bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_object, bool is_volatile);
bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
bool is_volatile, bool is_ordered);
@@ -1120,8 +1120,8 @@ class Mir2Lir {
* @param base_reg The register holding the base address.
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
- * @param target branch target (or nullptr)
- * @param compare output for getting LIR for comparison (or nullptr)
+ * @param target branch target (or null)
+ * @param compare output for getting LIR for comparison (or null)
* @returns The branch instruction that was generated.
*/
virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
@@ -1854,7 +1854,7 @@ class Mir2Lir {
// to deduplicate the masks.
ResourceMaskCache mask_cache_;
- // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+ // Record the MIR that generated a given safepoint (null for prologue safepoints).
ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
// The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
@@ -1869,7 +1869,7 @@ class Mir2Lir {
// For architectures that don't have true PC-relative addressing (see pc_rel_temp_
  // above) and also have a limited range of offsets for loads, it's useful to
// know the minimum offset into the dex cache arrays, so we calculate that as well
- // if pc_rel_temp_ isn't nullptr.
+ // if pc_rel_temp_ isn't null.
uint32_t dex_cache_arrays_min_offset_;
dwarf::LazyDebugFrameOpCodeWriter cfi_;
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 555d5b9cf3..b3c73557a7 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -100,7 +100,7 @@ class QuickCFITest : public CFITest {
}
}
m2l->AdjustSpillMask();
- m2l->GenEntrySequence(NULL, m2l->LocCReturnRef());
+ m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef());
m2l->GenExitSequence();
m2l->HandleSlowPaths();
m2l->AssembleLIR();
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fc3e687469..39eb117e9c 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -102,7 +102,7 @@ static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
"kDisabledOpts unexpected");
-// Supported shorty types per instruction set. nullptr means that all are available.
+// Supported shorty types per instruction set. null means that all are available.
// Z : boolean
// B : byte
// S : short
@@ -422,7 +422,7 @@ static int kInvokeOpcodes[] = {
Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
};
-// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
+// Unsupported opcodes. null can be used when everything is supported. Size of the lists is
// recorded below.
static const int* kUnsupportedOpcodes[] = {
// 0 = kNone.
@@ -515,7 +515,7 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e779479780..8ec86fa56c 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -935,7 +935,7 @@ bool Mir2Lir::CheckCorePoolSanity() {
RegStorage my_reg = info->GetReg();
RegStorage partner_reg = info->Partner();
RegisterInfo* partner = GetRegInfo(partner_reg);
- DCHECK(partner != NULL);
+ DCHECK(partner != nullptr);
DCHECK(partner->IsWide());
DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
DCHECK(partner->IsLive());
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index af19f5eaed..eb3335798e 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1633,7 +1633,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
AssemblerStatus res = kSuccess; // Assume success
const bool kVerbosePcFixup = false;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
if (IsPseudoLirOp(lir->opcode)) {
continue;
}
@@ -1646,7 +1646,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
switch (lir->opcode) {
case kX86Jcc8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1679,7 +1679,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jcc32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1695,7 +1695,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jecxz8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc;
pc = lir->offset + 2; // opcode + rel8
CodeOffset target = target_lir->offset;
@@ -1706,7 +1706,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jmp8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1738,7 +1738,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
}
case kX86Jmp32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1748,7 +1748,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
default:
if (lir->flags.fixup == kFixupLoad) {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset target = target_lir->offset;
// Handle 64 bit RIP addressing.
if (lir->operands[1] == kRIPReg) {
@@ -1950,7 +1950,7 @@ int X86Mir2Lir::AssignInsnOffsets() {
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(!IsPseudoLirOp(lir->opcode))) {
if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d7a5eb04db..e2364d8548 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -80,7 +80,7 @@ void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocat
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
RegStorage addr_for_jump;
if (cu_->target64) {
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index cfe0480c54..8e81746db5 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -309,7 +309,8 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
} else {
- GenConversionCall(kQuickF2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
}
return;
case Instruction::DOUBLE_TO_LONG:
@@ -334,7 +335,8 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
} else {
- GenConversionCall(kQuickD2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
}
return;
default:
@@ -482,13 +484,13 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
} else {
NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
- LIR* branch = NULL;
+ LIR* branch = nullptr;
if (unordered_gt) {
branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
}
// If the result reg can't be byte accessed, use a jump and move instead of a set.
if (!IsByteRegister(rl_result.reg)) {
- LIR* branch2 = NULL;
+ LIR* branch2 = nullptr;
if (unordered_gt) {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
@@ -511,7 +513,7 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) {
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
- LIR* branch = NULL;
+ LIR* branch = nullptr;
RegLocation rl_src1;
RegLocation rl_src2;
if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 1043815e10..943bfc0300 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1229,7 +1229,7 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
LockTemp(rs_r0);
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
- RegLocation rl_new_value = LoadValue(rl_src_new_value, LocToRegClass(rl_src_new_value));
+ RegLocation rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
@@ -1569,7 +1569,7 @@ LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
} else {
OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
}
- return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+ return OpCondBranch((target == nullptr) ? kCondNe : kCondEq, target);
}
// Decrement register and branch on condition
@@ -3005,7 +3005,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
// Assume that there is no match.
LoadConstant(result_reg, 0);
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
// We will use this register to compare to memory below.
// References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index a16e242d08..b4603793b4 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1281,7 +1281,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
RegLocation rl_return = GetReturn(kCoreReg);
RegLocation rl_dest = InlineTarget(info);
- // Is the string non-NULL?
+ // Is the string non-null?
LoadValueDirectFixed(rl_obj, rs_rDX);
GenNullCheck(rs_rDX, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index efcb9eefb5..61a1becac1 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -578,7 +578,7 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
} else if (pc_rel_base_reg_.Valid() || cu_->target64) {
// We will load the value from the literal area.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -642,8 +642,8 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_dest, OpSize size) {
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_dest.IsPair();
bool is64bit = ((size == k64) || (size == kDouble));
@@ -763,7 +763,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
}
}
- // Always return first load generated as this might cause a fault if base is nullptr.
+ // Always return first load generated as this might cause a fault if base is null.
return load;
}
@@ -791,8 +791,8 @@ LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_src, OpSize size,
int opt_flags) {
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_src.IsPair();
bool is64bit = (size == k64) || (size == kDouble);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 197f66d017..939bf40564 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -26,15 +26,15 @@ namespace art {
void MIRGraph::ClearAllVisitedFlags() {
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
bb->visited = false;
}
}
BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
- if (bb != NULL) {
+ if (bb != nullptr) {
if (bb->visited || bb->hidden) {
- bb = NULL;
+ bb = nullptr;
}
}
return bb;
@@ -42,13 +42,13 @@ BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
- if (res == NULL) {
+ if (res == nullptr) {
res = NeedsVisit(GetBasicBlock(bb->taken));
- if (res == NULL) {
+ if (res == nullptr) {
if (bb->successor_block_list_type != kNotUsed) {
for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
res = NeedsVisit(GetBasicBlock(sbi->block));
- if (res != NULL) {
+ if (res != nullptr) {
break;
}
}
@@ -75,7 +75,7 @@ void MIRGraph::RecordDFSOrders(BasicBlock* block) {
while (!succ.empty()) {
BasicBlock* curr = succ.back();
BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
- if (next_successor != NULL) {
+ if (next_successor != nullptr) {
MarkPreOrder(next_successor);
succ.push_back(next_successor);
continue;
@@ -107,7 +107,7 @@ void MIRGraph::ComputeDFSOrders() {
if (num_reachable_blocks_ != GetNumBlocks()) {
// Kill all unreachable blocks.
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
if (!bb->visited) {
bb->Kill(this);
}
@@ -121,7 +121,7 @@ void MIRGraph::ComputeDFSOrders() {
* register idx is defined in BasicBlock bb.
*/
bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
@@ -149,11 +149,11 @@ void MIRGraph::ComputeDefBlockMatrix() {
}
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
FindLocalLiveIn(bb);
}
AllNodesIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
FillDefBlockMatrix(bb);
}
@@ -247,7 +247,7 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
int num_total_blocks = GetBasicBlockListCount();
- if (bb->dominators == NULL) {
+ if (bb->dominators == nullptr) {
bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
true /* expandable */, kBitMapDominators);
bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
@@ -357,7 +357,7 @@ void MIRGraph::ComputeDominators() {
/* Initialize domination-related data structures */
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
InitializeDominationInfo(bb);
}
@@ -376,7 +376,7 @@ void MIRGraph::ComputeDominators() {
/* Compute the immediate dominators */
RepeatingReversePostOrderDfsIterator iter2(this);
bool change = false;
- for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
+ for (BasicBlock* bb = iter2.Next(false); bb != nullptr; bb = iter2.Next(change)) {
change = ComputeblockIDom(bb);
}
@@ -387,19 +387,19 @@ void MIRGraph::ComputeDominators() {
GetEntryBlock()->i_dom = 0;
PreOrderDfsIterator iter3(this);
- for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
+ for (BasicBlock* bb = iter3.Next(); bb != nullptr; bb = iter3.Next()) {
SetDominators(bb);
}
ReversePostOrderDfsIterator iter4(this);
- for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
+ for (BasicBlock* bb = iter4.Next(); bb != nullptr; bb = iter4.Next()) {
ComputeBlockDominators(bb);
}
// Compute the dominance frontier for each block.
ComputeDomPostOrderTraversal(GetEntryBlock());
PostOrderDOMIterator iter5(this);
- for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
+ for (BasicBlock* bb = iter5.Next(); bb != nullptr; bb = iter5.Next()) {
ComputeDominanceFrontier(bb);
}
@@ -434,7 +434,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
@@ -466,7 +466,7 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
void MIRGraph::FindPhiNodeBlocks() {
RepeatingPostOrderDfsIterator iter(this);
bool change = false;
- for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+ for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
change = ComputeBlockLiveIns(bb);
}
@@ -505,7 +505,7 @@ void MIRGraph::FindPhiNodeBlocks() {
*/
bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
/* Phi nodes are at the beginning of each block */
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
return true;
int ssa_reg = mir->ssa_rep->defs[0];
diff --git a/compiler/dex/type_inference.cc b/compiler/dex/type_inference.cc
new file mode 100644
index 0000000000..19d591ba41
--- /dev/null
+++ b/compiler/dex/type_inference.cc
@@ -0,0 +1,1067 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "type_inference.h"
+
+#include "base/bit_vector-inl.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex_file-inl.h"
+#include "driver/dex_compilation_unit.h"
+#include "mir_field_info.h"
+#include "mir_graph.h"
+#include "mir_method_info.h"
+
+namespace art {
+
+inline TypeInference::Type TypeInference::Type::ArrayType(uint32_t array_depth, Type nested_type) {
+ DCHECK_NE(array_depth, 0u);
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (array_depth << kBitArrayDepthStart) |
+ ((nested_type.raw_bits_ & kMaskWideAndType) << kArrayTypeShift));
+}
+
+inline TypeInference::Type TypeInference::Type::ArrayTypeFromComponent(Type component_type) {
+ if (component_type.ArrayDepth() == 0u) {
+ return ArrayType(1u, component_type);
+ }
+ if (UNLIKELY(component_type.ArrayDepth() == kMaxArrayDepth)) {
+ return component_type;
+ }
+ return Type(component_type.raw_bits_ + (1u << kBitArrayDepthStart)); // array_depth + 1u;
+}
+
+TypeInference::Type TypeInference::Type::ShortyType(char shorty) {
+ switch (shorty) {
+ case 'L':
+ return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+ case 'D':
+ return Type(kFlagLowWord | kFlagWide | kFlagFp);
+ case 'J':
+ return Type(kFlagLowWord | kFlagWide | kFlagCore);
+ case 'F':
+ return Type(kFlagLowWord | kFlagNarrow | kFlagFp);
+ default:
+ DCHECK(shorty == 'I' || shorty == 'S' || shorty == 'C' || shorty == 'B' || shorty == 'Z');
+ return Type(kFlagLowWord | kFlagNarrow | kFlagCore);
+ }
+}
+
+TypeInference::Type TypeInference::Type::DexType(const DexFile* dex_file, uint32_t type_idx) {
+ const char* desc = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_idx));
+ if (UNLIKELY(desc[0] == 'V')) {
+ return Unknown();
+ } else if (UNLIKELY(desc[0] == '[')) {
+ size_t array_depth = 0u;
+ while (*desc == '[') {
+ ++array_depth;
+ ++desc;
+ }
+ if (UNLIKELY(array_depth > kMaxArrayDepth)) {
+ LOG(WARNING) << "Array depth exceeds " << kMaxArrayDepth << ": " << array_depth
+ << " in dex file " << dex_file->GetLocation() << " type index " << type_idx;
+ array_depth = kMaxArrayDepth;
+ }
+ Type shorty_result = Type::ShortyType(desc[0]);
+ return ArrayType(array_depth, shorty_result);
+ } else {
+ return ShortyType(desc[0]);
+ }
+}
+
+bool TypeInference::Type::MergeArrayConflict(Type src_type) {
+ DCHECK(Ref());
+ DCHECK_NE(ArrayDepth(), src_type.ArrayDepth());
+ DCHECK_GE(std::min(ArrayDepth(), src_type.ArrayDepth()), 1u);
+ bool size_conflict =
+ (ArrayDepth() == 1u && (raw_bits_ & kFlagArrayWide) != 0u) ||
+ (src_type.ArrayDepth() == 1u && (src_type.raw_bits_ & kFlagArrayWide) != 0u);
+ // Mark all three array type bits so that merging any other type bits will not change this type.
+ return Copy(Type((raw_bits_ & kMaskNonArray) |
+ (1u << kBitArrayDepthStart) | kFlagArrayCore | kFlagArrayRef | kFlagArrayFp |
+ kFlagArrayNarrow | (size_conflict ? kFlagArrayWide : 0u)));
+}
+
+bool TypeInference::Type::MergeStrong(Type src_type) {
+ bool changed = MergeNonArrayFlags(src_type);
+ if (src_type.ArrayDepth() != 0u) {
+ if (ArrayDepth() == 0u) {
+ DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+ DCHECK_NE(src_type.raw_bits_ & kFlagRef, 0u);
+ raw_bits_ |= src_type.raw_bits_ & (~kMaskNonArray | kFlagRef);
+ changed = true;
+ } else if (ArrayDepth() == src_type.ArrayDepth()) {
+ changed |= MergeBits(src_type, kMaskArrayWideAndType);
+ } else if (src_type.ArrayDepth() == 1u &&
+ (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // Source type is [L or [? but current type is at least [[, preserve it.
+ } else if (ArrayDepth() == 1u &&
+ (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // Overwrite [? or [L with the source array type which is at least [[.
+ raw_bits_ = (raw_bits_ & kMaskNonArray) | (src_type.raw_bits_ & ~kMaskNonArray);
+ changed = true;
+ } else {
+ // Mark the array value type with conflict - both ref and fp.
+ changed |= MergeArrayConflict(src_type);
+ }
+ }
+ return changed;
+}
+
+bool TypeInference::Type::MergeWeak(Type src_type) {
+ bool changed = MergeNonArrayFlags(src_type);
+ if (src_type.ArrayDepth() != 0u && src_type.NonNull()) {
+ DCHECK_NE(src_type.ArrayDepth(), 0u);
+ if (ArrayDepth() == 0u) {
+ DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+ // Preserve current type.
+ } else if (ArrayDepth() == src_type.ArrayDepth()) {
+ changed |= MergeBits(src_type, kMaskArrayWideAndType);
+ } else if (src_type.ArrayDepth() == 1u &&
+ (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // Source type is [L or [? but current type is at least [[, preserve it.
+ } else if (ArrayDepth() == 1u &&
+ (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // We have [? or [L. If it's [?, upgrade to [L as the source array type is at least [[.
+ changed |= MergeBits(ObjectArrayType(), kMaskArrayWideAndType);
+ } else {
+ // Mark the array value type with conflict - both ref and fp.
+ changed |= MergeArrayConflict(src_type);
+ }
+ }
+ return changed;
+}
+
+TypeInference::CheckCastData::CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+ : mir_graph_(mir_graph),
+ alloc_(alloc),
+ num_blocks_(mir_graph->GetNumBlocks()),
+ num_sregs_(mir_graph->GetNumSSARegs()),
+ check_cast_map_(std::less<MIR*>(), alloc->Adapter()),
+ split_sreg_data_(std::less<int32_t>(), alloc->Adapter()) {
+}
+
+void TypeInference::CheckCastData::AddCheckCast(MIR* check_cast, Type type) {
+ DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+ type.CheckPureRef();
+ int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+ num_sregs_ += 1;
+ check_cast_map_.Put(check_cast, CheckCastMapValue{extra_s_reg, type}); // NOLINT
+ int32_t s_reg = check_cast->ssa_rep->uses[0];
+ auto lb = split_sreg_data_.lower_bound(s_reg);
+ if (lb == split_sreg_data_.end() || split_sreg_data_.key_comp()(s_reg, lb->first)) {
+ SplitSRegData split_s_reg_data = {
+ 0,
+ alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+ alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+ new (alloc_) ArenaBitVector(alloc_, num_blocks_, false)
+ };
+ std::fill_n(split_s_reg_data.starting_mod_s_reg, num_blocks_, INVALID_SREG);
+ std::fill_n(split_s_reg_data.ending_mod_s_reg, num_blocks_, INVALID_SREG);
+ split_s_reg_data.def_phi_blocks_->ClearAllBits();
+ BasicBlock* def_bb = FindDefBlock(check_cast);
+ split_s_reg_data.ending_mod_s_reg[def_bb->id] = s_reg;
+ split_s_reg_data.def_phi_blocks_->SetBit(def_bb->id);
+ lb = split_sreg_data_.PutBefore(lb, s_reg, split_s_reg_data);
+ }
+ lb->second.ending_mod_s_reg[check_cast->bb] = extra_s_reg;
+ lb->second.def_phi_blocks_->SetBit(check_cast->bb);
+}
+
+void TypeInference::CheckCastData::AddPseudoPhis() {
+ // Look for pseudo-phis where a split SSA reg merges with a differently typed version
+ // and initialize all starting_mod_s_reg.
+ DCHECK(!split_sreg_data_.empty());
+ ArenaBitVector* phi_blocks = new (alloc_) ArenaBitVector(alloc_, num_blocks_, false);
+
+ for (auto& entry : split_sreg_data_) {
+ SplitSRegData& data = entry.second;
+
+ // Find pseudo-phi nodes.
+ phi_blocks->ClearAllBits();
+ ArenaBitVector* input_blocks = data.def_phi_blocks_;
+ do {
+ for (uint32_t idx : input_blocks->Indexes()) {
+ BasicBlock* def_bb = mir_graph_->GetBasicBlock(idx);
+ if (def_bb->dom_frontier != nullptr) {
+ phi_blocks->Union(def_bb->dom_frontier);
+ }
+ }
+ } while (input_blocks->Union(phi_blocks));
+
+ // Find live pseudo-phis. Make sure they're merging the same SSA reg.
+ data.def_phi_blocks_->ClearAllBits();
+ int32_t s_reg = entry.first;
+ int v_reg = mir_graph_->SRegToVReg(s_reg);
+ for (uint32_t phi_bb_id : phi_blocks->Indexes()) {
+ BasicBlock* phi_bb = mir_graph_->GetBasicBlock(phi_bb_id);
+ DCHECK(phi_bb != nullptr);
+ DCHECK(phi_bb->data_flow_info != nullptr);
+ DCHECK(phi_bb->data_flow_info->live_in_v != nullptr);
+ if (IsSRegLiveAtStart(phi_bb, v_reg, s_reg)) {
+ int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+ num_sregs_ += 1;
+ data.starting_mod_s_reg[phi_bb_id] = extra_s_reg;
+ data.def_phi_blocks_->SetBit(phi_bb_id);
+ }
+ }
+
+ // SSA rename for s_reg.
+ TopologicalSortIterator iter(mir_graph_);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->data_flow_info == nullptr || bb->block_type == kEntryBlock) {
+ continue;
+ }
+ BasicBlockId bb_id = bb->id;
+ if (data.def_phi_blocks_->IsBitSet(bb_id)) {
+ DCHECK_NE(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+ } else {
+ DCHECK_EQ(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+ if (IsSRegLiveAtStart(bb, v_reg, s_reg)) {
+ // The earliest predecessor must have been processed already.
+ BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+ int32_t mod_s_reg = data.ending_mod_s_reg[pred_bb->id];
+ data.starting_mod_s_reg[bb_id] = (mod_s_reg != INVALID_SREG) ? mod_s_reg : s_reg;
+ } else if (data.ending_mod_s_reg[bb_id] != INVALID_SREG) {
+ // Start the original defining block with s_reg.
+ data.starting_mod_s_reg[bb_id] = s_reg;
+ }
+ }
+ if (data.ending_mod_s_reg[bb_id] == INVALID_SREG) {
+ // If the block doesn't define the modified SSA reg, it propagates the starting type.
+ data.ending_mod_s_reg[bb_id] = data.starting_mod_s_reg[bb_id];
+ }
+ }
+ }
+}
+
+void TypeInference::CheckCastData::InitializeCheckCastSRegs(Type* sregs) const {
+ for (const auto& entry : check_cast_map_) {
+ DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+ sregs[entry.second.modified_s_reg] = entry.second.type.AsNonNull();
+ }
+}
+
+void TypeInference::CheckCastData::MergeCheckCastConflicts(Type* sregs) const {
+ for (const auto& entry : check_cast_map_) {
+ DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+ sregs[entry.first->ssa_rep->uses[0]].MergeNonArrayFlags(
+ sregs[entry.second.modified_s_reg].AsNull());
+ }
+}
+
+void TypeInference::CheckCastData::MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const {
+ for (auto& entry : split_sreg_data_) {
+ for (uint32_t bb_id : entry.second.def_phi_blocks_->Indexes()) {
+ bb_df_attrs[bb_id] |= DF_NULL_TRANSFER_N;
+ }
+ }
+}
+
+void TypeInference::CheckCastData::Start(BasicBlock* bb) {
+ for (auto& entry : split_sreg_data_) {
+ entry.second.current_mod_s_reg = entry.second.starting_mod_s_reg[bb->id];
+ }
+}
+
+bool TypeInference::CheckCastData::ProcessPseudoPhis(BasicBlock* bb, Type* sregs) {
+ bool changed = false;
+ for (auto& entry : split_sreg_data_) {
+ DCHECK_EQ(entry.second.current_mod_s_reg, entry.second.starting_mod_s_reg[bb->id]);
+ if (entry.second.def_phi_blocks_->IsBitSet(bb->id)) {
+ int32_t* ending_mod_s_reg = entry.second.ending_mod_s_reg;
+ Type merged_type = sregs[entry.second.current_mod_s_reg];
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+ merged_type.MergeWeak(sregs[ending_mod_s_reg[pred_id]]);
+ }
+ if (UNLIKELY(!merged_type.IsDefined())) {
+ // This can happen during an initial merge of a loop head if the original def is
+ // actually an untyped null. (All other definitions are typed using the check-cast.)
+ } else if (merged_type.Wide()) {
+ // Ignore the pseudo-phi, just remember that there's a size mismatch.
+ sregs[entry.second.current_mod_s_reg].MarkSizeConflict();
+ } else {
+ DCHECK(merged_type.Narrow() && merged_type.LowWord() && !merged_type.HighWord());
+ // Propagate both down (fully) and up (without the "non-null" flag).
+ changed |= sregs[entry.second.current_mod_s_reg].Copy(merged_type);
+ merged_type = merged_type.AsNull();
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+ sregs[ending_mod_s_reg[pred_id]].MergeStrong(merged_type);
+ }
+ }
+ }
+ }
+ return changed;
+}
+
+void TypeInference::CheckCastData::ProcessCheckCast(MIR* mir) {
+ auto mir_it = check_cast_map_.find(mir);
+ DCHECK(mir_it != check_cast_map_.end());
+ auto sreg_it = split_sreg_data_.find(mir->ssa_rep->uses[0]);
+ DCHECK(sreg_it != split_sreg_data_.end());
+ sreg_it->second.current_mod_s_reg = mir_it->second.modified_s_reg;
+}
+
+TypeInference::SplitSRegData* TypeInference::CheckCastData::GetSplitSRegData(int32_t s_reg) {
+ auto it = split_sreg_data_.find(s_reg);
+ return (it == split_sreg_data_.end()) ? nullptr : &it->second;
+}
+
+BasicBlock* TypeInference::CheckCastData::FindDefBlock(MIR* check_cast) {
+ // Find the initial definition of the SSA reg used by the check-cast.
+ DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+ int32_t s_reg = check_cast->ssa_rep->uses[0];
+ if (mir_graph_->IsInVReg(s_reg)) {
+ return mir_graph_->GetEntryBlock();
+ }
+ int v_reg = mir_graph_->SRegToVReg(s_reg);
+ BasicBlock* bb = mir_graph_->GetBasicBlock(check_cast->bb);
+ DCHECK(bb != nullptr);
+ while (true) {
+ // Find the earliest predecessor in the topological sort order to ensure we don't
+ // get stuck in a loop.
+ BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+ // The s_reg was not valid at the end of pred_bb, so it must have been defined in bb.
+ return bb;
+ }
+ bb = pred_bb;
+ }
+}
+
+BasicBlock* TypeInference::CheckCastData::FindTopologicallyEarliestPredecessor(BasicBlock* bb) {
+ DCHECK(!bb->predecessors.empty());
+ const auto& indexes = mir_graph_->GetTopologicalSortOrderIndexes();
+ DCHECK_LT(bb->id, indexes.size());
+ size_t best_idx = indexes[bb->id];
+ BasicBlockId best_id = NullBasicBlockId;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_LT(pred_id, indexes.size());
+ if (best_idx > indexes[pred_id]) {
+ best_idx = indexes[pred_id];
+ best_id = pred_id;
+ }
+ }
+ // There must be at least one predecessor earlier than the bb.
+ DCHECK_LT(best_idx, indexes[bb->id]);
+ return mir_graph_->GetBasicBlock(best_id);
+}
+
+bool TypeInference::CheckCastData::IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg) {
+ DCHECK_EQ(v_reg, mir_graph_->SRegToVReg(s_reg));
+ DCHECK(bb != nullptr);
+ DCHECK(bb->data_flow_info != nullptr);
+ DCHECK(bb->data_flow_info->live_in_v != nullptr);
+ if (!bb->data_flow_info->live_in_v->IsBitSet(v_reg)) {
+ return false;
+ }
+ for (BasicBlockId pred_id : bb->predecessors) {
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+ return false;
+ }
+ }
+ return true;
+}
+
+TypeInference::TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+ : mir_graph_(mir_graph),
+ cu_(mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()),
+ check_cast_data_(!mir_graph->HasCheckCast() ? nullptr :
+ InitializeCheckCastData(mir_graph, alloc)),
+ num_sregs_(
+ check_cast_data_ != nullptr ? check_cast_data_->NumSRegs() : mir_graph->GetNumSSARegs()),
+ ifields_(mir_graph->GetIFieldLoweringInfoCount() == 0u ? nullptr :
+ PrepareIFieldTypes(cu_->dex_file, mir_graph, alloc)),
+ sfields_(mir_graph->GetSFieldLoweringInfoCount() == 0u ? nullptr :
+ PrepareSFieldTypes(cu_->dex_file, mir_graph, alloc)),
+ signatures_(mir_graph->GetMethodLoweringInfoCount() == 0u ? nullptr :
+ PrepareSignatures(cu_->dex_file, mir_graph, alloc)),
+ current_method_signature_(
+ Signature(cu_->dex_file, cu_->method_idx, (cu_->access_flags & kAccStatic) != 0, alloc)),
+ sregs_(alloc->AllocArray<Type>(num_sregs_, kArenaAllocMisc)),
+ bb_df_attrs_(alloc->AllocArray<uint64_t>(mir_graph->GetNumBlocks(), kArenaAllocDFInfo)) {
+ InitializeSRegs();
+}
+
+bool TypeInference::Apply(BasicBlock* bb) {
+ bool changed = false;
+ uint64_t bb_df_attrs = bb_df_attrs_[bb->id];
+ if (bb_df_attrs != 0u) {
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->Start(bb);
+ if (bb_df_attrs & DF_NULL_TRANSFER_N) {
+ changed |= check_cast_data_->ProcessPseudoPhis(bb, sregs_);
+ }
+ }
+ MIR* mir = bb->first_mir_insn;
+ MIR* main_mirs_end = ((bb_df_attrs & DF_SAME_TYPE_AB) != 0u) ? bb->last_mir_insn : nullptr;
+ for (; mir != main_mirs_end && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi;
+ mir = mir->next) {
+ // Special-case handling for Phi comes first because we have 2 Phis instead of a wide one.
+ // At least one input must have been previously processed. Look for the first
+ // occurrence of a high_word or low_word flag to determine the type.
+ size_t num_uses = mir->ssa_rep->num_uses;
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* defs = mir->ssa_rep->defs;
+ DCHECK_EQ(bb->predecessors.size(), num_uses);
+ Type merged_type = sregs_[defs[0]];
+ for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+ int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+ merged_type.MergeWeak(sregs_[input_mod_s_reg]);
+ }
+ if (UNLIKELY(!merged_type.IsDefined())) {
+ // No change
+ } else if (merged_type.HighWord()) {
+ // Ignore the high word phi, just remember if there's a size mismatch.
+ if (UNLIKELY(merged_type.LowWord())) {
+ sregs_[defs[0]].MarkSizeConflict();
+ }
+ } else {
+ // Propagate both down (fully) and up (without the "non-null" flag).
+ changed |= sregs_[defs[0]].Copy(merged_type);
+ merged_type = merged_type.AsNull();
+ for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+ int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+ changed |= UpdateSRegFromLowWordType(input_mod_s_reg, merged_type);
+ }
+ }
+ }
+
+ // Propagate types with MOVEs and AGETs, process CHECK_CASTs for modified SSA reg tracking.
+ for (; mir != main_mirs_end; mir = mir->next) {
+ uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+ size_t num_uses = mir->ssa_rep->num_uses;
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* defs = mir->ssa_rep->defs;
+
+ // Special handling for moves. Propagate type both ways.
+ if ((attrs & DF_IS_MOVE) != 0) {
+ int32_t used_mod_s_reg = ModifiedSReg(uses[0]);
+ int32_t defd_mod_s_reg = defs[0];
+
+ // The "non-null" flag is propagated only downwards from actual definitions and it's
+ // not initially marked for moves, so used sreg must be marked before defined sreg.
+ // The only exception is an inlined move where we know the type from the original invoke.
+ DCHECK(sregs_[used_mod_s_reg].NonNull() || !sregs_[defd_mod_s_reg].NonNull() ||
+ (mir->optimization_flags & MIR_CALLEE) != 0);
+ changed |= UpdateSRegFromLowWordType(used_mod_s_reg, sregs_[defd_mod_s_reg].AsNull());
+
+ // The value is the same, so either both registers are null or no register is.
+ // In any case we can safely propagate the array type down.
+ changed |= UpdateSRegFromLowWordType(defd_mod_s_reg, sregs_[used_mod_s_reg]);
+ if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[used_mod_s_reg].Ref())) {
+ // Mark type conflict: move instead of move-object.
+ sregs_[used_mod_s_reg].MarkTypeConflict();
+ }
+ continue;
+ }
+
+ // Handle AGET/APUT.
+ if ((attrs & DF_HAS_RANGE_CHKS) != 0) {
+ int32_t base_mod_s_reg = ModifiedSReg(uses[num_uses - 2u]);
+ int32_t mod_s_reg = (attrs & DF_DA) != 0 ? defs[0] : ModifiedSReg(uses[0]);
+ DCHECK_NE(sregs_[base_mod_s_reg].ArrayDepth(), 0u);
+ if (!sregs_[base_mod_s_reg].NonNull()) {
+ // If the base is null, don't propagate anything. All that we could determine
+ // has already been merged in the previous stage.
+ } else {
+ changed |= UpdateSRegFromLowWordType(mod_s_reg, sregs_[base_mod_s_reg].ComponentType());
+ Type array_type = Type::ArrayTypeFromComponent(sregs_[mod_s_reg]);
+ if ((attrs & DF_DA) != 0) {
+ changed |= sregs_[base_mod_s_reg].MergeStrong(array_type);
+ } else {
+ changed |= sregs_[base_mod_s_reg].MergeWeak(array_type);
+ }
+ }
+ if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[mod_s_reg].Ref())) {
+ // Mark type conflict: aget/aput instead of aget/aput-object.
+ sregs_[mod_s_reg].MarkTypeConflict();
+ }
+ continue;
+ }
+
+ // Special-case handling for check-cast to advance modified SSA reg.
+ if (UNLIKELY((attrs & DF_CHK_CAST) != 0)) {
+ DCHECK(check_cast_data_ != nullptr);
+ check_cast_data_->ProcessCheckCast(mir);
+ }
+ }
+
+ // Propagate types for IF_cc if present.
+ if (mir != nullptr) {
+ DCHECK(mir == bb->last_mir_insn);
+ DCHECK(mir->next == nullptr);
+ DCHECK_NE(MIRGraph::GetDataFlowAttributes(mir) & DF_SAME_TYPE_AB, 0u);
+ DCHECK_EQ(mir->ssa_rep->num_uses, 2u);
+ const int32_t* uses = mir->ssa_rep->uses;
+ int32_t mod_s_reg0 = ModifiedSReg(uses[0]);
+ int32_t mod_s_reg1 = ModifiedSReg(uses[1]);
+ changed |= sregs_[mod_s_reg0].MergeWeak(sregs_[mod_s_reg1].AsNull());
+ changed |= sregs_[mod_s_reg1].MergeWeak(sregs_[mod_s_reg0].AsNull());
+ }
+ }
+ return changed;
+}
+
+void TypeInference::Finish() {
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->MergeCheckCastConflicts(sregs_);
+ }
+
+ size_t num_sregs = mir_graph_->GetNumSSARegs(); // Without the extra SSA regs.
+ for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+ if (sregs_[s_reg].SizeConflict()) {
+ /*
+ * The dex bytecode definition does not explicitly outlaw defining the same virtual
+ * register for use in both a 32-bit and 64-bit pair context. However, dx
+ * does not generate this pattern (at least recently). Further, in the next revision of
+ * dex, we will forbid this. To support the few cases in the wild, detect this pattern
+ * and punt to the interpreter.
+ */
+ LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+ << " has size conflict block for sreg " << s_reg
+ << ", punting to interpreter.";
+ mir_graph_->SetPuntToInterpreter(true);
+ return;
+ }
+ }
+
+ size_t conflict_s_reg = 0;
+ bool type_conflict = false;
+ for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+ Type type = sregs_[s_reg];
+ RegLocation* loc = &mir_graph_->reg_location_[s_reg];
+ loc->wide = type.Wide();
+ loc->defined = type.IsDefined();
+ loc->fp = type.Fp();
+ loc->core = type.Core();
+ loc->ref = type.Ref();
+ loc->high_word = type.HighWord();
+ if (UNLIKELY(type.TypeConflict())) {
+ type_conflict = true;
+ conflict_s_reg = s_reg;
+ }
+ }
+
+ if (type_conflict) {
+ /*
+ * We don't normally expect to see a Dalvik register definition used both as a
+ * floating point and core value, though technically it could happen with constants.
+ * Until we have proper typing, detect this situation and disable register promotion
+ * (which relies on the distinction between core and fp usages).
+ */
+ LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+ << " has type conflict block for sreg " << conflict_s_reg
+ << ", disabling register promotion.";
+ cu_->disable_opt |= (1 << kPromoteRegs);
+ }
+}
+
+TypeInference::Type TypeInference::FieldType(const DexFile* dex_file, uint32_t field_idx) {
+ uint32_t type_idx = dex_file->GetFieldId(field_idx).type_idx_;
+ Type result = Type::DexType(dex_file, type_idx);
+ return result;
+}
+
+TypeInference::Type* TypeInference::PrepareIFieldTypes(const DexFile* dex_file,
+ MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ size_t count = mir_graph->GetIFieldLoweringInfoCount();
+ Type* ifields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+ for (uint32_t i = 0u; i != count; ++i) {
+ // NOTE: Quickened field accesses have invalid FieldIndex() but they are always resolved.
+ const MirFieldInfo& info = mir_graph->GetIFieldLoweringInfo(i);
+ const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+ uint32_t field_idx = info.IsResolved() ? info.DeclaringFieldIndex() : info.FieldIndex();
+ ifields[i] = FieldType(current_dex_file, field_idx);
+ DCHECK_EQ(info.MemAccessType() == kDexMemAccessWide, ifields[i].Wide());
+ DCHECK_EQ(info.MemAccessType() == kDexMemAccessObject, ifields[i].Ref());
+ }
+ return ifields;
+}
+
+TypeInference::Type* TypeInference::PrepareSFieldTypes(const DexFile* dex_file,
+ MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ size_t count = mir_graph->GetSFieldLoweringInfoCount();
+ Type* sfields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+ for (uint32_t i = 0u; i != count; ++i) {
+ // FieldIndex() is always valid for static fields (no quickened instructions).
+ sfields[i] = FieldType(dex_file, mir_graph->GetSFieldLoweringInfo(i).FieldIndex());
+ }
+ return sfields;
+}
+
+TypeInference::MethodSignature TypeInference::Signature(const DexFile* dex_file,
+ uint32_t method_idx,
+ bool is_static,
+ ScopedArenaAllocator* alloc) {
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
+ const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+ Type return_type = Type::DexType(dex_file, proto_id.return_type_idx_);
+ const DexFile::TypeList* type_list = dex_file->GetProtoParameters(proto_id);
+ size_t this_size = (is_static ? 0u : 1u);
+ size_t param_size = ((type_list != nullptr) ? type_list->Size() : 0u);
+ size_t size = this_size + param_size;
+ Type* param_types = (size != 0u) ? alloc->AllocArray<Type>(size, kArenaAllocDFInfo) : nullptr;
+ if (!is_static) {
+ param_types[0] = Type::DexType(dex_file, method_id.class_idx_);
+ }
+ for (size_t i = 0; i != param_size; ++i) {
+ uint32_t type_idx = type_list->GetTypeItem(i).type_idx_;
+ param_types[this_size + i] = Type::DexType(dex_file, type_idx);
+ }
+ return MethodSignature{ return_type, size, param_types }; // NOLINT
+}
+
+TypeInference::MethodSignature* TypeInference::PrepareSignatures(const DexFile* dex_file,
+ MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ size_t count = mir_graph->GetMethodLoweringInfoCount();
+ MethodSignature* signatures = alloc->AllocArray<MethodSignature>(count, kArenaAllocDFInfo);
+ for (uint32_t i = 0u; i != count; ++i) {
+ // NOTE: Quickened invokes have invalid MethodIndex() but they are always resolved.
+ const MirMethodInfo& info = mir_graph->GetMethodLoweringInfo(i);
+ uint32_t method_idx = info.IsResolved() ? info.DeclaringMethodIndex() : info.MethodIndex();
+ const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+ signatures[i] = Signature(current_dex_file, method_idx, info.IsStatic(), alloc);
+ }
+ return signatures;
+}
+
+TypeInference::CheckCastData* TypeInference::InitializeCheckCastData(MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ if (!mir_graph->HasCheckCast()) {
+ return nullptr;
+ }
+
+ CheckCastData* data = nullptr;
+ const DexFile* dex_file = nullptr;
+ PreOrderDfsIterator iter(mir_graph);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST) {
+ if (data == nullptr) {
+ data = new (alloc) CheckCastData(mir_graph, alloc);
+ dex_file = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()->dex_file;
+ }
+ Type type = Type::DexType(dex_file, mir->dalvikInsn.vB);
+ data->AddCheckCast(mir, type);
+ }
+ }
+ }
+ if (data != nullptr) {
+ data->AddPseudoPhis();
+ }
+ return data;
+}
+
+void TypeInference::InitializeSRegs() {
+ std::fill_n(sregs_, num_sregs_, Type::Unknown());
+
+ /* Treat ArtMethod* as a normal reference */
+ sregs_[mir_graph_->GetMethodSReg()] = Type::NonArrayRefType();
+
+ // Initialize parameter SSA regs at method entry.
+ int32_t entry_param_s_reg = mir_graph_->GetFirstInVR();
+ for (size_t i = 0, size = current_method_signature_.num_params; i != size; ++i) {
+ Type param_type = current_method_signature_.param_types[i].AsNonNull();
+ sregs_[entry_param_s_reg] = param_type;
+ entry_param_s_reg += param_type.Wide() ? 2 : 1;
+ }
+ DCHECK_EQ(static_cast<uint32_t>(entry_param_s_reg),
+ mir_graph_->GetFirstInVR() + mir_graph_->GetNumOfInVRs());
+
+ // Initialize check-cast types.
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->InitializeCheckCastSRegs(sregs_);
+ }
+
+ // Initialize well-known SSA register definition types. Merge inferred types
+ // upwards where a single merge is enough (INVOKE arguments and return type,
+ // RETURN type, IPUT/SPUT source type).
+ // NOTE: Using topological sort order to make sure the definition comes before
+ // any upward merging. This allows simple assignment of the defined types
+ // instead of MergeStrong().
+ TopologicalSortIterator iter(mir_graph_);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ uint64_t bb_df_attrs = 0u;
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->Start(bb);
+ }
+ // Ignore pseudo-phis, we're not setting types for SSA regs that depend on them in this pass.
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+ bb_df_attrs |= attrs;
+
+ const uint32_t num_uses = mir->ssa_rep->num_uses;
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* defs = mir->ssa_rep->defs;
+
+ uint16_t opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ case Instruction::CONST:
+ case Instruction::CONST_HIGH16:
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ case Instruction::CONST_WIDE:
+ case Instruction::CONST_WIDE_HIGH16:
+ case Instruction::MOVE:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_FROM16:
+ case Instruction::MOVE_WIDE_16:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_OBJECT_FROM16:
+ case Instruction::MOVE_OBJECT_16:
+ if ((mir->optimization_flags & MIR_CALLEE) != 0) {
+ // Inlined const/move keeps method_lowering_info for type inference.
+ DCHECK_LT(mir->meta.method_lowering_info, mir_graph_->GetMethodLoweringInfoCount());
+ Type return_type = signatures_[mir->meta.method_lowering_info].return_type;
+ DCHECK(return_type.IsDefined()); // Method return type can't be void.
+ sregs_[defs[0]] = return_type.AsNonNull();
+ if (return_type.Wide()) {
+ DCHECK_EQ(defs[0] + 1, defs[1]);
+ sregs_[defs[1]] = return_type.ToHighWord();
+ }
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ case kMirOpPhi:
+ // These cannot be determined in this simple pass and will be processed later.
+ break;
+
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_WIDE:
+ case Instruction::MOVE_RESULT_OBJECT:
+ // Nothing to do, handled with invoke-* or filled-new-array/-range.
+ break;
+ case Instruction::MOVE_EXCEPTION:
+ // NOTE: We can never catch an array.
+ sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+ break;
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+ break;
+ case Instruction::CONST_CLASS:
+ sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+ break;
+ case Instruction::CHECK_CAST:
+ DCHECK(check_cast_data_ != nullptr);
+ check_cast_data_->ProcessCheckCast(mir);
+ break;
+ case Instruction::ARRAY_LENGTH:
+ sregs_[ModifiedSReg(uses[0])].MergeStrong(Type::UnknownArrayType());
+ break;
+ case Instruction::NEW_INSTANCE:
+ sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB).AsNonNull();
+ DCHECK(sregs_[defs[0]].Ref());
+ DCHECK_EQ(sregs_[defs[0]].ArrayDepth(), 0u);
+ break;
+ case Instruction::NEW_ARRAY:
+ sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vC).AsNonNull();
+ DCHECK(sregs_[defs[0]].Ref());
+ DCHECK_NE(sregs_[defs[0]].ArrayDepth(), 0u);
+ break;
+ case Instruction::FILLED_NEW_ARRAY:
+ case Instruction::FILLED_NEW_ARRAY_RANGE: {
+ Type array_type = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB);
+ array_type.CheckPureRef(); // Previously checked by the method verifier.
+ DCHECK_NE(array_type.ArrayDepth(), 0u);
+ Type component_type = array_type.ComponentType();
+ DCHECK(!component_type.Wide());
+ MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+ if (move_result_mir != nullptr) {
+ DCHECK_EQ(move_result_mir->dalvikInsn.opcode, Instruction::MOVE_RESULT_OBJECT);
+ sregs_[move_result_mir->ssa_rep->defs[0]] = array_type.AsNonNull();
+ }
+ DCHECK_EQ(num_uses, mir->dalvikInsn.vA);
+ for (size_t next = 0u; next != num_uses; ++next) {
+ int32_t input_mod_s_reg = ModifiedSReg(uses[next]);
+ sregs_[input_mod_s_reg].MergeStrong(component_type);
+ }
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_VIRTUAL_QUICK:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+ const MethodSignature* signature = &signatures_[mir->meta.method_lowering_info];
+ MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+ if (move_result_mir != nullptr) {
+ Type return_type = signature->return_type;
+ sregs_[move_result_mir->ssa_rep->defs[0]] = return_type.AsNonNull();
+ if (return_type.Wide()) {
+ DCHECK_EQ(move_result_mir->ssa_rep->defs[0] + 1, move_result_mir->ssa_rep->defs[1]);
+ sregs_[move_result_mir->ssa_rep->defs[1]] = return_type.ToHighWord();
+ }
+ }
+ size_t next = 0u;
+ for (size_t i = 0, size = signature->num_params; i != size; ++i) {
+ Type param_type = signature->param_types[i];
+ int32_t param_s_reg = ModifiedSReg(uses[next]);
+ DCHECK(!param_type.Wide() || uses[next] + 1 == uses[next + 1]);
+ UpdateSRegFromLowWordType(param_s_reg, param_type);
+ next += param_type.Wide() ? 2 : 1;
+ }
+ DCHECK_EQ(next, num_uses);
+ DCHECK_EQ(next, mir->dalvikInsn.vA);
+ break;
+ }
+
+ case Instruction::RETURN_WIDE:
+ DCHECK(current_method_signature_.return_type.Wide());
+ DCHECK_EQ(uses[0] + 1, uses[1]);
+ DCHECK_EQ(ModifiedSReg(uses[0]), uses[0]);
+ FALLTHROUGH_INTENDED;
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT: {
+ int32_t mod_s_reg = ModifiedSReg(uses[0]);
+ UpdateSRegFromLowWordType(mod_s_reg, current_method_signature_.return_type);
+ break;
+ }
+
+ // NOTE: For AGET/APUT we set only the array type. The operand type is set
+ // below based on the data flow attributes.
+ case Instruction::AGET:
+ case Instruction::APUT:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowArrayType());
+ break;
+ case Instruction::AGET_WIDE:
+ case Instruction::APUT_WIDE:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::WideArrayType());
+ break;
+ case Instruction::AGET_OBJECT:
+ sregs_[defs[0]] = sregs_[defs[0]].AsNonNull();
+ FALLTHROUGH_INTENDED;
+ case Instruction::APUT_OBJECT:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::ObjectArrayType());
+ break;
+ case Instruction::AGET_BOOLEAN:
+ case Instruction::APUT_BOOLEAN:
+ case Instruction::AGET_BYTE:
+ case Instruction::APUT_BYTE:
+ case Instruction::AGET_CHAR:
+ case Instruction::APUT_CHAR:
+ case Instruction::AGET_SHORT:
+ case Instruction::APUT_SHORT:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowCoreArrayType());
+ break;
+
+ case Instruction::IGET_WIDE:
+ case Instruction::IGET_WIDE_QUICK:
+ DCHECK_EQ(defs[0] + 1, defs[1]);
+ DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+ sregs_[defs[1]] = ifields_[mir->meta.ifield_lowering_info].ToHighWord();
+ FALLTHROUGH_INTENDED;
+ case Instruction::IGET:
+ case Instruction::IGET_OBJECT:
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ case Instruction::IGET_CHAR:
+ case Instruction::IGET_SHORT:
+ case Instruction::IGET_QUICK:
+ case Instruction::IGET_OBJECT_QUICK:
+ case Instruction::IGET_BOOLEAN_QUICK:
+ case Instruction::IGET_BYTE_QUICK:
+ case Instruction::IGET_CHAR_QUICK:
+ case Instruction::IGET_SHORT_QUICK:
+ DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+ sregs_[defs[0]] = ifields_[mir->meta.ifield_lowering_info].AsNonNull();
+ break;
+ case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_WIDE_QUICK:
+ DCHECK_EQ(uses[0] + 1, uses[1]);
+ FALLTHROUGH_INTENDED;
+ case Instruction::IPUT:
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT:
+ case Instruction::IPUT_QUICK:
+ case Instruction::IPUT_OBJECT_QUICK:
+ case Instruction::IPUT_BOOLEAN_QUICK:
+ case Instruction::IPUT_BYTE_QUICK:
+ case Instruction::IPUT_CHAR_QUICK:
+ case Instruction::IPUT_SHORT_QUICK:
+ DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+ UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+ ifields_[mir->meta.ifield_lowering_info]);
+ break;
+ case Instruction::SGET_WIDE:
+ DCHECK_EQ(defs[0] + 1, defs[1]);
+ DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+ sregs_[defs[1]] = sfields_[mir->meta.sfield_lowering_info].ToHighWord();
+ FALLTHROUGH_INTENDED;
+ case Instruction::SGET:
+ case Instruction::SGET_OBJECT:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT:
+ DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+ sregs_[defs[0]] = sfields_[mir->meta.sfield_lowering_info].AsNonNull();
+ break;
+ case Instruction::SPUT_WIDE:
+ DCHECK_EQ(uses[0] + 1, uses[1]);
+ FALLTHROUGH_INTENDED;
+ case Instruction::SPUT:
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT:
+ DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+ UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+ sfields_[mir->meta.sfield_lowering_info]);
+ break;
+
+ default:
+ // No invokes or reference definitions here.
+ DCHECK_EQ(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC), 0u);
+ DCHECK_NE(attrs & (DF_DA | DF_REF_A), (DF_DA | DF_REF_A));
+ break;
+ }
+
+ if ((attrs & DF_NULL_TRANSFER_N) != 0) {
+ // Don't process Phis at this stage.
+ continue;
+ }
+
+ // Handle defs
+ if (attrs & DF_DA) {
+ int32_t s_reg = defs[0];
+ sregs_[s_reg].SetLowWord();
+ if (attrs & DF_FP_A) {
+ sregs_[s_reg].SetFp();
+ }
+ if (attrs & DF_CORE_A) {
+ sregs_[s_reg].SetCore();
+ }
+ if (attrs & DF_REF_A) {
+ sregs_[s_reg].SetRef();
+ }
+ if (attrs & DF_A_WIDE) {
+ sregs_[s_reg].SetWide();
+ DCHECK_EQ(s_reg + 1, ModifiedSReg(defs[1]));
+ sregs_[s_reg + 1].MergeHighWord(sregs_[s_reg]);
+ } else {
+ sregs_[s_reg].SetNarrow();
+ }
+ }
+
+ // Handle uses
+ size_t next = 0;
+ #define PROCESS(REG) \
+ if (attrs & DF_U##REG) { \
+ int32_t mod_s_reg = ModifiedSReg(uses[next]); \
+ sregs_[mod_s_reg].SetLowWord(); \
+ if (attrs & DF_FP_##REG) { \
+ sregs_[mod_s_reg].SetFp(); \
+ } \
+ if (attrs & DF_CORE_##REG) { \
+ sregs_[mod_s_reg].SetCore(); \
+ } \
+ if (attrs & DF_REF_##REG) { \
+ sregs_[mod_s_reg].SetRef(); \
+ } \
+ if (attrs & DF_##REG##_WIDE) { \
+ sregs_[mod_s_reg].SetWide(); \
+ DCHECK_EQ(mod_s_reg + 1, ModifiedSReg(uses[next + 1])); \
+ sregs_[mod_s_reg + 1].SetWide(); \
+ sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]); \
+ next += 2; \
+ } else { \
+ sregs_[mod_s_reg].SetNarrow(); \
+ next++; \
+ } \
+ }
+ PROCESS(A)
+ PROCESS(B)
+ PROCESS(C)
+ #undef PROCESS
+ DCHECK(next == mir->ssa_rep->num_uses || (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0);
+ }
+ // Record relevant attributes.
+ bb_df_attrs_[bb->id] = bb_df_attrs &
+ (DF_NULL_TRANSFER_N | DF_CHK_CAST | DF_IS_MOVE | DF_HAS_RANGE_CHKS | DF_SAME_TYPE_AB);
+ }
+
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->MarkPseudoPhiBlocks(bb_df_attrs_);
+ }
+}
+
+int32_t TypeInference::ModifiedSReg(int32_t s_reg) {
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+ if (UNLIKELY(split_data != nullptr)) {
+ DCHECK_NE(split_data->current_mod_s_reg, INVALID_SREG);
+ return split_data->current_mod_s_reg;
+ }
+ }
+ return s_reg;
+}
+
+int32_t TypeInference::PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx) {
+ DCHECK_LT(pred_idx, bb->predecessors.size());
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+ if (UNLIKELY(split_data != nullptr)) {
+ return split_data->ending_mod_s_reg[bb->predecessors[pred_idx]];
+ }
+ }
+ return s_reg;
+}
+
+bool TypeInference::UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type) {
+ DCHECK(low_word_type.LowWord());
+ bool changed = sregs_[mod_s_reg].MergeStrong(low_word_type);
+ if (!sregs_[mod_s_reg].Narrow()) { // Wide without conflict with narrow.
+ DCHECK(!low_word_type.Narrow());
+ DCHECK_LT(mod_s_reg, mir_graph_->GetNumSSARegs()); // Original SSA reg.
+ changed |= sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);
+ }
+ return changed;
+}
+
+} // namespace art
diff --git a/compiler/dex/type_inference.h b/compiler/dex/type_inference.h
new file mode 100644
index 0000000000..c9b29bf7aa
--- /dev/null
+++ b/compiler/dex/type_inference.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_TYPE_INFERENCE_H_
+#define ART_COMPILER_DEX_TYPE_INFERENCE_H_
+
+#include "base/logging.h"
+#include "base/arena_object.h"
+#include "base/scoped_arena_containers.h"
+
+namespace art {
+
+class ArenaBitVector;
+class BasicBlock;
+struct CompilationUnit;
+class DexFile;
+class MirFieldInfo;
+class MirMethodInfo;
+class MIR;
+class MIRGraph;
+
+/**
+ * @brief Determine the type of SSA registers.
+ *
+ * @details
+ * Because Dalvik's bytecode is not fully typed, we have to do some work to figure
+ * out the sreg type. For some operations it is clear based on the opcode (e.g.
+ * ADD_FLOAT v0, v1, v2), but for others (MOVE), we may never know the "real" type.
+ *
+ * We perform the type inference operation in two phases:
+ * 1. First, we make one pass over all insns in the topological sort order and
+ * extract the known type information for their defs and uses.
+ * 2. Then we repeatedly go through the graph to process insns that can propagate
+ * types from inputs to outputs and vice versa. These insns are just the MOVEs,
+ * AGET/APUTs, IF_ccs and Phis (including pseudo-Phis, see below).
+ *
+ * Since the main purpose is to determine the basic FP/core/reference type, we don't
+ * need to record the precise reference type, we only record the array type to determine
+ * the result types of agets and source type of aputs.
+ *
+ * One complication is the check-cast instruction that effectively defines a new
+ * virtual register that has a different type than the original sreg. We need to
+ * track these virtual sregs and insert pseudo-phis where they merge.
+ *
+ * Another problem is null references. The same zero constant can be used
+ * as a differently typed null and moved around with move-object, which would normally
+ * be an ill-formed assignment. So we need to keep track of values that can be null
+ * and values that cannot.
+ *
+ * Note that it's possible to have the same sreg show multiple defined types because dx
+ * treats constants as untyped bit patterns. We disable register promotion in that case.
+ */
+class TypeInference : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+ TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+ bool Apply(BasicBlock* bb);
+ void Finish();
+
+ private:
+ struct Type {
+ static Type Unknown() {
+ return Type(0u);
+ }
+
+ static Type NonArrayRefType() {
+ return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+ }
+
+ static Type ObjectArrayType() {
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayRef);
+ }
+
+ static Type WideArrayType() {
+ // Core or FP unknown.
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayWide);
+ }
+
+ static Type NarrowArrayType() {
+ // Core or FP unknown.
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayNarrow);
+ }
+
+ static Type NarrowCoreArrayType() {
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayCore);
+ }
+
+ static Type UnknownArrayType() {
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (1u << kBitArrayDepthStart));
+ }
+
+ static Type ArrayType(uint32_t array_depth, Type nested_type);
+ static Type ArrayTypeFromComponent(Type component_type);
+ static Type ShortyType(char shorty);
+ static Type DexType(const DexFile* dex_file, uint32_t type_idx);
+
+ bool IsDefined() {
+ return raw_bits_ != 0u;
+ }
+
+ bool SizeConflict() const {
+ // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+ return (Wide() && Narrow()) || (HighWord() && LowWord());
+ }
+
+ bool TypeConflict() const {
+ // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+ return (raw_bits_ & kMaskType) != 0u && !IsPowerOfTwo(raw_bits_ & kMaskType); // 2+ bits.
+ }
+
+ void MarkSizeConflict() {
+ SetBits(kFlagLowWord | kFlagHighWord);
+ }
+
+ void MarkTypeConflict() {
+ // Mark all three type bits so that merging any other type bits will not change this type.
+ SetBits(kFlagFp | kFlagCore | kFlagRef);
+ }
+
+ void CheckPureRef() const {
+ DCHECK_EQ(raw_bits_ & (kMaskWideAndType | kMaskWord), kFlagNarrow | kFlagRef | kFlagLowWord);
+ }
+
+ // If reference, don't treat as possible null and require precise type.
+ //
+ // References without this flag are allowed to have a type conflict and their
+ // type will not be propagated down. However, for simplicity we allow propagation
+ // of other flags up as it will affect only other null references; should those
+ // references be marked non-null later, we would have to do it anyway.
+ // NOTE: This is a negative "non-null" flag rather than a positive "is-null"
+ // to simplify merging together with other non-array flags.
+ bool NonNull() const {
+ return IsBitSet(kFlagNonNull);
+ }
+
+ bool Wide() const {
+ return IsBitSet(kFlagWide);
+ }
+
+ bool Narrow() const {
+ return IsBitSet(kFlagNarrow);
+ }
+
+ bool Fp() const {
+ return IsBitSet(kFlagFp);
+ }
+
+ bool Core() const {
+ return IsBitSet(kFlagCore);
+ }
+
+ bool Ref() const {
+ return IsBitSet(kFlagRef);
+ }
+
+ bool LowWord() const {
+ return IsBitSet(kFlagLowWord);
+ }
+
+ bool HighWord() const {
+ return IsBitSet(kFlagHighWord);
+ }
+
+ uint32_t ArrayDepth() const {
+ return raw_bits_ >> kBitArrayDepthStart;
+ }
+
+ Type NestedType() const {
+ DCHECK_NE(ArrayDepth(), 0u);
+ return Type(kFlagLowWord | ((raw_bits_ & kMaskArrayWideAndType) >> kArrayTypeShift));
+ }
+
+ Type ComponentType() const {
+ DCHECK_NE(ArrayDepth(), 0u);
+ Type temp(raw_bits_ - (1u << kBitArrayDepthStart)); // array_depth - 1u;
+ return (temp.ArrayDepth() != 0u) ? temp.AsNull() : NestedType();
+ }
+
+ void SetWide() {
+ SetBits(kFlagWide);
+ }
+
+ void SetNarrow() {
+ SetBits(kFlagNarrow);
+ }
+
+ void SetFp() {
+ SetBits(kFlagFp);
+ }
+
+ void SetCore() {
+ SetBits(kFlagCore);
+ }
+
+ void SetRef() {
+ SetBits(kFlagRef);
+ }
+
+ void SetLowWord() {
+ SetBits(kFlagLowWord);
+ }
+
+ void SetHighWord() {
+ SetBits(kFlagHighWord);
+ }
+
+ Type ToHighWord() const {
+ DCHECK_EQ(raw_bits_ & (kMaskWide | kMaskWord), kFlagWide | kFlagLowWord);
+ return Type(raw_bits_ ^ (kFlagLowWord | kFlagHighWord));
+ }
+
+ bool MergeHighWord(Type low_word_type) {
+ // NOTE: low_word_type may also be Narrow() or HighWord().
+ DCHECK(low_word_type.Wide() && low_word_type.LowWord());
+ return MergeBits(Type(low_word_type.raw_bits_ | kFlagHighWord),
+ kMaskWideAndType | kFlagHighWord);
+ }
+
+ bool Copy(Type type) {
+ if (raw_bits_ != type.raw_bits_) {
+ raw_bits_ = type.raw_bits_;
+ return true;
+ }
+ return false;
+ }
+
+ // Merge non-array flags.
+ bool MergeNonArrayFlags(Type src_type) {
+ return MergeBits(src_type, kMaskNonArray);
+ }
+
+ // Merge array flags for conflict.
+ bool MergeArrayConflict(Type src_type);
+
+ // Merge all flags.
+ bool MergeStrong(Type src_type);
+
+ // Merge all flags.
+ bool MergeWeak(Type src_type);
+
+ // Get the same type but mark that it should not be treated as null.
+ Type AsNonNull() const {
+ return Type(raw_bits_ | kFlagNonNull);
+ }
+
+ // Get the same type but mark that it can be treated as null.
+ Type AsNull() const {
+ return Type(raw_bits_ & ~kFlagNonNull);
+ }
+
+ private:
+ enum FlagBits {
+ kBitNonNull = 0,
+ kBitWide,
+ kBitNarrow,
+ kBitFp,
+ kBitCore,
+ kBitRef,
+ kBitLowWord,
+ kBitHighWord,
+ kBitArrayWide,
+ kBitArrayNarrow,
+ kBitArrayFp,
+ kBitArrayCore,
+ kBitArrayRef,
+ kBitArrayDepthStart,
+ };
+ static constexpr size_t kArrayDepthBits = sizeof(uint32_t) * 8u - kBitArrayDepthStart;
+
+ static constexpr uint32_t kFlagNonNull = 1u << kBitNonNull;
+ static constexpr uint32_t kFlagWide = 1u << kBitWide;
+ static constexpr uint32_t kFlagNarrow = 1u << kBitNarrow;
+ static constexpr uint32_t kFlagFp = 1u << kBitFp;
+ static constexpr uint32_t kFlagCore = 1u << kBitCore;
+ static constexpr uint32_t kFlagRef = 1u << kBitRef;
+ static constexpr uint32_t kFlagLowWord = 1u << kBitLowWord;
+ static constexpr uint32_t kFlagHighWord = 1u << kBitHighWord;
+ static constexpr uint32_t kFlagArrayWide = 1u << kBitArrayWide;
+ static constexpr uint32_t kFlagArrayNarrow = 1u << kBitArrayNarrow;
+ static constexpr uint32_t kFlagArrayFp = 1u << kBitArrayFp;
+ static constexpr uint32_t kFlagArrayCore = 1u << kBitArrayCore;
+ static constexpr uint32_t kFlagArrayRef = 1u << kBitArrayRef;
+
+ static constexpr uint32_t kMaskWide = kFlagWide | kFlagNarrow;
+ static constexpr uint32_t kMaskType = kFlagFp | kFlagCore | kFlagRef;
+ static constexpr uint32_t kMaskWord = kFlagLowWord | kFlagHighWord;
+ static constexpr uint32_t kMaskArrayWide = kFlagArrayWide | kFlagArrayNarrow;
+ static constexpr uint32_t kMaskArrayType = kFlagArrayFp | kFlagArrayCore | kFlagArrayRef;
+ static constexpr uint32_t kMaskWideAndType = kMaskWide | kMaskType;
+ static constexpr uint32_t kMaskArrayWideAndType = kMaskArrayWide | kMaskArrayType;
+
+ static constexpr size_t kArrayTypeShift = kBitArrayWide - kBitWide;
+ static_assert(kArrayTypeShift == kBitArrayNarrow - kBitNarrow, "shift mismatch");
+ static_assert(kArrayTypeShift == kBitArrayFp - kBitFp, "shift mismatch");
+ static_assert(kArrayTypeShift == kBitArrayCore - kBitCore, "shift mismatch");
+ static_assert(kArrayTypeShift == kBitArrayRef - kBitRef, "shift mismatch");
+ static_assert((kMaskWide << kArrayTypeShift) == kMaskArrayWide, "shift mismatch");
+ static_assert((kMaskType << kArrayTypeShift) == kMaskArrayType, "shift mismatch");
+ static_assert((kMaskWideAndType << kArrayTypeShift) == kMaskArrayWideAndType, "shift mismatch");
+
+ static constexpr uint32_t kMaskArrayDepth = static_cast<uint32_t>(-1) << kBitArrayDepthStart;
+ static constexpr uint32_t kMaskNonArray = ~(kMaskArrayWideAndType | kMaskArrayDepth);
+
+ // The maximum representable array depth. If we exceed the maximum (which can happen
+ // only with an absurdly nested array type in a dex file that would presumably cause
+ // an OOM while being resolved), we may report false conflicts.
+ static constexpr uint32_t kMaxArrayDepth = static_cast<uint32_t>(-1) >> kBitArrayDepthStart;
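+
+ // Worked example: ObjectArrayType() above encodes a one-dimensional array of references
+ // as kFlagRef | kFlagNarrow | kFlagLowWord for the array reference itself, array depth 1
+ // in the bits starting at kBitArrayDepthStart, and kFlagArrayRef | kFlagArrayNarrow for
+ // the elements.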
+
+ explicit Type(uint32_t raw_bits) : raw_bits_(raw_bits) { }
+
+ bool IsBitSet(uint32_t flag) const {
+ return (raw_bits_ & flag) != 0u;
+ }
+
+ void SetBits(uint32_t flags) {
+ raw_bits_ |= flags;
+ }
+
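+ // Merge the masked bits of src_type into this type; returns true iff at least one new
+ // bit was set. This is the change indication returned by the Merge* helpers above.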
+ bool MergeBits(Type src_type, uint32_t mask) {
+ uint32_t new_bits = raw_bits_ | (src_type.raw_bits_ & mask);
+ if (new_bits != raw_bits_) {
+ raw_bits_ = new_bits;
+ return true;
+ }
+ return false;
+ }
+
+ uint32_t raw_bits_;
+ };
+
+ struct MethodSignature {
+ Type return_type;
+ size_t num_params;
+ Type* param_types;
+ };
+
+ struct SplitSRegData {
+ int32_t current_mod_s_reg;
+ int32_t* starting_mod_s_reg; // Indexed by BasicBlock::id.
+ int32_t* ending_mod_s_reg; // Indexed by BasicBlock::id.
+
+ // NOTE: Before AddPseudoPhis(), def_phi_blocks_ marks the blocks
+ // with check-casts and the block with the original SSA reg.
+ // After AddPseudoPhis(), it marks blocks with pseudo-phis.
+ ArenaBitVector* def_phi_blocks_; // Indexed by BasicBlock::id.
+ };
+
+ class CheckCastData : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+ CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+ size_t NumSRegs() const {
+ return num_sregs_;
+ }
+
+ void AddCheckCast(MIR* check_cast, Type type);
+ void AddPseudoPhis();
+ void InitializeCheckCastSRegs(Type* sregs) const;
+ void MergeCheckCastConflicts(Type* sregs) const;
+ void MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const;
+
+ void Start(BasicBlock* bb);
+ bool ProcessPseudoPhis(BasicBlock* bb, Type* sregs);
+ void ProcessCheckCast(MIR* mir);
+
+ SplitSRegData* GetSplitSRegData(int32_t s_reg);
+
+ private:
+ BasicBlock* FindDefBlock(MIR* check_cast);
+ BasicBlock* FindTopologicallyEarliestPredecessor(BasicBlock* bb);
+ bool IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg);
+
+ MIRGraph* const mir_graph_;
+ ScopedArenaAllocator* const alloc_;
+ const size_t num_blocks_;
+ size_t num_sregs_;
+
+ // Map a check-cast MIR to its modified SSA reg and type.
+ struct CheckCastMapValue {
+ int32_t modified_s_reg;
+ Type type;
+ };
+ ScopedArenaSafeMap<MIR*, CheckCastMapValue> check_cast_map_;
+ ScopedArenaSafeMap<int32_t, SplitSRegData> split_sreg_data_;
+ };
+
+ static Type FieldType(const DexFile* dex_file, uint32_t field_idx);
+ static Type* PrepareIFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc);
+ static Type* PrepareSFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc);
+ static MethodSignature Signature(const DexFile* dex_file, uint32_t method_idx, bool is_static,
+ ScopedArenaAllocator* alloc);
+ static MethodSignature* PrepareSignatures(const DexFile* dex_file, MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc);
+ static CheckCastData* InitializeCheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+ void InitializeSRegs();
+
+ int32_t ModifiedSReg(int32_t s_reg);
+ int32_t PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx);
+
+ bool UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type);
+
+ MIRGraph* const mir_graph_;
+ CompilationUnit* const cu_;
+
+ // Type inference also propagates types backwards, but this must not happen across
+ // a check-cast. So we effectively split an SSA reg in two at a check-cast and
+ // keep track of the types separately.
+ std::unique_ptr<CheckCastData> check_cast_data_;
+
+ size_t num_sregs_; // Number of SSA regs or modified SSA regs, see check-cast.
+ const Type* const ifields_; // Indexed by MIR::meta::ifield_lowering_info.
+ const Type* const sfields_; // Indexed by MIR::meta::sfield_lowering_info.
+ const MethodSignature* const signatures_; // Indexed by MIR::meta::method_lowering_info.
+ const MethodSignature current_method_signature_;
+ Type* const sregs_; // Indexed by SSA reg or modified SSA reg, see check-cast.
+ uint64_t* const bb_df_attrs_; // Indexed by BasicBlock::id.
+
+ friend class TypeInferenceTest;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_TYPE_INFERENCE_H_
diff --git a/compiler/dex/type_inference_test.cc b/compiler/dex/type_inference_test.cc
new file mode 100644
index 0000000000..eaa2bfac93
--- /dev/null
+++ b/compiler/dex/type_inference_test.cc
@@ -0,0 +1,2044 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex/mir_field_info.h"
+#include "dex/mir_graph.h"
+#include "driver/dex_compilation_unit.h"
+#include "gtest/gtest.h"
+#include "type_inference.h"
+#include "utils/test_dex_file_builder.h"
+
+namespace art {
+
+class TypeInferenceTest : public testing::Test {
+ protected:
+ struct TypeDef {
+ const char* descriptor;
+ };
+
+ struct FieldDef {
+ const char* class_descriptor;
+ const char* type;
+ const char* name;
+ };
+
+ struct MethodDef {
+ const char* class_descriptor;
+ const char* signature;
+ const char* name;
+ InvokeType type;
+ };
+
+ struct BBDef {
+ static constexpr size_t kMaxSuccessors = 4;
+ static constexpr size_t kMaxPredecessors = 4;
+
+ BBType type;
+ size_t num_successors;
+ BasicBlockId successors[kMaxSuccessors];
+ size_t num_predecessors;
+ BasicBlockId predecessors[kMaxPredecessors];
+ };
+
+ struct MIRDef {
+ static constexpr size_t kMaxSsaDefs = 2;
+ static constexpr size_t kMaxSsaUses = 4;
+
+ BasicBlockId bbid;
+ Instruction::Code opcode;
+ int64_t value;
+ uint32_t metadata;
+ size_t num_uses;
+ int32_t uses[kMaxSsaUses];
+ size_t num_defs;
+ int32_t defs[kMaxSsaDefs];
+ };
+
+#define DEF_SUCC0() \
+ 0u, { }
+#define DEF_SUCC1(s1) \
+ 1u, { s1 }
+#define DEF_SUCC2(s1, s2) \
+ 2u, { s1, s2 }
+#define DEF_SUCC3(s1, s2, s3) \
+ 3u, { s1, s2, s3 }
+#define DEF_SUCC4(s1, s2, s3, s4) \
+ 4u, { s1, s2, s3, s4 }
+#define DEF_PRED0() \
+ 0u, { }
+#define DEF_PRED1(p1) \
+ 1u, { p1 }
+#define DEF_PRED2(p1, p2) \
+ 2u, { p1, p2 }
+#define DEF_PRED3(p1, p2, p3) \
+ 3u, { p1, p2, p3 }
+#define DEF_PRED4(p1, p2, p3, p4) \
+ 4u, { p1, p2, p3, p4 }
+#define DEF_BB(type, succ, pred) \
+ { type, succ, pred }
+
+#define DEF_CONST(bb, opcode, reg, value) \
+ { bb, opcode, value, 0u, 0, { }, 1, { reg } }
+#define DEF_CONST_WIDE(bb, opcode, reg, value) \
+ { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(bb, opcode, reg, index) \
+ { bb, opcode, index, 0u, 0, { }, 1, { reg } }
+#define DEF_IGET(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
+#define DEF_INVOKE0(bb, opcode, method_idx) \
+ { bb, opcode, 0u, method_idx, 0, { }, 0, { } }
+#define DEF_INVOKE1(bb, opcode, reg, method_idx) \
+ { bb, opcode, 0u, method_idx, 1, { reg }, 0, { } }
+#define DEF_INVOKE2(bb, opcode, reg1, reg2, method_idx) \
+ { bb, opcode, 0u, method_idx, 2, { reg1, reg2 }, 0, { } }
+#define DEF_IFZ(bb, opcode, reg) \
+ { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_MOVE(bb, opcode, reg, src) \
+ { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
+#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
+ { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
+#define DEF_PHI2(bb, reg, src1, src2) \
+ { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
+#define DEF_BINOP(bb, opcode, result, src1, src2) \
+ { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
+#define DEF_UNOP(bb, opcode, result, src) DEF_MOVE(bb, opcode, result, src)
+#define DEF_NULOP(bb, opcode, result) DEF_CONST(bb, opcode, result, 0)
+#define DEF_NULOP_WIDE(bb, opcode, result) DEF_CONST_WIDE(bb, opcode, result, 0)
+#define DEF_CHECK_CAST(bb, opcode, reg, type) \
+ { bb, opcode, 0, type, 1, { reg }, 0, { } }
+#define DEF_NEW_ARRAY(bb, opcode, reg, length, type) \
+ { bb, opcode, 0, type, 1, { length }, 1, { reg } }
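+
+// For example, DEF_IGET(3u, Instruction::IGET, 4u, thiz, 3u) (as used in the IGet test
+// below) describes an IGET in basic block 3 with one use (the 'thiz' reference), one def
+// (SSA reg 4) and metadata 3u that DoPrepareMIRs() resolves via ifield_lowering_infos_[3].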
+
+ void AddTypes(const TypeDef* defs, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ const TypeDef* def = &defs[i];
+ dex_file_builder_.AddType(def->descriptor);
+ }
+ }
+
+ template <size_t count>
+ void PrepareTypes(const TypeDef (&defs)[count]) {
+ type_defs_ = defs;
+ type_count_ = count;
+ AddTypes(defs, count);
+ }
+
+ void AddFields(const FieldDef* defs, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ const FieldDef* def = &defs[i];
+ dex_file_builder_.AddField(def->class_descriptor, def->type, def->name);
+ }
+ }
+
+ template <size_t count>
+ void PrepareIFields(const FieldDef (&defs)[count]) {
+ ifield_defs_ = defs;
+ ifield_count_ = count;
+ AddFields(defs, count);
+ }
+
+ template <size_t count>
+ void PrepareSFields(const FieldDef (&defs)[count]) {
+ sfield_defs_ = defs;
+ sfield_count_ = count;
+ AddFields(defs, count);
+ }
+
+ void AddMethods(const MethodDef* defs, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ const MethodDef* def = &defs[i];
+ dex_file_builder_.AddMethod(def->class_descriptor, def->signature, def->name);
+ }
+ }
+
+ template <size_t count>
+ void PrepareMethods(const MethodDef (&defs)[count]) {
+ method_defs_ = defs;
+ method_count_ = count;
+ AddMethods(defs, count);
+ }
+
+ DexMemAccessType AccessTypeForDescriptor(const char* descriptor) {
+ switch (descriptor[0]) {
+ case 'I':
+ case 'F':
+ return kDexMemAccessWord;
+ case 'J':
+ case 'D':
+ return kDexMemAccessWide;
+ case '[':
+ case 'L':
+ return kDexMemAccessObject;
+ case 'Z':
+ return kDexMemAccessBoolean;
+ case 'B':
+ return kDexMemAccessByte;
+ case 'C':
+ return kDexMemAccessChar;
+ case 'S':
+ return kDexMemAccessShort;
+ default:
+ LOG(FATAL) << "Bad descriptor: " << descriptor;
+ UNREACHABLE();
+ }
+ }
+
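+ // Count the input vregs (including 'this' for instance methods). For example,
+ // CountIns("(I[JD)V", false) yields 5: 'this' + int + long[] reference + a two-vreg double.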
+ size_t CountIns(const std::string& test_method_signature, bool is_static) {
+ const char* sig = test_method_signature.c_str();
+ CHECK_EQ(sig[0], '(');
+ ++sig;
+ size_t result = is_static ? 0u : 1u;
+ while (*sig != ')') {
+ result += (AccessTypeForDescriptor(sig) == kDexMemAccessWide) ? 2u : 1u;
+ while (*sig == '[') {
+ ++sig;
+ }
+ if (*sig == 'L') {
+ do {
+ ++sig;
+ CHECK(*sig != '\0' && *sig != ')');
+ } while (*sig != ';');
+ }
+ ++sig;
+ }
+ return result;
+ }
+
+ void BuildDexFile(const std::string& test_method_signature, bool is_static) {
+ dex_file_builder_.AddMethod(kClassName, test_method_signature, kMethodName);
+ dex_file_ = dex_file_builder_.Build(kDexLocation);
+ cu_.dex_file = dex_file_.get();
+ cu_.method_idx = dex_file_builder_.GetMethodIdx(kClassName, test_method_signature, kMethodName);
+ cu_.access_flags = is_static ? kAccStatic : 0u;
+ cu_.mir_graph->m_units_.push_back(new (cu_.mir_graph->arena_) DexCompilationUnit(
+ &cu_, cu_.class_loader, cu_.class_linker, *cu_.dex_file, nullptr /* code_item not used */,
+ 0u /* class_def_idx not used */, 0u /* method_index not used */,
+ cu_.access_flags, nullptr /* verified_method not used */));
+ cu_.mir_graph->current_method_ = 0u;
+ code_item_ = static_cast<DexFile::CodeItem*>(
+ cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+
+ code_item_->ins_size_ = CountIns(test_method_signature, is_static);
+ code_item_->registers_size_ = kLocalVRs + code_item_->ins_size_;
+ cu_.mir_graph->current_code_item_ = code_item_;
+ cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
+
+ cu_.mir_graph->ifield_lowering_infos_.clear();
+ cu_.mir_graph->ifield_lowering_infos_.reserve(ifield_count_);
+ for (size_t i = 0u; i != ifield_count_; ++i) {
+ const FieldDef* def = &ifield_defs_[i];
+ uint32_t field_idx =
+ dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+ MirIFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type), false);
+ field_info.declaring_dex_file_ = cu_.dex_file;
+ field_info.declaring_field_idx_ = field_idx;
+ cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
+ }
+
+ cu_.mir_graph->sfield_lowering_infos_.clear();
+ cu_.mir_graph->sfield_lowering_infos_.reserve(sfield_count_);
+ for (size_t i = 0u; i != sfield_count_; ++i) {
+ const FieldDef* def = &sfield_defs_[i];
+ uint32_t field_idx =
+ dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+ MirSFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type));
+ field_info.declaring_dex_file_ = cu_.dex_file;
+ field_info.declaring_field_idx_ = field_idx;
+ cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
+ }
+
+ cu_.mir_graph->method_lowering_infos_.clear();
+ cu_.mir_graph->method_lowering_infos_.reserve(method_count_);
+ for (size_t i = 0u; i != method_count_; ++i) {
+ const MethodDef* def = &method_defs_[i];
+ uint32_t method_idx =
+ dex_file_builder_.GetMethodIdx(def->class_descriptor, def->signature, def->name);
+ MirMethodLoweringInfo method_info(method_idx, def->type, false);
+ method_info.declaring_dex_file_ = cu_.dex_file;
+ method_info.declaring_method_idx_ = method_idx;
+ cu_.mir_graph->method_lowering_infos_.push_back(method_info);
+ }
+ }
+
+ void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
+ cu_.mir_graph->block_id_map_.clear();
+ cu_.mir_graph->block_list_.clear();
+ ASSERT_LT(3u, count); // null, entry, exit and at least one bytecode block.
+ ASSERT_EQ(kNullBlock, defs[0].type);
+ ASSERT_EQ(kEntryBlock, defs[1].type);
+ ASSERT_EQ(kExitBlock, defs[2].type);
+ for (size_t i = 0u; i != count; ++i) {
+ const BBDef* def = &defs[i];
+ BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
+ if (def->num_successors <= 2) {
+ bb->successor_block_list_type = kNotUsed;
+ bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
+ bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
+ } else {
+ bb->successor_block_list_type = kPackedSwitch;
+ bb->fall_through = 0u;
+ bb->taken = 0u;
+ bb->successor_blocks.reserve(def->num_successors);
+ for (size_t j = 0u; j != def->num_successors; ++j) {
+ SuccessorBlockInfo* successor_block_info =
+ static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
+ kArenaAllocSuccessor));
+ successor_block_info->block = j;
+ successor_block_info->key = 0u; // Not used by type inference.
+ bb->successor_blocks.push_back(successor_block_info);
+ }
+ }
+ bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
+ if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
+ bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
+ cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+ bb->data_flow_info->live_in_v = live_in_v_;
+ }
+ }
+ ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+ cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
+ ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
+ cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
+ ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+ }
+
+ template <size_t count>
+ void PrepareBasicBlocks(const BBDef (&defs)[count]) {
+ DoPrepareBasicBlocks(defs, count);
+ }
+
+ void PrepareSingleBlock() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareDiamond() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareLoop() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)), // "taken" loops to self.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void DoPrepareMIRs(const MIRDef* defs, size_t count) {
+ mir_count_ = count;
+ mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
+ ssa_reps_.resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const MIRDef* def = &defs[i];
+ MIR* mir = &mirs_[i];
+ ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
+ BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
+ bb->AppendMIR(mir);
+ mir->dalvikInsn.opcode = def->opcode;
+ mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
+ mir->dalvikInsn.vB_wide = def->value;
+ if (IsInstructionIGetOrIPut(def->opcode)) {
+ ASSERT_LT(def->metadata, cu_.mir_graph->ifield_lowering_infos_.size());
+ mir->meta.ifield_lowering_info = def->metadata;
+ ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->metadata].MemAccessType(),
+ IGetOrIPutMemAccessType(def->opcode));
+ cu_.mir_graph->merged_df_flags_ |= DF_IFIELD;
+ } else if (IsInstructionSGetOrSPut(def->opcode)) {
+ ASSERT_LT(def->metadata, cu_.mir_graph->sfield_lowering_infos_.size());
+ mir->meta.sfield_lowering_info = def->metadata;
+ ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->metadata].MemAccessType(),
+ SGetOrSPutMemAccessType(def->opcode));
+ cu_.mir_graph->merged_df_flags_ |= DF_SFIELD;
+ } else if (IsInstructionInvoke(def->opcode)) {
+ ASSERT_LT(def->metadata, cu_.mir_graph->method_lowering_infos_.size());
+ mir->meta.method_lowering_info = def->metadata;
+ mir->dalvikInsn.vA = def->num_uses;
+ cu_.mir_graph->merged_df_flags_ |= DF_FORMAT_35C;
+ } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
+ mir->meta.phi_incoming =
+ allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
+ ASSERT_EQ(def->num_uses, bb->predecessors.size());
+ std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
+ } else if (def->opcode == Instruction::CHECK_CAST) {
+ ASSERT_LT(def->metadata, type_count_);
+ mir->dalvikInsn.vB = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+ cu_.mir_graph->merged_df_flags_ |= DF_CHK_CAST;
+ } else if (def->opcode == Instruction::NEW_ARRAY) {
+ ASSERT_LT(def->metadata, type_count_);
+ mir->dalvikInsn.vC = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+ }
+ mir->ssa_rep = &ssa_reps_[i];
+ mir->ssa_rep->num_uses = def->num_uses;
+ mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by type inference.
+ mir->ssa_rep->num_defs = def->num_defs;
+ mir->ssa_rep->defs = const_cast<int32_t*>(def->defs); // Not modified by type inference.
+ mir->offset = i; // LVN uses offset only for debug output
+ mir->optimization_flags = 0u;
+ }
+ code_item_->insns_size_in_code_units_ = 2u * count;
+ }
+
+ template <size_t count>
+ void PrepareMIRs(const MIRDef (&defs)[count]) {
+ DoPrepareMIRs(defs, count);
+ }
+
+ // BasicBlockDataFlow::vreg_to_ssa_map_exit is used only for check-casts.
+ void AllocEndingVRegToSRegMaps() {
+ AllNodesIterator iterator(cu_.mir_graph.get());
+ for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+ if (bb->data_flow_info != nullptr) {
+ if (bb->data_flow_info->vreg_to_ssa_map_exit == nullptr) {
+ size_t num_vregs = code_item_->registers_size_;
+ bb->data_flow_info->vreg_to_ssa_map_exit = static_cast<int32_t*>(
+ cu_.arena.AllocArray<int32_t>(num_vregs, kArenaAllocDFInfo));
+ std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs, INVALID_SREG);
+ }
+ }
+ }
+ }
+
+ template <size_t count>
+ void MapVRegToSReg(int vreg, int32_t sreg, const BasicBlockId (&bb_ids)[count]) {
+ AllocEndingVRegToSRegMaps();
+ for (BasicBlockId bb_id : bb_ids) {
+ BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
+ CHECK(bb != nullptr);
+ CHECK(bb->data_flow_info != nullptr);
+ CHECK(bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ bb->data_flow_info->vreg_to_ssa_map_exit[vreg] = sreg;
+ }
+ }
+
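+ // Set up SSA and dominance information, then run TypeInference::Apply() over the blocks
+ // in repeating pre-order DFS until no block reports a change, and finalize with Finish().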
+ void PerformTypeInference() {
+ cu_.mir_graph->SSATransformationStart();
+ cu_.mir_graph->ComputeDFSOrders();
+ cu_.mir_graph->ComputeDominators();
+ cu_.mir_graph->ComputeTopologicalSortOrder();
+ cu_.mir_graph->SSATransformationEnd();
+ ASSERT_TRUE(type_inference_ == nullptr);
+ type_inference_.reset(new (allocator_.get()) TypeInference(cu_.mir_graph.get(),
+ allocator_.get()));
+ RepeatingPreOrderDfsIterator iter(cu_.mir_graph.get());
+ bool changed = false;
+ for (BasicBlock* bb = iter.Next(changed); bb != nullptr; bb = iter.Next(changed)) {
+ changed = type_inference_->Apply(bb);
+ }
+ type_inference_->Finish();
+ }
+
+ TypeInferenceTest()
+ : pool_(),
+ cu_(&pool_, kRuntimeISA, nullptr, nullptr),
+ mir_count_(0u),
+ mirs_(nullptr),
+ code_item_(nullptr),
+ ssa_reps_(),
+ allocator_(),
+ live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)),
+ type_defs_(nullptr),
+ type_count_(0u),
+ ifield_defs_(nullptr),
+ ifield_count_(0u),
+ sfield_defs_(nullptr),
+ sfield_count_(0u),
+ method_defs_(nullptr),
+ method_count_(0u),
+ dex_file_builder_(),
+ dex_file_(nullptr) {
+ cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+ allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+ // Bind all possible sregs to live vregs for test purposes.
+ live_in_v_->SetInitialBits(kMaxSsaRegs);
+ cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
+ kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
+ cu_.mir_graph->method_sreg_ = kMaxSsaRegs - 1u;
+ cu_.mir_graph->reg_location_[cu_.mir_graph->GetMethodSReg()].location = kLocCompilerTemp;
+ cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
+ cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
+ for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
+ cu_.mir_graph->ssa_base_vregs_.push_back(i);
+ cu_.mir_graph->ssa_subscripts_.push_back(0);
+ }
+ }
+
+ enum ExpectFlags : uint32_t {
+ kExpectWide = 0x0001u,
+ kExpectNarrow = 0x0002u,
+ kExpectFp = 0x0004u,
+ kExpectCore = 0x0008u,
+ kExpectRef = 0x0010u,
+ kExpectArrayWide = 0x0020u,
+ kExpectArrayNarrow = 0x0040u,
+ kExpectArrayFp = 0x0080u,
+ kExpectArrayCore = 0x0100u,
+ kExpectArrayRef = 0x0200u,
+ kExpectNull = 0x0400u,
+ kExpectHigh = 0x0800u, // Reserved for ExpectSRegType().
+ };
+
+ struct SRegExpectation {
+ uint32_t array_depth;
+ uint32_t flags;
+ };
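+ // For example, { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow }
+ // describes a narrow reference to a one-dimensional array of narrow FP values, i.e. a
+ // float[] (cf. the ArrayArrayFloat test below).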
+
+ void ExpectSRegType(int s_reg, const SRegExpectation& expectation, bool check_loc = true) {
+ uint32_t flags = expectation.flags;
+ uint32_t array_depth = expectation.array_depth;
+ TypeInference::Type type = type_inference_->sregs_[s_reg];
+
+ if (check_loc) {
+ RegLocation loc = cu_.mir_graph->reg_location_[s_reg];
+ EXPECT_EQ((flags & kExpectWide) != 0u, loc.wide) << s_reg;
+ EXPECT_EQ((flags & kExpectFp) != 0u, loc.fp) << s_reg;
+ EXPECT_EQ((flags & kExpectCore) != 0u, loc.core) << s_reg;
+ EXPECT_EQ((flags & kExpectRef) != 0u, loc.ref) << s_reg;
+ EXPECT_EQ((flags & kExpectHigh) != 0u, loc.high_word) << s_reg;
+ }
+
+ EXPECT_EQ((flags & kExpectWide) != 0u, type.Wide()) << s_reg;
+ EXPECT_EQ((flags & kExpectNarrow) != 0u, type.Narrow()) << s_reg;
+ EXPECT_EQ((flags & kExpectFp) != 0u, type.Fp()) << s_reg;
+ EXPECT_EQ((flags & kExpectCore) != 0u, type.Core()) << s_reg;
+ EXPECT_EQ((flags & kExpectRef) != 0u, type.Ref()) << s_reg;
+ EXPECT_EQ((flags & kExpectHigh) == 0u, type.LowWord()) << s_reg;
+ EXPECT_EQ((flags & kExpectHigh) != 0u, type.HighWord()) << s_reg;
+
+ if ((flags & kExpectRef) != 0u) {
+ EXPECT_EQ((flags & kExpectNull) != 0u, !type.NonNull()) << s_reg;
+ } else {
+ // Null should be checked only for references.
+ ASSERT_EQ((flags & kExpectNull), 0u);
+ }
+
+ ASSERT_EQ(array_depth, type.ArrayDepth()) << s_reg;
+ if (array_depth != 0u) {
+ ASSERT_NE((flags & kExpectRef), 0u);
+ TypeInference::Type nested_type = type.NestedType();
+ EXPECT_EQ((flags & kExpectArrayWide) != 0u, nested_type.Wide()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayNarrow) != 0u, nested_type.Narrow()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayFp) != 0u, nested_type.Fp()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayCore) != 0u, nested_type.Core()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayRef) != 0u, nested_type.Ref()) << s_reg;
+ }
+ if (!type.Narrow() && type.LowWord() &&
+ (expectation.flags & (kExpectWide | kExpectNarrow | kExpectHigh)) == kExpectWide) {
+ SRegExpectation high_expectation = { array_depth, flags | kExpectHigh };
+ ExpectSRegType(s_reg + 1, high_expectation);
+ }
+ }
+
+ void ExpectCore(int s_reg, bool core) {
+ EXPECT_EQ(core, type_inference_->sregs_[s_reg].Core());
+ }
+
+ void ExpectRef(int s_reg, bool ref) {
+ EXPECT_EQ(ref, type_inference_->sregs_[s_reg].Ref());
+ }
+
+ void ExpectArrayDepth(int s_reg, uint32_t array_depth) {
+ EXPECT_EQ(array_depth, type_inference_->sregs_[s_reg].ArrayDepth());
+ }
+
+ static constexpr size_t kMaxSsaRegs = 16384u;
+ static constexpr uint16_t kLocalVRs = 1000u;
+
+ static constexpr const char* kDexLocation = "TypeInferenceDexFile;";
+ static constexpr const char* kClassName = "LTypeInferenceTest;";
+ static constexpr const char* kMethodName = "test";
+
+ ArenaPool pool_;
+ CompilationUnit cu_;
+ size_t mir_count_;
+ MIR* mirs_;
+ DexFile::CodeItem* code_item_;
+ std::vector<SSARepresentation> ssa_reps_;
+ std::unique_ptr<ScopedArenaAllocator> allocator_;
+ std::unique_ptr<TypeInference> type_inference_;
+ ArenaBitVector* live_in_v_;
+
+ const TypeDef* type_defs_;
+ size_t type_count_;
+ const FieldDef* ifield_defs_;
+ size_t ifield_count_;
+ const FieldDef* sfield_defs_;
+ size_t sfield_count_;
+ const MethodDef* method_defs_;
+ size_t method_count_;
+
+ TestDexFileBuilder dex_file_builder_;
+ std::unique_ptr<const DexFile> dex_file_;
+};
+
+TEST_F(TypeInferenceTest, IGet) {
+ static const FieldDef ifields[] = {
+ { kClassName, "B", "byteField" },
+ { kClassName, "C", "charField" },
+ { kClassName, "D", "doubleField" },
+ { kClassName, "F", "floatField" },
+ { kClassName, "I", "intField" },
+ { kClassName, "J", "longField" },
+ { kClassName, "S", "shortField" },
+ { kClassName, "Z", "booleanField" },
+ { kClassName, "Ljava/lang/Object;", "objectField" },
+ { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+ };
+ constexpr uint32_t thiz = kLocalVRs;
+ static const MIRDef mirs[] = {
+ DEF_IGET(3u, Instruction::IGET_BYTE, 0u, thiz, 0u),
+ DEF_IGET(3u, Instruction::IGET_CHAR, 1u, thiz, 1u),
+ DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 2u, thiz, 2u),
+ DEF_IGET(3u, Instruction::IGET, 4u, thiz, 3u),
+ DEF_IGET(3u, Instruction::IGET, 5u, thiz, 4u),
+ DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 6u, thiz, 5u),
+ DEF_IGET(3u, Instruction::IGET_SHORT, 8u, thiz, 6u),
+ DEF_IGET(3u, Instruction::IGET_BOOLEAN, 9u, thiz, 7u),
+ DEF_IGET(3u, Instruction::IGET_OBJECT, 10u, thiz, 8u),
+ DEF_IGET(3u, Instruction::IGET_OBJECT, 11u, thiz, 9u),
+ };
+
+ PrepareIFields(ifields);
+ BuildDexFile("()V", false);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGet) {
+ static const FieldDef sfields[] = {
+ { kClassName, "B", "staticByteField" },
+ { kClassName, "C", "staticCharField" },
+ { kClassName, "D", "staticDoubleField" },
+ { kClassName, "F", "staticFloatField" },
+ { kClassName, "I", "staticIntField" },
+ { kClassName, "J", "staticLongField" },
+ { kClassName, "S", "staticShortField" },
+ { kClassName, "Z", "staticBooleanField" },
+ { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+ { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(3u, Instruction::SGET_BYTE, 0u, 0u),
+ DEF_SGET(3u, Instruction::SGET_CHAR, 1u, 1u),
+ DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 2u, 2u),
+ DEF_SGET(3u, Instruction::SGET, 4u, 3u),
+ DEF_SGET(3u, Instruction::SGET, 5u, 4u),
+ DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 6u, 5u),
+ DEF_SGET(3u, Instruction::SGET_SHORT, 8u, 6u),
+ DEF_SGET(3u, Instruction::SGET_BOOLEAN, 9u, 7u),
+ DEF_SGET(3u, Instruction::SGET_OBJECT, 10u, 8u),
+ DEF_SGET(3u, Instruction::SGET_OBJECT, 11u, 9u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IPut) {
+ static const FieldDef ifields[] = {
+ { kClassName, "B", "byteField" },
+ { kClassName, "C", "charField" },
+ { kClassName, "D", "doubleField" },
+ { kClassName, "F", "floatField" },
+ { kClassName, "I", "intField" },
+ { kClassName, "J", "longField" },
+ { kClassName, "S", "shortField" },
+ { kClassName, "Z", "booleanField" },
+ { kClassName, "Ljava/lang/Object;", "objectField" },
+ { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+ };
+ constexpr uint32_t thiz = kLocalVRs;
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_BYTE, 0u, thiz, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_CHAR, 1u, thiz, 1u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+ DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 2u, thiz, 2u),
+ DEF_CONST(3u, Instruction::CONST, 4u, 0),
+ DEF_IPUT(3u, Instruction::IPUT, 4u, thiz, 3u),
+ DEF_CONST(3u, Instruction::CONST, 5u, 0),
+ DEF_IPUT(3u, Instruction::IPUT, 5u, thiz, 4u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+ DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 6u, thiz, 5u),
+ DEF_CONST(3u, Instruction::CONST, 8u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_SHORT, 8u, thiz, 6u),
+ DEF_CONST(3u, Instruction::CONST, 9u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_BOOLEAN, 9u, thiz, 7u),
+ DEF_CONST(3u, Instruction::CONST, 10u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_OBJECT, 10u, thiz, 8u),
+ DEF_CONST(3u, Instruction::CONST, 11u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_OBJECT, 11u, thiz, 9u),
+ };
+
+ PrepareIFields(ifields);
+ BuildDexFile("()V", false);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SPut) {
+ static const FieldDef sfields[] = {
+ { kClassName, "B", "staticByteField" },
+ { kClassName, "C", "staticCharField" },
+ { kClassName, "D", "staticDoubleField" },
+ { kClassName, "F", "staticFloatField" },
+ { kClassName, "I", "staticIntField" },
+ { kClassName, "J", "staticLongField" },
+ { kClassName, "S", "staticShortField" },
+ { kClassName, "Z", "staticBooleanField" },
+ { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+ { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_BYTE, 0u, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_CHAR, 1u, 1u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+ DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 2u, 2u),
+ DEF_CONST(3u, Instruction::CONST, 4u, 0),
+ DEF_SPUT(3u, Instruction::SPUT, 4u, 3u),
+ DEF_CONST(3u, Instruction::CONST, 5u, 0),
+ DEF_SPUT(3u, Instruction::SPUT, 5u, 4u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+ DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 6u, 5u),
+ DEF_CONST(3u, Instruction::CONST, 8u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_SHORT, 8u, 6u),
+ DEF_CONST(3u, Instruction::CONST, 9u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_BOOLEAN, 9u, 7u),
+ DEF_CONST(3u, Instruction::CONST, 10u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 10u, 8u),
+ DEF_CONST(3u, Instruction::CONST, 11u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 11u, 9u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodReturnType) {
+ static const MethodDef methods[] = {
+ { kClassName, "()B", "byteFoo", kStatic },
+ { kClassName, "()C", "charFoo", kStatic },
+ { kClassName, "()D", "doubleFoo", kStatic },
+ { kClassName, "()F", "floatFoo", kStatic },
+ { kClassName, "()I", "intFoo", kStatic },
+ { kClassName, "()J", "longFoo", kStatic },
+ { kClassName, "()S", "shortFoo", kStatic },
+ { kClassName, "()Z", "booleanFoo", kStatic },
+ { kClassName, "()Ljava/lang/Object;", "objectFoo", kStatic },
+ { kClassName, "()[Ljava/lang/Object;", "objectArrayFoo", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 0u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 0u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 1u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 1u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 2u),
+ DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 2u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 3u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 4u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 4u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 5u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 5u),
+ DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 6u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 6u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 8u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 7u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 9u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 8u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 10u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 9u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 11u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i + 1].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i + 1].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodArgType) {
+ static const MethodDef methods[] = {
+ { kClassName, "(B)V", "fooByte", kStatic },
+ { kClassName, "(C)V", "fooChar", kStatic },
+ { kClassName, "(D)V", "fooDouble", kStatic },
+ { kClassName, "(F)V", "fooFloat", kStatic },
+ { kClassName, "(I)V", "fooInt", kStatic },
+ { kClassName, "(J)V", "fooLong", kStatic },
+ { kClassName, "(S)V", "fooShort", kStatic },
+ { kClassName, "(Z)V", "fooBoolean", kStatic },
+ { kClassName, "(Ljava/lang/Object;)V", "fooObject", kStatic },
+ { kClassName, "([Ljava/lang/Object;)V", "fooObjectArray", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 0u, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 1u, 1u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 2u, 3u, 2u),
+ DEF_CONST(3u, Instruction::CONST, 4u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 4u, 3u),
+ DEF_CONST(3u, Instruction::CONST, 5u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 5u, 4u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 6u, 7u, 5u),
+ DEF_CONST(3u, Instruction::CONST, 8u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 8u, 6u),
+ DEF_CONST(3u, Instruction::CONST, 9u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 9u, 7u),
+ DEF_CONST(3u, Instruction::CONST, 10u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 10u, 8u),
+ DEF_CONST(3u, Instruction::CONST, 11u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 11u, 9u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // value; can't even determine whether core or fp.
+ DEF_CONST(3u, Instruction::CONST, 2u, 0), // index
+ DEF_APUT(3u, Instruction::APUT, 1u, 0u, 2u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayNarrow },
+ { 0u, kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut2) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // Object[] value
+ DEF_CONST(3u, Instruction::CONST, 2u, 0), // index
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut3) {
+ static const MIRDef mirs[] = {
+ // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // Object[] array1
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // Object[] array2
+ DEF_CONST(3u, Instruction::CONST, 2u, 0), // index
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 0u, 1u, 2u),
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut4) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 3u, 0), // value; can't even determine whether core or fp.
+ DEF_APUT(3u, Instruction::APUT, 3u, 2u, 1u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayNarrow },
+ { 0u, kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut5) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 3u, 0), // Object[] value
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut6) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // Object[] array1
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u), // Object[] array2
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 2u, 3u, 1u),
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, TwoNullObjectArraysInLoop) {
+ static const MIRDef mirs[] = {
+ // void foo() {
+ // Object[] array1 = ((Object[])null)[0];
+ // Object[] array2 = ((Object[])null)[0];
+ // for (int i = 0; i != 3; ++i) {
+ // Object[] a1 = null; // One of these could be Object[][] but not both.
+ // Object[] a2 = null; // But they will be deduced as Object[].
+ // try { a1[0] = a2; } catch (Throwable ignored) { }
+ // try { a2[0] = a1; } catch (Throwable ignored) { }
+ // array1 = a1;
+ // array2 = a2;
+ // }
+ // }
+ //
+ // Omitting the try-catch:
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // null
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // array1
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u), // array2
+ DEF_PHI2(4u, 4u, 2u, 8u), // ? + [L -> [? gives [L (see array-length below)
+ DEF_PHI2(4u, 5u, 3u, 9u), // ? + [L -> ? gives ?
+ DEF_AGET(4u, Instruction::AGET_OBJECT, 6u, 0u, 1u), // a1
+ DEF_AGET(4u, Instruction::AGET_OBJECT, 7u, 0u, 1u), // a2
+ DEF_APUT(4u, Instruction::APUT_OBJECT, 6u, 7u, 1u),
+ DEF_APUT(4u, Instruction::APUT_OBJECT, 7u, 6u, 1u),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 8u, 6u),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 9u, 7u),
+ DEF_UNOP(5u, Instruction::ARRAY_LENGTH, 10u, 4u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareLoop();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayArrayFloat) {
+ static const MethodDef methods[] = {
+ { kClassName, "(F)V", "fooFloat", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ // void foo() {
+ // try {
+ // float[][][] aaaf = null;
+ // float[][] array = aaaf[0]; // Make sure array is treated as properly typed.
+ // array[0][0] = 0.0f; // const + aget-object[1] + aput
+ // fooFloat(array[0][0]); // aget-object[2] + aget + invoke
+ // // invoke: signature => input is F.
+ // // aget: output is F => base is [F (precise)
+ // // aget-object[2]: output is [F => base is [[F (precise)
+ // // aput: unknown input type => base is [?
+ // // aget-object[1]: base is [[F => result is L or [F, merge with [? => result is [F
+ // // aput (again): base is [F => result is F
+ // // const: F determined by the aput reprocessing.
+ // } catch (Throwable ignored) {
+ // }
+ // }
+ //
+ // Omitting the try-catch:
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // 0
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // aaaf
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 1u, 0u), // array = aaaf[0]
+ DEF_CONST(3u, Instruction::CONST, 3u, 0), // 0.0f
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 4u, 2u, 0u), // array[0]
+ DEF_APUT(3u, Instruction::APUT, 3u, 4u, 0u), // array[0][0] = 0.0f
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 5u, 2u, 0u), // array[0]
+ DEF_AGET(3u, Instruction::AGET, 6u, 5u, 0u), // array[0][0]
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 6u, 0u), // fooFloat(array[0][0])
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectFp | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+ // Pseudo-phi from [I and [I into L infers only L but not [.
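+ // (The merged value has no array use here, so the element type from the check-casts is not
+ // carried over; CheckCast2 below adds an ARRAY_LENGTH use and does infer [I.)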
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast2) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+ // Pseudo-phi from [I and [I into [? infers [I.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast3) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+ // Pseudo-phi from [I and [F into L correctly leaves it as L.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // The type conflict in array element wasn't propagated to an SSA reg.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict2) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ DEF_AGET(6u, Instruction::AGET, 4u, 2u, 1u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // Type conflict in an SSA reg, register promotion disabled.
+ EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+ // Phi from [I and [I infers only L but not [.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi2) {
+ static const TypeDef types[] = {
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+ // Phi from [F and [F into [? infers [F.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi3) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+ // Phi from [I and [F infers L.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi4) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_CONST(5u, Instruction::CONST, 2u, 0),
+ // Pseudo-phi from [I and null infers L.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // The type conflict in array element wasn't propagated to an SSA reg.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict2) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ DEF_AGET(6u, Instruction::AGET, 4u, 3u, 0u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // Type conflict in an SSA reg, register promotion disabled.
+ EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Wide1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // long[]
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 3u, 0), // long
+ DEF_APUT_WIDE(3u, Instruction::APUT_WIDE, 3u, 2u, 1u),
+ { 3u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 2u }, 0u, { } },
+ };
+
+ BuildDexFile("()[J", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 0u, kExpectCore | kExpectWide },
+ // NOTE: High word checked implicitly for sreg = 3.
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, WideSizeConflict1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 0u, 0),
+ DEF_MOVE(3u, Instruction::MOVE, 2u, 0u),
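+ // The narrow MOVE reads the low half of the wide constant; this wide/narrow size conflict
+ // is expected to make the compiler punt to the interpreter (checked below).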
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectNarrow | kExpectWide },
+ { 0u, kExpectNarrow | kExpectWide },
+ };
+ ExpectSRegType(0u, expectations[0], false);
+ ExpectSRegType(2u, expectations[1], false);
+ EXPECT_TRUE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayLongLength) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[J", "arrayLongField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(4u, Instruction::CONST, 0u, 0),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+ DEF_PHI2(6u, 2u, 0u, 1u),
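+ // Merging the null constant with the [J field propagates the array type back to the null,
+ // as the expectations below show.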
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+ DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayCore | kExpectArrayWide },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayArrayObjectLength) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[[Ljava/lang/Object;", "arrayLongField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(4u, Instruction::CONST, 0u, 0),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+ DEF_PHI2(6u, 2u, 0u, 1u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+ DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGetAdd0SPut) {
+ static const FieldDef sfields[] = {
+ { kClassName, "I", "staticIntField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+ DEF_UNOP(3u, Instruction::ADD_INT_LIT8, 1u, 0u), // +0
+ DEF_SPUT(3u, Instruction::SPUT, 1u, 0u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveObjectNull) {
+ static const MethodDef methods[] = {
+ { kClassName, "([I[D)V", "foo", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_MOVE(3u, Instruction::MOVE_OBJECT, 1u, 0u),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+ };
+ ExpectSRegType(0u, expectation);
+ ExpectSRegType(1u, expectation);
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveNull1) {
+ static const MethodDef methods[] = {
+ { kClassName, "([I[D)V", "foo", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_MOVE(3u, Instruction::MOVE, 1u, 0u),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectCore | kExpectRef | kExpectFp | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+ };
+ ExpectSRegType(0u, expectation);
+ ExpectSRegType(1u, expectation);
+ // Type conflict using move instead of move-object for null, register promotion disabled.
+ EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveNull2) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[F", "staticArrayArrayFloatField" },
+ { kClassName, "[I", "staticArrayIntField" },
+ { kClassName, "[[I", "staticArrayArrayIntField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(4u, Instruction::CONST, 0u, 0),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 1u, 0u),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 2u, 1u),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 3u, 0u),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 4u, 1u),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 5u, 2u),
+ DEF_PHI2(6u, 6u, 0u, 3u),
+ DEF_PHI2(6u, 7u, 1u, 4u),
+ DEF_PHI2(6u, 8u, 2u, 5u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 9u, 6u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 10u, 7u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 11u, 8u),
+ { 6u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 8u }, 0u, { } },
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()[[I", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ // Type conflict in array type not propagated to actual register.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ReuseNull1) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[I", "staticArrayLongField" },
+ { kClassName, "[[F", "staticArrayArrayFloatField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow
+ };
+ ExpectSRegType(0u, expectation);
+ // Type conflict in array type not propagated to actual register.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ReuseNull2) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[J", "staticArrayLongField" },
+ { kClassName, "[[F", "staticArrayArrayFloatField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+ };
+ ExpectSRegType(0u, expectation);
+ // Type conflict in array type not propagated to actual register.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArgIsNonNull) {
+ constexpr uint32_t thiz = kLocalVRs;
+ static const MIRDef mirs[] = {
+ DEF_MOVE(3u, Instruction::MOVE_OBJECT, 0u, thiz),
+ };
+
+ BuildDexFile("(Ljava/lang/Object;)V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 0u,
+ kExpectRef | kExpectNarrow
+ };
+ ExpectSRegType(0u, expectation);
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IfCc) {
+ static const FieldDef sfields[] = {
+ { kClassName, "I", "intField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0u),
+ { 3u, Instruction::IF_EQ, 0, 0u, 2, { 0u, 1u }, 0, { } },
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", false);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+} // namespace art
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a4df00e203..c1d5cb7213 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -46,7 +46,7 @@ VerificationResults::~VerificationResults() {
}
bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
- DCHECK(method_verifier != NULL);
+ DCHECK(method_verifier != nullptr);
MethodReference ref = method_verifier->GetMethodReference();
bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 977757fd3e..7eba515200 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -166,7 +166,7 @@ void VerifiedMethod::VerifyGcMap(verifier::MethodVerifier* method_verifier,
}
}
} else {
- DCHECK(i >= 65536 || reg_bitmap == NULL);
+ DCHECK(i >= 65536 || reg_bitmap == nullptr);
}
}
}
@@ -283,7 +283,7 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
}
mirror::ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
- if (abstract_method == NULL) {
+ if (abstract_method == nullptr) {
// If the method is not found in the cache this means that it was never found
// by ResolveMethodAndCheckAccess() called when verifying invoke_*.
continue;
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 437ae52437..ad07639b1c 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -59,7 +59,7 @@ class VerifiedMethod {
return safe_cast_set_;
}
- // Returns the devirtualization target method, or nullptr if none.
+ // Returns the devirtualization target method, or null if none.
const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
// Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 2b78e38f5a..948ba7b273 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -23,400 +23,6 @@
namespace art {
-bool MIRGraph::SetFp(int index, bool is_fp) {
- bool change = false;
- if (is_fp && !reg_location_[index].fp) {
- reg_location_[index].fp = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetFp(int index) {
- bool change = false;
- if (!reg_location_[index].fp) {
- reg_location_[index].fp = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetCore(int index, bool is_core) {
- bool change = false;
- if (is_core && !reg_location_[index].defined) {
- reg_location_[index].core = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetCore(int index) {
- bool change = false;
- if (!reg_location_[index].defined) {
- reg_location_[index].core = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetRef(int index, bool is_ref) {
- bool change = false;
- if (is_ref && !reg_location_[index].defined) {
- reg_location_[index].ref = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetRef(int index) {
- bool change = false;
- if (!reg_location_[index].defined) {
- reg_location_[index].ref = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetWide(int index, bool is_wide) {
- bool change = false;
- if (is_wide && !reg_location_[index].wide) {
- reg_location_[index].wide = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetWide(int index) {
- bool change = false;
- if (!reg_location_[index].wide) {
- reg_location_[index].wide = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetHigh(int index, bool is_high) {
- bool change = false;
- if (is_high && !reg_location_[index].high_word) {
- reg_location_[index].high_word = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetHigh(int index) {
- bool change = false;
- if (!reg_location_[index].high_word) {
- reg_location_[index].high_word = true;
- change = true;
- }
- return change;
-}
-
-
-/*
- * Infer types and sizes. We don't need to track change on sizes,
- * as it doesn't propagate. We're guaranteed at least one pass through
- * the cfg.
- */
-bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
- SSARepresentation *ssa_rep = mir->ssa_rep;
-
- /*
- * The dex bytecode definition does not explicitly outlaw the definition of the same
- * virtual register to be used in both a 32-bit and 64-bit pair context. However, dx
- * does not generate this pattern (at least recently). Further, in the next revision of
- * dex, we will forbid this. To support the few cases in the wild, detect this pattern
- * and punt to the interpreter.
- */
- bool type_mismatch = false;
-
- if (ssa_rep) {
- uint64_t attrs = GetDataFlowAttributes(mir);
- const int* uses = ssa_rep->uses;
- const int* defs = ssa_rep->defs;
-
- // Handle defs
- if (attrs & DF_DA) {
- if (attrs & DF_CORE_A) {
- changed |= SetCore(defs[0]);
- }
- if (attrs & DF_REF_A) {
- changed |= SetRef(defs[0]);
- }
- if (attrs & DF_A_WIDE) {
- reg_location_[defs[0]].wide = true;
- reg_location_[defs[1]].wide = true;
- reg_location_[defs[1]].high_word = true;
- DCHECK_EQ(SRegToVReg(defs[0])+1,
- SRegToVReg(defs[1]));
- }
- }
-
-
- // Handles uses
- int next = 0;
- if (attrs & DF_UA) {
- if (attrs & DF_CORE_A) {
- changed |= SetCore(uses[next]);
- }
- if (attrs & DF_REF_A) {
- changed |= SetRef(uses[next]);
- }
- if (attrs & DF_A_WIDE) {
- reg_location_[uses[next]].wide = true;
- reg_location_[uses[next + 1]].wide = true;
- reg_location_[uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[next])+1,
- SRegToVReg(uses[next + 1]));
- next += 2;
- } else {
- type_mismatch |= reg_location_[uses[next]].wide;
- next++;
- }
- }
- if (attrs & DF_UB) {
- if (attrs & DF_CORE_B) {
- changed |= SetCore(uses[next]);
- }
- if (attrs & DF_REF_B) {
- changed |= SetRef(uses[next]);
- }
- if (attrs & DF_B_WIDE) {
- reg_location_[uses[next]].wide = true;
- reg_location_[uses[next + 1]].wide = true;
- reg_location_[uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[next])+1,
- SRegToVReg(uses[next + 1]));
- next += 2;
- } else {
- type_mismatch |= reg_location_[uses[next]].wide;
- next++;
- }
- }
- if (attrs & DF_UC) {
- if (attrs & DF_CORE_C) {
- changed |= SetCore(uses[next]);
- }
- if (attrs & DF_REF_C) {
- changed |= SetRef(uses[next]);
- }
- if (attrs & DF_C_WIDE) {
- reg_location_[uses[next]].wide = true;
- reg_location_[uses[next + 1]].wide = true;
- reg_location_[uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[next])+1,
- SRegToVReg(uses[next + 1]));
- } else {
- type_mismatch |= reg_location_[uses[next]].wide;
- }
- }
-
- // Special-case return handling
- if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
- (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
- (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
- switch (cu_->shorty[0]) {
- case 'I':
- type_mismatch |= reg_location_[uses[0]].wide;
- changed |= SetCore(uses[0]);
- break;
- case 'J':
- changed |= SetCore(uses[0]);
- changed |= SetCore(uses[1]);
- reg_location_[uses[0]].wide = true;
- reg_location_[uses[1]].wide = true;
- reg_location_[uses[1]].high_word = true;
- break;
- case 'F':
- type_mismatch |= reg_location_[uses[0]].wide;
- changed |= SetFp(uses[0]);
- break;
- case 'D':
- changed |= SetFp(uses[0]);
- changed |= SetFp(uses[1]);
- reg_location_[uses[0]].wide = true;
- reg_location_[uses[1]].wide = true;
- reg_location_[uses[1]].high_word = true;
- break;
- case 'L':
- type_mismatch |= reg_location_[uses[0]].wide;
- changed |= SetRef(uses[0]);
- break;
- default: break;
- }
- }
-
- // Special-case handling for format 35c/3rc invokes
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- int flags = MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
- 0 : mir->dalvikInsn.FlagsOf();
- if ((flags & Instruction::kInvoke) &&
- (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
- DCHECK_EQ(next, 0);
- const auto& lowering_info = GetMethodLoweringInfo(mir);
- const char* shorty = GetShortyFromMethodReference(lowering_info.GetTargetMethod());
- // Handle result type if floating point
- if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
- MIR* move_result_mir = FindMoveResult(bb, mir);
- // Result might not be used at all, so no move-result
- if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
- Instruction::MOVE_RESULT_OBJECT)) {
- SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
- DCHECK(tgt_rep != NULL);
- tgt_rep->fp_def[0] = true;
- changed |= SetFp(tgt_rep->defs[0]);
- if (shorty[0] == 'D') {
- tgt_rep->fp_def[1] = true;
- changed |= SetFp(tgt_rep->defs[1]);
- }
- }
- }
- int num_uses = mir->dalvikInsn.vA;
- // If this is a non-static invoke, mark implicit "this"
- if (!IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
- reg_location_[uses[next]].defined = true;
- reg_location_[uses[next]].ref = true;
- type_mismatch |= reg_location_[uses[next]].wide;
- next++;
- }
- uint32_t cpos = 1;
- if (strlen(shorty) > 1) {
- for (int i = next; i < num_uses;) {
- DCHECK_LT(cpos, strlen(shorty));
- switch (shorty[cpos++]) {
- case 'D':
- ssa_rep->fp_use[i] = true;
- ssa_rep->fp_use[i+1] = true;
- reg_location_[uses[i]].wide = true;
- reg_location_[uses[i+1]].wide = true;
- reg_location_[uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
- i++;
- break;
- case 'J':
- reg_location_[uses[i]].wide = true;
- reg_location_[uses[i+1]].wide = true;
- reg_location_[uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
- changed |= SetCore(uses[i]);
- i++;
- break;
- case 'F':
- type_mismatch |= reg_location_[uses[i]].wide;
- ssa_rep->fp_use[i] = true;
- break;
- case 'L':
- type_mismatch |= reg_location_[uses[i]].wide;
- changed |= SetRef(uses[i]);
- break;
- default:
- type_mismatch |= reg_location_[uses[i]].wide;
- changed |= SetCore(uses[i]);
- break;
- }
- i++;
- }
- }
- }
-
- for (int i = 0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
- if (ssa_rep->fp_use[i]) {
- changed |= SetFp(uses[i]);
- }
- }
- for (int i = 0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
- if (ssa_rep->fp_def[i]) {
- changed |= SetFp(defs[i]);
- }
- }
- // Special-case handling for moves & Phi
- if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
- /*
- * If any of our inputs or outputs is defined, set all.
- * Some ugliness related to Phi nodes and wide values.
- * The Phi set will include all low words or all high
- * words, so we have to treat them specially.
- */
- bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi);
- RegLocation rl_temp = reg_location_[defs[0]];
- bool defined_fp = rl_temp.defined && rl_temp.fp;
- bool defined_core = rl_temp.defined && rl_temp.core;
- bool defined_ref = rl_temp.defined && rl_temp.ref;
- bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
- bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
- for (int i = 0; i < ssa_rep->num_uses; i++) {
- rl_temp = reg_location_[uses[i]];
- defined_fp |= rl_temp.defined && rl_temp.fp;
- defined_core |= rl_temp.defined && rl_temp.core;
- defined_ref |= rl_temp.defined && rl_temp.ref;
- is_wide |= rl_temp.wide;
- is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
- }
- /*
- * We don't normally expect to see a Dalvik register definition used both as a
- * floating point and core value, though technically it could happen with constants.
- * Until we have proper typing, detect this situation and disable register promotion
- * (which relies on the distinction between core a fp usages).
- */
- if ((defined_fp && (defined_core | defined_ref)) &&
- ((cu_->disable_opt & (1 << kPromoteRegs)) == 0)) {
- LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
- << " op at block " << bb->id
- << " has both fp and core/ref uses for same def.";
- cu_->disable_opt |= (1 << kPromoteRegs);
- }
- changed |= SetFp(defs[0], defined_fp);
- changed |= SetCore(defs[0], defined_core);
- changed |= SetRef(defs[0], defined_ref);
- changed |= SetWide(defs[0], is_wide);
- changed |= SetHigh(defs[0], is_high);
- if (attrs & DF_A_WIDE) {
- changed |= SetWide(defs[1]);
- changed |= SetHigh(defs[1]);
- }
-
- bool has_ins = (GetNumOfInVRs() > 0);
-
- for (int i = 0; i < ssa_rep->num_uses; i++) {
- if (has_ins && IsInVReg(uses[i])) {
- // NB: The SSA name for the first def of an in-reg will be the same as
- // the reg's actual name.
- if (!reg_location_[uses[i]].fp && defined_fp) {
- // If we were about to infer that this first def of an in-reg is a float
- // when it wasn't previously (because float/int is set during SSA initialization),
- // do not allow this to happen.
- continue;
- }
- }
- changed |= SetFp(uses[i], defined_fp);
- changed |= SetCore(uses[i], defined_core);
- changed |= SetRef(uses[i], defined_ref);
- changed |= SetWide(uses[i], is_wide);
- changed |= SetHigh(uses[i], is_high);
- }
- if (attrs & DF_A_WIDE) {
- DCHECK_EQ(ssa_rep->num_uses, 2);
- changed |= SetWide(uses[1]);
- changed |= SetHigh(uses[1]);
- }
- }
- }
- if (type_mismatch) {
- LOG(WARNING) << "Deprecated dex type mismatch, interpreting "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- LOG(INFO) << "@ 0x" << std::hex << mir->offset;
- SetPuntToInterpreter(true);
- }
- return changed;
-}
-
static const char* storage_name[] = {" Frame ", "PhysReg", " CompilerTemp "};
void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
@@ -446,66 +52,12 @@ void MIRGraph::InitRegLocations() {
loc[i] = fresh_loc;
loc[i].s_reg_low = i;
loc[i].is_const = false; // Constants will be marked by constant propagation pass later.
- loc[i].wide = false;
}
- /* Treat Method* as a normal reference */
- int method_sreg = GetMethodSReg();
- loc[method_sreg].ref = true;
- loc[method_sreg].location = kLocCompilerTemp;
- loc[method_sreg].defined = true;
+ /* Mark the location of ArtMethod* as temporary */
+ loc[GetMethodSReg()].location = kLocCompilerTemp;
reg_location_ = loc;
-
- int num_regs = GetNumOfCodeVRs();
-
- /* Add types of incoming arguments based on signature */
- int num_ins = GetNumOfInVRs();
- if (num_ins > 0) {
- int s_reg = num_regs - num_ins;
- if ((cu_->access_flags & kAccStatic) == 0) {
- // For non-static, skip past "this"
- reg_location_[s_reg].defined = true;
- reg_location_[s_reg].ref = true;
- s_reg++;
- }
- const char* shorty = cu_->shorty;
- int shorty_len = strlen(shorty);
- for (int i = 1; i < shorty_len; i++) {
- switch (shorty[i]) {
- case 'D':
- reg_location_[s_reg].wide = true;
- reg_location_[s_reg+1].high_word = true;
- reg_location_[s_reg+1].fp = true;
- DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
- reg_location_[s_reg].fp = true;
- reg_location_[s_reg].defined = true;
- s_reg++;
- break;
- case 'J':
- reg_location_[s_reg].wide = true;
- reg_location_[s_reg+1].high_word = true;
- DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
- reg_location_[s_reg].core = true;
- reg_location_[s_reg].defined = true;
- s_reg++;
- break;
- case 'F':
- reg_location_[s_reg].fp = true;
- reg_location_[s_reg].defined = true;
- break;
- case 'L':
- reg_location_[s_reg].ref = true;
- reg_location_[s_reg].defined = true;
- break;
- default:
- reg_location_[s_reg].core = true;
- reg_location_[s_reg].defined = true;
- break;
- }
- s_reg++;
- }
- }
}
/*
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index b4d46954f1..bad83359d7 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -79,7 +79,7 @@ inline ArtField* CompilerDriver::ResolveFieldWithDexFile(
}
if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
// ClassLinker can return a field of the wrong kind directly from the DexCache.
- // Silently return nullptr on such incompatible class change.
+ // Silently return null on such incompatible class change.
return nullptr;
}
return resolved_field;
@@ -206,7 +206,7 @@ inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
}
if (check_incompatible_class_change &&
UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
- // Silently return nullptr on incompatible class change.
+ // Silently return null on incompatible class change.
return nullptr;
}
return resolved_method;
@@ -302,7 +302,7 @@ inline int CompilerDriver::IsFastInvoke(
target_dex_cache, class_loader,
NullHandle<mirror::ArtMethod>(), kVirtual);
}
- CHECK(called_method != NULL);
+ CHECK(called_method != nullptr);
CHECK(!called_method->IsAbstract());
int stats_flags = kFlagMethodResolved;
GetCodeAndMethodForDirectCall(/*out*/invoke_type,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e665e1d4e8..c858326562 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -495,7 +495,8 @@ void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
- std::unique_ptr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
+ std::unique_ptr<ThreadPool> thread_pool(
+ new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
PreCompile(class_loader, dex_files, thread_pool.get(), timings);
Compile(class_loader, dex_files, thread_pool.get(), timings);
@@ -2101,7 +2102,8 @@ void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFi
VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
-void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
+void CompilerDriver::CompileClass(const ParallelCompilationManager* manager,
+ size_t class_def_index) {
ATRACE_CALL();
const DexFile& dex_file = *manager->GetDexFile();
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
@@ -2251,7 +2253,7 @@ void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_i
// Is eligible for compilation by methods-to-compile filter.
IsMethodToCompile(method_ref);
if (compile) {
- // NOTE: if compiler declines to compile this method, it will return nullptr.
+ // NOTE: if compiler declines to compile this method, it will return null.
compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
method_idx, class_loader, dex_file);
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 50e1fb14e5..03c5c5c352 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -94,7 +94,7 @@ class CompilerDriver {
// Create a compiler targeting the requested "instruction_set".
// "image" should be true if image specific optimizations should be
// enabled. "image_classes" lets the compiler know what classes it
- // can assume will be in the image, with nullptr implying all available
+ // can assume will be in the image, with null implying all available
// classes.
explicit CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
@@ -228,7 +228,7 @@ class CompilerDriver {
mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve compiling method's class. Returns nullptr on failure.
+ // Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
@@ -240,7 +240,7 @@ class CompilerDriver {
const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve a field. Returns nullptr on failure, including incompatible class change.
+ // Resolve a field. Returns null on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
ArtField* ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -290,7 +290,7 @@ class CompilerDriver {
ArtField* resolved_field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve a method. Returns nullptr on failure, including incompatible class change.
+ // Resolve a method. Returns null on failure, including incompatible class change.
mirror::ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -592,16 +592,16 @@ class CompilerDriver {
const bool image_;
// If image_ is true, specifies the classes that will be included in
- // the image. Note if image_classes_ is nullptr, all classes are
+ // the image. Note if image_classes_ is null, all classes are
// included in the image.
std::unique_ptr<std::unordered_set<std::string>> image_classes_;
- // Specifies the classes that will be compiled. Note that if classes_to_compile_ is nullptr,
+ // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
// all classes are eligible for compilation (duplication filters etc. will still apply).
// This option may be restricted to the boot image, depending on a flag in the implementation.
std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
- // Specifies the methods that will be compiled. Note that if methods_to_compile_ is nullptr,
+ // Specifies the methods that will be compiled. Note that if methods_to_compile_ is null,
// all methods are eligible for compilation (compilation filters etc. will still apply).
// This option may be restricted to the boot image, depending on a flag in the implementation.
std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index ded50ca105..5085f32aec 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -56,20 +56,20 @@ class CompilerDriverTest : public CommonCompilerTest {
CHECK(started);
env_ = Thread::Current()->GetJniEnv();
class_ = env_->FindClass(class_name);
- CHECK(class_ != NULL) << "Class not found: " << class_name;
+ CHECK(class_ != nullptr) << "Class not found: " << class_name;
if (is_virtual) {
mid_ = env_->GetMethodID(class_, method, signature);
} else {
mid_ = env_->GetStaticMethodID(class_, method, signature);
}
- CHECK(mid_ != NULL) << "Method not found: " << class_name << "." << method << signature;
+ CHECK(mid_ != nullptr) << "Method not found: " << class_name << "." << method << signature;
}
void MakeAllExecutable(jobject class_loader) {
const std::vector<const DexFile*> class_path = GetDexFiles(class_loader);
for (size_t i = 0; i != class_path.size(); ++i) {
const DexFile* dex_file = class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
MakeDexFileExecutable(class_loader, *dex_file);
}
}
@@ -84,7 +84,7 @@ class CompilerDriverTest : public CommonCompilerTest {
Handle<mirror::ClassLoader> loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
- CHECK(c != NULL);
+ CHECK(c != nullptr);
for (size_t j = 0; j < c->NumDirectMethods(); j++) {
MakeExecutable(c->GetDirectMethod(j));
}
@@ -101,39 +101,38 @@ class CompilerDriverTest : public CommonCompilerTest {
// Disabled due to 10 second runtime on host
TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
- CompileAll(NULL);
+ CompileAll(nullptr);
// All libcore references should resolve
ScopedObjectAccess soa(Thread::Current());
- ASSERT_TRUE(java_lang_dex_file_ != NULL);
+ ASSERT_TRUE(java_lang_dex_file_ != nullptr);
const DexFile& dex = *java_lang_dex_file_;
mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings());
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
const mirror::String* string = dex_cache->GetResolvedString(i);
- EXPECT_TRUE(string != NULL) << "string_idx=" << i;
+ EXPECT_TRUE(string != nullptr) << "string_idx=" << i;
}
EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes());
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
mirror::Class* type = dex_cache->GetResolvedType(i);
- EXPECT_TRUE(type != NULL) << "type_idx=" << i
+ EXPECT_TRUE(type != nullptr) << "type_idx=" << i
<< " " << dex.GetTypeDescriptor(dex.GetTypeId(i));
}
EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods());
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i);
- EXPECT_TRUE(method != NULL) << "method_idx=" << i
+ EXPECT_TRUE(method != nullptr) << "method_idx=" << i
<< " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
<< " " << dex.GetMethodName(dex.GetMethodId(i));
- EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != NULL) << "method_idx=" << i
- << " "
- << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
- << " " << dex.GetMethodName(dex.GetMethodId(i));
+ EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) << "method_idx=" << i
+ << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " "
+ << dex.GetMethodName(dex.GetMethodId(i));
}
EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields());
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(i, dex_cache);
- EXPECT_TRUE(field != NULL) << "field_idx=" << i
+ EXPECT_TRUE(field != nullptr) << "field_idx=" << i
<< " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
<< " " << dex.GetFieldName(dex.GetFieldId(i));
}
@@ -153,14 +152,14 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
CompileDirectMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Object", "<init>", "()V");
class_loader = LoadDex("AbstractMethod");
}
- ASSERT_TRUE(class_loader != NULL);
+ ASSERT_TRUE(class_loader != nullptr);
EnsureCompiled(class_loader, "AbstractClass", "foo", "()V", true);
// Create a jobj_ of ConcreteClass, NOT AbstractClass.
jclass c_class = env_->FindClass("ConcreteClass");
jmethodID constructor = env_->GetMethodID(c_class, "<init>", "()V");
jobject jobj_ = env_->NewObject(c_class, constructor);
- ASSERT_TRUE(jobj_ != NULL);
+ ASSERT_TRUE(jobj_ != nullptr);
// Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception.
env_->CallNonvirtualVoidMethod(jobj_, class_, mid_);
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 03ae489da1..398300699e 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -21,6 +21,7 @@
#include "dex_file.h"
#include "jni.h"
+#include "base/arena_object.h"
namespace art {
namespace mirror {
@@ -31,7 +32,7 @@ class ClassLinker;
struct CompilationUnit;
class VerifiedMethod;
-class DexCompilationUnit {
+class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
public:
explicit DexCompilationUnit(CompilationUnit* cu);
diff --git a/compiler/dwarf/register.h b/compiler/dwarf/register.h
index fa666dffa9..70452377dd 100644
--- a/compiler/dwarf/register.h
+++ b/compiler/dwarf/register.h
@@ -33,6 +33,7 @@ class Reg {
// There are ways around this in DWARF but they are complex.
// It would be much simpler to always spill whole D registers.
// Arm64 mapping is correct since we already do this there.
+ // libunwind might struggle with the new mapping as well.
static Reg ArmCore(int num) { return Reg(num); }
static Reg ArmFp(int num) { return Reg(64 + num); } // S0–S31.
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index b67dd26f71..32c8cce031 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -374,7 +374,7 @@ class ElfSymtabBuilder FINAL : public ElfSectionBuilder<ElfTypes> {
}
Elf_Word GetSize() const {
- // 1 is for the implicit NULL symbol.
+ // 1 is for the implicit null symbol.
return symbols_.size() + 1;
}
@@ -578,7 +578,7 @@ class ElfBuilder FINAL {
hash_builder_(".hash", SHT_HASH, SHF_ALLOC, &dynsym_builder_, 0, sizeof(Elf_Word),
sizeof(Elf_Word)),
dynamic_builder_(".dynamic", &dynsym_builder_),
- shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, NULL, 0, 1, 1) {
+ shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, nullptr, 0, 1, 1) {
SetupEhdr();
SetupDynamic();
SetupRequiredSymbols();
@@ -689,7 +689,7 @@ class ElfBuilder FINAL {
// +-------------------------+ (Optional)
// | .debug_line | (Optional)
// +-------------------------+ (Optional)
- // | Elf_Shdr NULL |
+ // | Elf_Shdr null |
// | Elf_Shdr .dynsym |
// | Elf_Shdr .dynstr |
// | Elf_Shdr .hash |
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index cf0adae525..28e6999472 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -99,6 +99,8 @@ static void WriteEhFrameCIE(InstructionSet isa,
return;
}
case kX86: {
+ // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
+ constexpr bool generate_opcodes_for_x86_fp = false;
DebugFrameOpCodeWriter<> opcodes;
opcodes.DefCFA(Reg::X86Core(4), 4); // R4(ESP).
opcodes.Offset(Reg::X86Core(8), -4); // R8(EIP).
@@ -113,8 +115,10 @@ static void WriteEhFrameCIE(InstructionSet isa,
}
}
// fp registers.
- for (int reg = 0; reg < 8; reg++) {
- opcodes.Undefined(Reg::X86Fp(reg));
+ if (generate_opcodes_for_x86_fp) {
+ for (int reg = 0; reg < 8; reg++) {
+ opcodes.Undefined(Reg::X86Fp(reg));
+ }
}
auto return_reg = Reg::X86Core(8); // R8(EIP).
WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 949fcabc8e..3b2ca9451e 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -148,7 +148,7 @@ bool ElfWriterQuick<ElfTypes>::Write(
RawSection debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
RawSection debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
RawSection debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, NULL, 0, 1, 0);
+ RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, nullptr, 0, 1, 0);
// Do not add to .oat_patches since we will make the addresses relative.
std::vector<uintptr_t> eh_frame_patches;
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 3e5ad7b9f8..08523d8587 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -55,12 +55,12 @@ TEST_F(ElfWriterTest, dlsym) {
LOG(INFO) << "elf_filename=" << elf_filename;
UnreserveImageSpace();
- void* dl_oatdata = NULL;
- void* dl_oatexec = NULL;
- void* dl_oatlastword = NULL;
+ void* dl_oatdata = nullptr;
+ void* dl_oatexec = nullptr;
+ void* dl_oatlastword = nullptr;
std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 8016831e37..eaf3489f8f 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -68,7 +68,7 @@ TEST_F(ImageTest, WriteRead) {
// TODO: compile_pic should be a test argument.
{
{
- jobject class_loader = NULL;
+ jobject class_loader = nullptr;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
TimingLogger timings("ImageTest::WriteRead", false, false);
TimingLogger::ScopedTiming t("CompileAll", &timings);
@@ -92,7 +92,7 @@ TEST_F(ImageTest, WriteRead) {
}
    // Work around a bug where mcld::Linker::emit closes oat_file, by reopening it as dup_oat.
std::unique_ptr<File> dup_oat(OS::OpenFileReadWrite(oat_file.GetFilename().c_str()));
- ASSERT_TRUE(dup_oat.get() != NULL);
+ ASSERT_TRUE(dup_oat.get() != nullptr);
{
bool success_image =
@@ -107,7 +107,7 @@ TEST_F(ImageTest, WriteRead) {
{
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
ImageHeader image_header;
ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
ASSERT_TRUE(image_header.IsValid());
@@ -118,12 +118,12 @@ TEST_F(ImageTest, WriteRead) {
ASSERT_TRUE(!heap->GetContinuousSpaces().empty());
gc::space::ContinuousSpace* space = heap->GetNonMovingSpace();
ASSERT_FALSE(space->IsImageSpace());
- ASSERT_TRUE(space != NULL);
+ ASSERT_TRUE(space != nullptr);
ASSERT_TRUE(space->IsMallocSpace());
ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->GetLength()));
}
- ASSERT_TRUE(compiler_driver_->GetImageClasses() != NULL);
+ ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
std::unordered_set<std::string> image_classes(*compiler_driver_->GetImageClasses());
// Need to delete the compiler since it has worker threads which are attached to runtime.
@@ -137,7 +137,7 @@ TEST_F(ImageTest, WriteRead) {
writer.reset(nullptr);
runtime_.reset();
- java_lang_dex_file_ = NULL;
+ java_lang_dex_file_ = nullptr;
MemMap::Init();
std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
@@ -145,7 +145,7 @@ TEST_F(ImageTest, WriteRead) {
RuntimeOptions options;
std::string image("-Ximage:");
image.append(image_location.GetFilename());
- options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
+ options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
// By default the compiler this creates will not include patch information.
options.push_back(std::make_pair("-Xnorelocate", nullptr));
@@ -158,7 +158,7 @@ TEST_F(ImageTest, WriteRead) {
    // give it away now and then switch to a more manageable ScopedObjectAccess.
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
ScopedObjectAccess soa(Thread::Current());
- ASSERT_TRUE(runtime_.get() != NULL);
+ ASSERT_TRUE(runtime_.get() != nullptr);
class_linker_ = runtime_->GetClassLinker();
gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a99ef3470d..fc70d8f998 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -129,7 +129,7 @@ bool ImageWriter::Write(const std::string& image_filename,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
- if (oat_file.get() == NULL) {
+ if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
return false;
}
@@ -180,7 +180,7 @@ bool ImageWriter::Write(const std::string& image_filename,
std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- if (image_file.get() == NULL) {
+ if (image_file.get() == nullptr) {
LOG(ERROR) << "Failed to open image file " << image_filename;
return false;
}
@@ -519,7 +519,7 @@ bool ImageWriter::AllocMemory() {
void ImageWriter::ComputeLazyFieldsForImageClasses() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
+ class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
}
bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
@@ -675,7 +675,7 @@ void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATT
if (string_id != nullptr) {
// This string occurs in this dex file, assign the dex cache entry.
uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
- if (dex_cache->GetResolvedString(string_idx) == NULL) {
+ if (dex_cache->GetResolvedString(string_idx) == nullptr) {
dex_cache->SetResolvedString(string_idx, string);
}
}
@@ -697,7 +697,7 @@ struct NonImageClasses {
};
void ImageWriter::PruneNonImageClasses() {
- if (compiler_driver_.GetImageClasses() == NULL) {
+ if (compiler_driver_.GetImageClasses() == nullptr) {
return;
}
Runtime* runtime = Runtime::Current();
@@ -712,7 +712,7 @@ void ImageWriter::PruneNonImageClasses() {
// Remove the undesired classes from the class roots.
for (const std::string& it : non_image_classes) {
- bool result = class_linker->RemoveClass(it.c_str(), NULL);
+ bool result = class_linker->RemoveClass(it.c_str(), nullptr);
DCHECK(result);
}
@@ -724,13 +724,13 @@ void ImageWriter::PruneNonImageClasses() {
DexCache* dex_cache = class_linker->GetDexCache(idx);
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
Class* klass = dex_cache->GetResolvedType(i);
- if (klass != NULL && !IsImageClass(klass)) {
- dex_cache->SetResolvedType(i, NULL);
+ if (klass != nullptr && !IsImageClass(klass)) {
+ dex_cache->SetResolvedType(i, nullptr);
}
}
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
ArtMethod* method = dex_cache->GetResolvedMethod(i);
- if (method != NULL && !IsImageClass(method->GetDeclaringClass())) {
+ if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) {
dex_cache->SetResolvedMethod(i, resolution_method);
}
}
@@ -777,14 +777,14 @@ void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
void ImageWriter::DumpImageClasses() {
auto image_classes = compiler_driver_.GetImageClasses();
- CHECK(image_classes != NULL);
+ CHECK(image_classes != nullptr);
for (const std::string& image_class : *image_classes) {
LOG(INFO) << " " << image_class;
}
}
void ImageWriter::CalculateObjectBinSlots(Object* obj) {
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
  // if it is a string, we want to intern it if it's not interned.
if (obj->GetClass()->IsStringClass()) {
// we must be an interned string that was forward referenced and already assigned
@@ -856,7 +856,7 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
- CHECK(image_roots->Get(i) != NULL);
+ CHECK(image_roots->Get(i) != nullptr);
}
return image_roots.Get();
}
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index d25acc74e2..436fc0cfd0 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -47,7 +47,7 @@ ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
@@ -122,7 +122,7 @@ JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synch
return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 2402ea50bf..6f2cb25911 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -152,9 +152,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// References need placing in handle scope and the entry value passing
if (ref_param) {
// Compute handle scope entry, note null is placed in the handle scope but its boxed value
- // must be NULL
+ // must be null.
FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
- // Check handle scope offset is within frame and doesn't run into the saved segment state
+ // Check handle scope offset is within frame and doesn't run into the saved segment state.
CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
CHECK_NE(handle_scope_offset.Uint32Value(),
main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
@@ -243,9 +243,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// 7. Iterate over arguments placing values from managed calling convention in
// to the convention required for a native call (shuffling). For references
// place an index/pointer to the reference after checking whether it is
- // NULL (which must be encoded as NULL).
+ // null (which must be encoded as null).
// Note: we do this prior to materializing the JNIEnv* and static's jclass to
- // give as many free registers for the shuffle as possible
+ // give as many free registers for the shuffle as possible.
mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
uint32_t args_count = 0;
while (mr_conv->HasNext()) {
@@ -451,7 +451,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
ArrayRef<const LinkerPatch>());
}
-// Copy a single parameter from the managed to the JNI calling convention
+// Copy a single parameter from the managed to the JNI calling convention.
static void CopyParameter(Assembler* jni_asm,
ManagedRuntimeCallingConvention* mr_conv,
JniCallingConvention* jni_conv,
@@ -469,7 +469,7 @@ static void CopyParameter(Assembler* jni_asm,
} else {
CHECK(jni_conv->IsCurrentParamOnStack());
}
- // References need placing in handle scope and the entry address passing
+ // References need placing in handle scope and the entry address passing.
if (ref_param) {
null_allowed = mr_conv->IsCurrentArgPossiblyNull();
// Compute handle scope offset. Note null is placed in the handle scope but the jobject
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5abd204f58..d2d38da49f 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -73,7 +73,7 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
image_file_location_oat_begin_(image_file_location_oat_begin),
image_patch_delta_(image_patch_delta),
key_value_store_(key_value_store),
- oat_header_(NULL),
+ oat_header_(nullptr),
size_dex_file_alignment_(0),
size_executable_offset_alignment_(0),
size_oat_header_(0),
@@ -326,7 +326,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
ClassReference class_ref(dex_file_, class_def_index_);
CompiledClass* compiled_class = writer_->compiler_driver_->GetCompiledClass(class_ref);
mirror::Class::Status status;
- if (compiled_class != NULL) {
+ if (compiled_class != nullptr) {
status = compiled_class->GetStatus();
} else if (writer_->compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
status = mirror::Class::kStatusError;
@@ -473,7 +473,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
ClassReference class_ref(dex_file_, class_def_index_);
CompiledClass* compiled_class = compiler_driver->GetCompiledClass(class_ref);
mirror::Class::Status status;
- if (compiled_class != NULL) {
+ if (compiled_class != nullptr) {
status = compiled_class->GetStatus();
} else if (compiler_driver->GetVerificationResults()->IsClassRejected(class_ref)) {
status = mirror::Class::kStatusError;
@@ -690,7 +690,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) { // ie. not an abstract method
+ if (compiled_method != nullptr) { // ie. not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -893,7 +893,7 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) { // ie. not an abstract method
+ if (compiled_method != nullptr) { // ie. not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -940,7 +940,7 @@ bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
}
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data != NULL) { // ie not an empty class, such as a marker interface
+ if (class_data != nullptr) { // ie not an empty class, such as a marker interface
ClassDataItemIterator it(*dex_file, class_data);
while (it.HasNextStaticField()) {
it.Next();
@@ -987,7 +987,7 @@ size_t OatWriter::InitOatDexFiles(size_t offset) {
// create the OatDexFiles
for (size_t i = 0; i != dex_files_->size(); ++i) {
const DexFile* dex_file = (*dex_files_)[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
OatDexFile* oat_dex_file = new OatDexFile(offset, *dex_file);
oat_dex_files_.push_back(oat_dex_file);
offset += oat_dex_file->SizeOf();
@@ -1471,13 +1471,13 @@ OatWriter::OatClass::OatClass(size_t offset,
oat_method_offsets_offset_from_oat_class += sizeof(method_bitmap_size_);
oat_method_offsets_offset_from_oat_class += method_bitmap_size_;
} else {
- method_bitmap_ = NULL;
+ method_bitmap_ = nullptr;
method_bitmap_size_ = 0;
}
for (size_t i = 0; i < num_methods; i++) {
CompiledMethod* compiled_method = compiled_methods_[i];
- if (compiled_method == NULL) {
+ if (compiled_method == nullptr) {
oat_method_offsets_offsets_from_oat_class_[i] = 0;
} else {
oat_method_offsets_offsets_from_oat_class_[i] = oat_method_offsets_offset_from_oat_class;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index cc2b39a8eb..8c79b44153 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -235,13 +235,13 @@ class OatWriter {
// used to validate file position when writing.
size_t offset_;
- // CompiledMethods for each class_def_method_index, or NULL if no method is available.
+ // CompiledMethods for each class_def_method_index, or null if no method is available.
std::vector<CompiledMethod*> compiled_methods_;
// Offset from OatClass::offset_ to the OatMethodOffsets for the
// class_def_method_index. If 0, it means the corresponding
// CompiledMethod entry in OatClass::compiled_methods_ should be
- // NULL and that the OatClass::type_ should be kOatClassBitmap.
+ // null and that the OatClass::type_ should be kOatClassBitmap.
std::vector<uint32_t> oat_method_offsets_offsets_from_oat_class_;
// data to write
@@ -258,12 +258,12 @@ class OatWriter {
// OatClassType::type_ is kOatClassBitmap, a set bit indicates the
// method has an OatMethodOffsets in methods_offsets_, otherwise
  // the entry was omitted to save space. If OatClassType::type_ is
- // not is kOatClassBitmap, the bitmap will be NULL.
+  // not kOatClassBitmap, the bitmap will be null.
BitVector* method_bitmap_;
// OatMethodOffsets and OatMethodHeaders for each CompiledMethod
// present in the OatClass. Note that some may be missing if
- // OatClass::compiled_methods_ contains NULL values (and
+ // OatClass::compiled_methods_ contains null values (and
// oat_method_offsets_offsets_from_oat_class_ should contain 0
// values in this case).
std::vector<OatMethodOffsets> method_offsets_;
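
The layout described by these comments is a sparse table: a bitmap records which methods have an OatMethodOffsets entry, and the entries themselves are stored densely. The self-contained sketch below illustrates the rank-based lookup such a layout permits; the types and names are hypothetical and this is not ART's OatFile reader.

// Sketch only: a bitmap-compressed per-method table. A set bit at index i means
// a record exists for method i; the record's position in the dense array is the
// number of set bits before i.
#include <cstddef>
#include <cstdint>
#include <vector>

struct MethodRecord {
  uint32_t code_offset;
};

struct CompressedMethodTable {
  std::vector<uint64_t> bitmap;       // One bit per class_def_method_index.
  std::vector<MethodRecord> records;  // Only for methods whose bit is set.

  bool HasRecord(size_t method_index) const {
    return ((bitmap[method_index / 64] >> (method_index % 64)) & 1u) != 0;
  }

  // Returns nullptr for methods without a record (bit not set).
  const MethodRecord* GetRecord(size_t method_index) const {
    if (!HasRecord(method_index)) {
      return nullptr;
    }
    size_t rank = 0;
    for (size_t i = 0; i < method_index; ++i) {
      rank += HasRecord(i) ? 1u : 0u;
    }
    return &records[rank];
  }
};
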
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 74848d5d96..708733e28c 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -55,7 +55,7 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
buckets_owned_(allocator, num_buckets_, false),
num_entries_(to_copy.num_entries_) {
// ArenaAllocator returns zeroed memory, so entries of buckets_ and
- // buckets_owned_ are initialized to nullptr and false, respectively.
+ // buckets_owned_ are initialized to null and false, respectively.
DCHECK(IsPowerOfTwo(num_buckets_));
if (num_buckets_ == to_copy.num_buckets_) {
// Hash table remains the same size. We copy the bucket pointers and leave
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 98c0eedeb2..225af77a19 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -427,9 +427,16 @@ void InstructionSimplifierVisitor::VisitMul(HMul* instruction) {
if (Primitive::IsIntOrLongType(type)) {
int64_t factor = Int64FromConstant(input_cst);
- // We expect the `0` case to have been handled in the constant folding pass.
- DCHECK_NE(factor, 0);
- if (IsPowerOfTwo(factor)) {
+ // Even though constant propagation also takes care of the zero case, other
+ // optimizations can lead to having a zero multiplication.
+ if (factor == 0) {
+ // Replace code looking like
+ // MUL dst, src, 0
+ // with
+ // 0
+ instruction->ReplaceWith(input_cst);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ } else if (IsPowerOfTwo(factor)) {
// Replace code looking like
// MUL dst, src, pow_of_2
// with
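
For context, the rewrite rules introduced in this hunk (multiply by zero becomes the constant zero; multiply by a positive power of two becomes a left shift) can be sketched independently of ART's HInstruction graph. The names below are illustrative only, not ART APIs.

// Sketch only: classifies a multiplication by a constant factor the same way
// the simplifier above does, without modeling the instruction graph.
#include <cstdint>
#include <iostream>

enum class MulRewrite { kConstantZero, kShiftLeft, kNone };

struct Decision {
  MulRewrite kind;
  int shift;  // Only meaningful when kind == kShiftLeft.
};

Decision ClassifyMulByConstant(int64_t factor) {
  if (factor == 0) {
    return {MulRewrite::kConstantZero, 0};   // MUL dst, src, 0    ->  0
  }
  if (factor > 0 && (factor & (factor - 1)) == 0) {
    int shift = 0;
    while ((factor >>= 1) != 0) {
      ++shift;
    }
    return {MulRewrite::kShiftLeft, shift};  // MUL dst, src, 2^n  ->  SHL dst, src, n
  }
  return {MulRewrite::kNone, 0};             // Other factors are left alone here.
}

int main() {
  Decision d = ClassifyMulByConstant(8);
  std::cout << static_cast<int>(d.kind) << " shift=" << d.shift << "\n";  // 1 shift=3
}
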
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index bef5896491..6ab57b8e50 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -714,7 +714,7 @@ HConstant* HUnaryOperation::TryStaticEvaluation() const {
// TODO: Implement static evaluation of long unary operations.
//
// Do not exit with a fatal condition here. Instead, simply
- // return `nullptr' to notify the caller that this instruction
+ // return `null' to notify the caller that this instruction
// cannot (yet) be statically evaluated.
return nullptr;
}
@@ -750,7 +750,7 @@ HConstant* HBinaryOperation::GetConstantRight() const {
}
// If `GetConstantRight()` returns one of the inputs, this returns the other
-// one. Otherwise it returns nullptr.
+// one. Otherwise it returns null.
HInstruction* HBinaryOperation::GetLeastConstantLeft() const {
HInstruction* most_constant_right = GetConstantRight();
if (most_constant_right == nullptr) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4923b3c1ff..19227cad69 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1632,7 +1632,7 @@ class HUnaryOperation : public HExpression<1> {
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
- // be evaluated as a constant, return nullptr.
+ // be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x`.
@@ -1700,7 +1700,7 @@ class HBinaryOperation : public HExpression<2> {
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
- // be evaluated as a constant, return nullptr.
+ // be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x` and `y`.
@@ -1708,11 +1708,11 @@ class HBinaryOperation : public HExpression<2> {
virtual int64_t Evaluate(int64_t x, int64_t y) const = 0;
// Returns an input that can legally be used as the right input and is
- // constant, or nullptr.
+ // constant, or null.
HConstant* GetConstantRight() const;
  // If `GetConstantRight()` returns one of the inputs, this returns the other
- // one. Otherwise it returns nullptr.
+ // one. Otherwise it returns null.
HInstruction* GetLeastConstantLeft() const;
DECLARE_INSTRUCTION(BinaryOperation);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 03f5545755..fe70d3a861 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -333,7 +333,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
}
if (after_loop == nullptr) {
// Uses are only in the loop.
- first_range_ = last_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, nullptr);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start, end, nullptr);
} else if (after_loop->GetStart() <= end) {
first_range_ = range_search_start_ = after_loop;
// There are uses after the loop.
@@ -596,7 +597,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
previous->next_ = nullptr;
new_interval->first_range_ = current;
if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
- // Search start point is inside `new_interval`. Change it to nullptr
+ // Search start point is inside `new_interval`. Change it to null
// (i.e. the end of the interval) in the original interval.
range_search_start_ = nullptr;
}
@@ -863,7 +864,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
defined_by_(defined_by) {}
// Searches for a LiveRange that either covers the given position or is the
- // first next LiveRange. Returns nullptr if no such LiveRange exists. Ranges
+ // first next LiveRange. Returns null if no such LiveRange exists. Ranges
// known to end before `position` can be skipped with `search_start`.
LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) const {
if (kIsDebugBuild) {
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index bba98926b3..fbc9d0d8fc 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -66,7 +66,7 @@ TEST_F(OutputStreamTest, File) {
SetOutputStream(output_stream);
GenerateTestOutput();
std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
- EXPECT_TRUE(in.get() != NULL);
+ EXPECT_TRUE(in.get() != nullptr);
std::vector<uint8_t> actual(in->GetLength());
bool readSuccess = in->ReadFully(&actual[0], actual.size());
EXPECT_TRUE(readSuccess);
@@ -76,12 +76,12 @@ TEST_F(OutputStreamTest, File) {
TEST_F(OutputStreamTest, Buffered) {
ScratchFile tmp;
std::unique_ptr<FileOutputStream> file_output_stream(new FileOutputStream(tmp.GetFile()));
- CHECK(file_output_stream.get() != NULL);
+ CHECK(file_output_stream.get() != nullptr);
BufferedOutputStream buffered_output_stream(file_output_stream.release());
SetOutputStream(buffered_output_stream);
GenerateTestOutput();
std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
- EXPECT_TRUE(in.get() != NULL);
+ EXPECT_TRUE(in.get() != nullptr);
std::vector<uint8_t> actual(in->GetLength());
bool readSuccess = in->ReadFully(&actual[0], actual.size());
EXPECT_TRUE(readSuccess);
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index dd0dba2df4..313f365df6 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -739,17 +739,17 @@ class ArmAssembler : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b7715af6c4..e47b5314fd 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -149,14 +149,14 @@ class Arm64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 36342c61c5..b016e74aba 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -41,8 +41,8 @@ AssemblerBuffer::AssemblerBuffer() {
contents_ = NewContents(kInitialBufferCapacity);
cursor_ = contents_;
limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
- fixup_ = NULL;
- slow_path_ = NULL;
+ fixup_ = nullptr;
+ slow_path_ = nullptr;
#ifndef NDEBUG
has_ensured_capacity_ = false;
fixups_processed_ = false;
@@ -61,7 +61,7 @@ AssemblerBuffer::~AssemblerBuffer() {
void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
AssemblerFixup* fixup = fixup_;
- while (fixup != NULL) {
+ while (fixup != nullptr) {
fixup->Process(region, fixup->position());
fixup = fixup->previous();
}
@@ -127,7 +127,7 @@ Assembler* Assembler::Create(InstructionSet instruction_set) {
return new x86_64::X86_64Assembler();
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index ebafd3dd1e..2e3a47bb91 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -156,7 +156,7 @@ class AssemblerFixup {
// Parent of all queued slow paths, emitted during finalization
class SlowPath {
public:
- SlowPath() : next_(NULL) {}
+ SlowPath() : next_(nullptr) {}
virtual ~SlowPath() {}
Label* Continuation() { return &continuation_; }
@@ -216,20 +216,20 @@ class AssemblerBuffer {
}
void EnqueueSlowPath(SlowPath* slowpath) {
- if (slow_path_ == NULL) {
+ if (slow_path_ == nullptr) {
slow_path_ = slowpath;
} else {
SlowPath* cur = slow_path_;
- for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+ for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
cur->next_ = slowpath;
}
}
void EmitSlowPaths(Assembler* sp_asm) {
SlowPath* cur = slow_path_;
- SlowPath* next = NULL;
- slow_path_ = NULL;
- for ( ; cur != NULL ; cur = next) {
+ SlowPath* next = nullptr;
+ slow_path_ = nullptr;
+ for ( ; cur != nullptr ; cur = next) {
cur->Emit(sp_asm);
next = cur->next_;
delete cur;
@@ -489,14 +489,14 @@ class Assembler {
virtual void GetCurrentThread(FrameOffset dest_offset,
ManagedRegister scratch) = 0;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) = 0;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) = 0;
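
The CreateHandleScopeEntry contract documented in these interfaces reduces to one rule: the callee receives either the address of the handle scope slot or null. A minimal C++ illustration of that contract follows; it is not the generated assembly, and the helper name is made up.

// Sketch only: models the documented CreateHandleScopeEntry behavior.
// `handle_scope_slot` is where the JNI stub stored the reference.
void** HandleScopeEntryFor(void** handle_scope_slot, void* reference, bool null_allowed) {
  if (null_allowed && reference == nullptr) {
    return nullptr;          // A null reference is passed to native code as a null jobject.
  }
  return handle_scope_slot;  // Otherwise pass an indirect pointer to the reference.
}
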
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index b062a2aa86..a9a5781093 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -40,8 +40,8 @@ class DedupeSet {
struct HashedKey {
StoreKey* store_ptr;
union {
- HashType store_hash; // Valid if store_ptr != nullptr.
- const HashedInKey* in_key; // Valid if store_ptr == nullptr.
+ HashType store_hash; // Valid if store_ptr != null.
+ const HashedInKey* in_key; // Valid if store_ptr == null.
};
};
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 216cb4164e..d4acf03dc9 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,17 +238,17 @@ class MipsAssembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister mscratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister mscratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 36e74d7cb2..b7f6a9e83a 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -235,14 +235,14 @@ class Mips64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
mscratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h
new file mode 100644
index 0000000000..ab039aa215
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+#define ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+
+#include <cstring>
+#include <set>
+#include <map>
+#include <vector>
+
+#include "dex_file.h"
+#include "utils.h"
+
+namespace art {
+
+class TestDexFileBuilder {
+ public:
+ TestDexFileBuilder()
+ : strings_(), types_(), fields_(), protos_(), dex_file_data_() {
+ }
+
+ void AddString(const std::string& str) {
+ CHECK(dex_file_data_.empty());
+ auto it = strings_.emplace(str, IdxAndDataOffset()).first;
+ CHECK_LT(it->first.length(), 128u); // Don't allow multi-byte length in uleb128.
+ }
+
+ void AddType(const std::string& descriptor) {
+ CHECK(dex_file_data_.empty());
+ AddString(descriptor);
+ types_.emplace(descriptor, 0u);
+ }
+
+ void AddField(const std::string& class_descriptor, const std::string& type,
+ const std::string& name) {
+ CHECK(dex_file_data_.empty());
+ AddType(class_descriptor);
+ AddType(type);
+ AddString(name);
+ FieldKey key = { class_descriptor, type, name };
+ fields_.emplace(key, 0u);
+ }
+
+ void AddMethod(const std::string& class_descriptor, const std::string& signature,
+ const std::string& name) {
+ CHECK(dex_file_data_.empty());
+ AddType(class_descriptor);
+ AddString(name);
+
+ ProtoKey proto_key = CreateProtoKey(signature);
+ AddString(proto_key.shorty);
+ AddType(proto_key.return_type);
+ for (const auto& arg_type : proto_key.args) {
+ AddType(arg_type);
+ }
+ auto it = protos_.emplace(proto_key, IdxAndDataOffset()).first;
+ const ProtoKey* proto = &it->first; // Valid as long as the element remains in protos_.
+
+ MethodKey method_key = {
+ class_descriptor, name, proto
+ };
+ methods_.emplace(method_key, 0u);
+ }
+
+ // NOTE: The builder holds the actual data, so it must live as long as the dex file.
+ std::unique_ptr<const DexFile> Build(const std::string& dex_location) {
+ CHECK(dex_file_data_.empty());
+ union {
+ uint8_t data[sizeof(DexFile::Header)];
+ uint64_t force_alignment;
+ } header_data;
+ std::memset(header_data.data, 0, sizeof(header_data.data));
+ DexFile::Header* header = reinterpret_cast<DexFile::Header*>(&header_data.data);
+ std::copy_n(DexFile::kDexMagic, 4u, header->magic_);
+ std::copy_n(DexFile::kDexMagicVersion, 4u, header->magic_ + 4u);
+    header->header_size_ = sizeof(DexFile::Header);
+ header->endian_tag_ = DexFile::kDexEndianConstant;
+ header->link_size_ = 0u; // Unused.
+ header->link_off_ = 0u; // Unused.
+ header->map_off_ = 0u; // Unused.
+
+ uint32_t data_section_size = 0u;
+
+ uint32_t string_ids_offset = sizeof(DexFile::Header);
+ uint32_t string_idx = 0u;
+ for (auto& entry : strings_) {
+ entry.second.idx = string_idx;
+ string_idx += 1u;
+ entry.second.data_offset = data_section_size;
+ data_section_size += entry.first.length() + 1u /* length */ + 1u /* null-terminator */;
+ }
+ header->string_ids_size_ = strings_.size();
+ header->string_ids_off_ = strings_.empty() ? 0u : string_ids_offset;
+
+ uint32_t type_ids_offset = string_ids_offset + strings_.size() * sizeof(DexFile::StringId);
+ uint32_t type_idx = 0u;
+ for (auto& entry : types_) {
+ entry.second = type_idx;
+ type_idx += 1u;
+ }
+ header->type_ids_size_ = types_.size();
+ header->type_ids_off_ = types_.empty() ? 0u : type_ids_offset;
+
+ uint32_t proto_ids_offset = type_ids_offset + types_.size() * sizeof(DexFile::TypeId);
+ uint32_t proto_idx = 0u;
+ for (auto& entry : protos_) {
+ entry.second.idx = proto_idx;
+ proto_idx += 1u;
+ size_t num_args = entry.first.args.size();
+ if (num_args != 0u) {
+ entry.second.data_offset = RoundUp(data_section_size, 4u);
+ data_section_size = entry.second.data_offset + 4u + num_args * sizeof(DexFile::TypeItem);
+ } else {
+ entry.second.data_offset = 0u;
+ }
+ }
+ header->proto_ids_size_ = protos_.size();
+ header->proto_ids_off_ = protos_.empty() ? 0u : proto_ids_offset;
+
+ uint32_t field_ids_offset = proto_ids_offset + protos_.size() * sizeof(DexFile::ProtoId);
+ uint32_t field_idx = 0u;
+ for (auto& entry : fields_) {
+ entry.second = field_idx;
+ field_idx += 1u;
+ }
+ header->field_ids_size_ = fields_.size();
+ header->field_ids_off_ = fields_.empty() ? 0u : field_ids_offset;
+
+ uint32_t method_ids_offset = field_ids_offset + fields_.size() * sizeof(DexFile::FieldId);
+ uint32_t method_idx = 0u;
+ for (auto& entry : methods_) {
+ entry.second = method_idx;
+ method_idx += 1u;
+ }
+ header->method_ids_size_ = methods_.size();
+ header->method_ids_off_ = methods_.empty() ? 0u : method_ids_offset;
+
+ // No class defs.
+ header->class_defs_size_ = 0u;
+ header->class_defs_off_ = 0u;
+
+ uint32_t data_section_offset = method_ids_offset + methods_.size() * sizeof(DexFile::MethodId);
+ header->data_size_ = data_section_size;
+ header->data_off_ = (data_section_size != 0u) ? data_section_offset : 0u;
+
+ uint32_t total_size = data_section_offset + data_section_size;
+
+ dex_file_data_.resize(total_size);
+ std::memcpy(&dex_file_data_[0], header_data.data, sizeof(DexFile::Header));
+
+ for (const auto& entry : strings_) {
+ CHECK_LT(entry.first.size(), 128u);
+ uint32_t raw_offset = data_section_offset + entry.second.data_offset;
+ dex_file_data_[raw_offset] = static_cast<uint8_t>(entry.first.size());
+ std::memcpy(&dex_file_data_[raw_offset + 1], entry.first.c_str(), entry.first.size() + 1);
+ Write32(string_ids_offset + entry.second.idx * sizeof(DexFile::StringId), raw_offset);
+ }
+
+ for (const auto& entry : types_) {
+ Write32(type_ids_offset + entry.second * sizeof(DexFile::TypeId), GetStringIdx(entry.first));
+ ++type_idx;
+ }
+
+ for (const auto& entry : protos_) {
+ size_t num_args = entry.first.args.size();
+ uint32_t type_list_offset =
+ (num_args != 0u) ? data_section_offset + entry.second.data_offset : 0u;
+ uint32_t raw_offset = proto_ids_offset + entry.second.idx * sizeof(DexFile::ProtoId);
+ Write32(raw_offset + 0u, GetStringIdx(entry.first.shorty));
+ Write16(raw_offset + 4u, GetTypeIdx(entry.first.return_type));
+ Write32(raw_offset + 8u, type_list_offset);
+ if (num_args != 0u) {
+ CHECK_NE(entry.second.data_offset, 0u);
+ Write32(type_list_offset, num_args);
+ for (size_t i = 0; i != num_args; ++i) {
+ Write16(type_list_offset + 4u + i * sizeof(DexFile::TypeItem),
+ GetTypeIdx(entry.first.args[i]));
+ }
+ }
+ }
+
+ for (const auto& entry : fields_) {
+ uint32_t raw_offset = field_ids_offset + entry.second * sizeof(DexFile::FieldId);
+ Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+ Write16(raw_offset + 2u, GetTypeIdx(entry.first.type));
+ Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+ }
+
+ for (const auto& entry : methods_) {
+ uint32_t raw_offset = method_ids_offset + entry.second * sizeof(DexFile::MethodId);
+ Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+ auto it = protos_.find(*entry.first.proto);
+ CHECK(it != protos_.end());
+ Write16(raw_offset + 2u, it->second.idx);
+ Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+ }
+
+ // Leave checksum and signature as zeros.
+
+ std::string error_msg;
+ std::unique_ptr<const DexFile> dex_file(DexFile::Open(
+ &dex_file_data_[0], dex_file_data_.size(), dex_location, 0u, nullptr, &error_msg));
+ CHECK(dex_file != nullptr) << error_msg;
+ return std::move(dex_file);
+ }
+
+ uint32_t GetStringIdx(const std::string& type) {
+ auto it = strings_.find(type);
+ CHECK(it != strings_.end());
+ return it->second.idx;
+ }
+
+ uint32_t GetTypeIdx(const std::string& type) {
+ auto it = types_.find(type);
+ CHECK(it != types_.end());
+ return it->second;
+ }
+
+ uint32_t GetFieldIdx(const std::string& class_descriptor, const std::string& type,
+ const std::string& name) {
+ FieldKey key = { class_descriptor, type, name };
+ auto it = fields_.find(key);
+ CHECK(it != fields_.end());
+ return it->second;
+ }
+
+ uint32_t GetMethodIdx(const std::string& class_descriptor, const std::string& signature,
+ const std::string& name) {
+ ProtoKey proto_key = CreateProtoKey(signature);
+ MethodKey method_key = { class_descriptor, name, &proto_key };
+ auto it = methods_.find(method_key);
+ CHECK(it != methods_.end());
+ return it->second;
+ }
+
+ private:
+ struct IdxAndDataOffset {
+ uint32_t idx;
+ uint32_t data_offset;
+ };
+
+ struct FieldKey {
+ const std::string class_descriptor;
+ const std::string type;
+ const std::string name;
+ };
+ struct FieldKeyComparator {
+ bool operator()(const FieldKey& lhs, const FieldKey& rhs) const {
+ if (lhs.class_descriptor != rhs.class_descriptor) {
+ return lhs.class_descriptor < rhs.class_descriptor;
+ }
+ if (lhs.name != rhs.name) {
+ return lhs.name < rhs.name;
+ }
+ return lhs.type < rhs.type;
+ }
+ };
+
+ struct ProtoKey {
+ std::string shorty;
+ std::string return_type;
+ std::vector<std::string> args;
+ };
+ struct ProtoKeyComparator {
+ bool operator()(const ProtoKey& lhs, const ProtoKey& rhs) const {
+ if (lhs.return_type != rhs.return_type) {
+ return lhs.return_type < rhs.return_type;
+ }
+ size_t min_args = std::min(lhs.args.size(), rhs.args.size());
+ for (size_t i = 0; i != min_args; ++i) {
+ if (lhs.args[i] != rhs.args[i]) {
+ return lhs.args[i] < rhs.args[i];
+ }
+ }
+ return lhs.args.size() < rhs.args.size();
+ }
+ };
+
+ struct MethodKey {
+ std::string class_descriptor;
+ std::string name;
+ const ProtoKey* proto;
+ };
+ struct MethodKeyComparator {
+ bool operator()(const MethodKey& lhs, const MethodKey& rhs) const {
+ if (lhs.class_descriptor != rhs.class_descriptor) {
+ return lhs.class_descriptor < rhs.class_descriptor;
+ }
+ if (lhs.name != rhs.name) {
+ return lhs.name < rhs.name;
+ }
+ return ProtoKeyComparator()(*lhs.proto, *rhs.proto);
+ }
+ };
+
+ ProtoKey CreateProtoKey(const std::string& signature) {
+ CHECK_EQ(signature[0], '(');
+ const char* args = signature.c_str() + 1;
+ const char* args_end = std::strchr(args, ')');
+ CHECK(args_end != nullptr);
+ const char* return_type = args_end + 1;
+
+ ProtoKey key = {
+ std::string() + ((*return_type == '[') ? 'L' : *return_type),
+ return_type,
+ std::vector<std::string>()
+ };
+ while (args != args_end) {
+ key.shorty += (*args == '[') ? 'L' : *args;
+ const char* arg_start = args;
+ while (*args == '[') {
+ ++args;
+ }
+ if (*args == 'L') {
+ do {
+ ++args;
+ CHECK_NE(args, args_end);
+ } while (*args != ';');
+ }
+ ++args;
+ key.args.emplace_back(arg_start, args);
+ }
+ return key;
+ }
+
+ void Write32(size_t offset, uint32_t value) {
+ CHECK_LE(offset + 4u, dex_file_data_.size());
+ CHECK_EQ(dex_file_data_[offset + 0], 0u);
+ CHECK_EQ(dex_file_data_[offset + 1], 0u);
+ CHECK_EQ(dex_file_data_[offset + 2], 0u);
+ CHECK_EQ(dex_file_data_[offset + 3], 0u);
+ dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+ dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+ dex_file_data_[offset + 2] = static_cast<uint8_t>(value >> 16);
+ dex_file_data_[offset + 3] = static_cast<uint8_t>(value >> 24);
+ }
+
+ void Write16(size_t offset, uint32_t value) {
+ CHECK_LE(value, 0xffffu);
+ CHECK_LE(offset + 2u, dex_file_data_.size());
+ CHECK_EQ(dex_file_data_[offset + 0], 0u);
+ CHECK_EQ(dex_file_data_[offset + 1], 0u);
+ dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+ dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+ }
+
+ std::map<std::string, IdxAndDataOffset> strings_;
+ std::map<std::string, uint32_t> types_;
+ std::map<FieldKey, uint32_t, FieldKeyComparator> fields_;
+ std::map<ProtoKey, IdxAndDataOffset, ProtoKeyComparator> protos_;
+ std::map<MethodKey, uint32_t, MethodKeyComparator> methods_;
+
+ std::vector<uint8_t> dex_file_data_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
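
CreateProtoKey() above also derives the dex "shorty": one character for the return type followed by one per argument, with array and class types both collapsing to 'L'. Below is a self-contained sketch of that mapping (assuming a well-formed signature; not part of ART's DexFile API), which also explains the "LLL" shorty expected by the test that follows.

// Sketch only: mirrors the shorty computation in CreateProtoKey() above.
#include <cstring>
#include <string>

std::string ShortyFromSignature(const std::string& signature) {
  const char* args = signature.c_str() + 1;            // Skip '('.
  const char* args_end = std::strchr(args, ')');
  const char* return_type = args_end + 1;
  std::string shorty(1, (*return_type == '[') ? 'L' : *return_type);
  while (args != args_end) {
    shorty += (*args == '[') ? 'L' : *args;            // Arrays and classes map to 'L'.
    while (*args == '[') {
      ++args;                                          // Skip array dimensions.
    }
    if (*args == 'L') {
      while (*args != ';') {
        ++args;                                        // Skip the class descriptor.
      }
    }
    ++args;
  }
  return shorty;
}

// ShortyFromSignature("(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;") == "LLL"
// ShortyFromSignature("()I") == "I"
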
diff --git a/compiler/utils/test_dex_file_builder_test.cc b/compiler/utils/test_dex_file_builder_test.cc
new file mode 100644
index 0000000000..ee6e35dcce
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder_test.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "test_dex_file_builder.h"
+
+#include "dex_file-inl.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(TestDexFileBuilderTest, SimpleTest) {
+ TestDexFileBuilder builder;
+ builder.AddString("Arbitrary string");
+ builder.AddType("Ljava/lang/Class;");
+ builder.AddField("LTestClass;", "[I", "intField");
+ builder.AddMethod("LTestClass;", "()I", "foo");
+ builder.AddMethod("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar");
+ const char* dex_location = "TestDexFileBuilder/SimpleTest";
+ std::unique_ptr<const DexFile> dex_file(builder.Build(dex_location));
+ ASSERT_TRUE(dex_file != nullptr);
+ EXPECT_STREQ(dex_location, dex_file->GetLocation().c_str());
+
+ static const char* const expected_strings[] = {
+ "Arbitrary string",
+ "I",
+ "LLL", // shorty
+ "LTestClass;",
+ "Ljava/lang/Class;",
+ "Ljava/lang/Object;",
+ "[I",
+ "[Ljava/lang/Object;",
+ "bar",
+ "foo",
+ "intField",
+ };
+ ASSERT_EQ(arraysize(expected_strings), dex_file->NumStringIds());
+ for (size_t i = 0; i != arraysize(expected_strings); ++i) {
+ EXPECT_STREQ(expected_strings[i], dex_file->GetStringData(dex_file->GetStringId(i))) << i;
+ }
+
+ static const char* const expected_types[] = {
+ "I",
+ "LTestClass;",
+ "Ljava/lang/Class;",
+ "Ljava/lang/Object;",
+ "[I",
+ "[Ljava/lang/Object;",
+ };
+ ASSERT_EQ(arraysize(expected_types), dex_file->NumTypeIds());
+ for (size_t i = 0; i != arraysize(expected_types); ++i) {
+ EXPECT_STREQ(expected_types[i], dex_file->GetTypeDescriptor(dex_file->GetTypeId(i))) << i;
+ }
+
+ ASSERT_EQ(1u, dex_file->NumFieldIds());
+ EXPECT_STREQ("[I TestClass.intField", PrettyField(0u, *dex_file).c_str());
+
+ ASSERT_EQ(2u, dex_file->NumProtoIds());
+ ASSERT_EQ(2u, dex_file->NumMethodIds());
+ EXPECT_STREQ("TestClass TestClass.bar(java.lang.Object, java.lang.Object[])",
+ PrettyMethod(0u, *dex_file).c_str());
+ EXPECT_STREQ("int TestClass.foo()",
+ PrettyMethod(1u, *dex_file).c_str());
+
+ EXPECT_EQ(0u, builder.GetStringIdx("Arbitrary string"));
+ EXPECT_EQ(2u, builder.GetTypeIdx("Ljava/lang/Class;"));
+ EXPECT_EQ(0u, builder.GetFieldIdx("LTestClass;", "[I", "intField"));
+ EXPECT_EQ(1u, builder.GetMethodIdx("LTestClass;", "()I", "foo"));
+ EXPECT_EQ(0u, builder.GetMethodIdx("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar"));
+}
+
+} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index a933474a39..7fc8ef0815 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -576,17 +576,17 @@ class X86Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 0344f52a3f..c0ca7ef437 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2751,7 +2751,7 @@ void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
- // Use out_reg as indicator of NULL
+ // Use out_reg as indicator of null.
in_reg = out_reg;
// TODO: movzwl
movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 79ad8f53e3..f5327a8d02 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -711,17 +711,17 @@ class X86_64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
virtual void LoadReferenceFromHandleScope(ManagedRegister dst,