-rw-r--r-- | compiler/dex/frontend.cc                      |  2
-rw-r--r-- | compiler/dex/mir_analysis.cc                  |  2
-rw-r--r-- | compiler/dex/mir_dataflow.cc                  |  3
-rw-r--r-- | compiler/dex/mir_graph.cc                     |  8
-rw-r--r-- | compiler/dex/mir_graph.h                      |  4
-rw-r--r-- | compiler/dex/mir_optimization.cc              | 12
-rw-r--r-- | compiler/dex/portable/mir_to_gbc.cc           |  6
-rw-r--r-- | compiler/dex/quick/dex_file_method_inliner.cc |  2
-rw-r--r-- | compiler/dex/quick/mir_to_lir.cc              |  2
-rw-r--r-- | compiler/dex/quick/x86/utility_x86.cc         |  2
-rw-r--r-- | compiler/dex/vreg_analysis.cc                 |  6
11 files changed, 24 insertions, 25 deletions
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 1570c3a241..6bb22846cd 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -758,7 +758,7 @@ static bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
       // Check if we support the byte code.
       if (std::find(support_list, support_list + support_list_size, opcode) ==
           support_list + support_list_size) {
-        if (opcode < kMirOpFirst) {
+        if (!cu.mir_graph->IsPseudoMirOp(opcode)) {
           VLOG(compiler) << "Unsupported dalvik byte code : "
               << mir->dalvikInsn.opcode;
         } else {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 508f1c70bd..7129f8a501 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -902,7 +902,7 @@ void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
   while (!done) {
     tbb->visited = true;
     for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
-      if (static_cast<uint32_t>(mir->dalvikInsn.opcode) >= kMirOpFirst) {
+      if (IsPseudoMirOp(mir->dalvikInsn.opcode)) {
         // Skip any MIR pseudo-op.
         continue;
       }
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 5ff62743ce..9fea709568 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1015,8 +1015,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
     uint64_t df_attributes = GetDataFlowAttributes(mir);
 
       // If not a pseudo-op, note non-leaf or can throw
-    if (static_cast<int>(mir->dalvikInsn.opcode) <
-        static_cast<int>(kNumPackedOpcodes)) {
+    if (!IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
 
       if ((flags & Instruction::kInvoke) != 0 && (mir->optimization_flags & MIR_INLINED) == 0) {
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 99dd50a53c..db28f3a0e9 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -549,8 +549,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
     new_block->predecessors->Insert(cur_block->id);
     MIR* new_insn = NewMIR();
     *new_insn = *insn;
-    insn->dalvikInsn.opcode =
-        static_cast<Instruction::Code>(kMirOpCheck);
+    insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
     // Associate the two halves.
     insn->meta.throw_insn = new_insn;
     new_block->AppendMIR(new_insn);
@@ -837,8 +836,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
         } else {
           fprintf(file, " {%04x %s %s %s\\l}%s\\\n", mir->offset,
                   mir->ssa_rep ? GetDalvikDisassembly(mir) :
-                  (opcode < kMirOpFirst) ?
-                  Instruction::Name(mir->dalvikInsn.opcode) :
+                  !IsPseudoMirOp(opcode) ? Instruction::Name(mir->dalvikInsn.opcode) :
                   extended_mir_op_names_[opcode - kMirOpFirst],
                   (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                   (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
@@ -1141,7 +1139,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
     nop = true;
   }
 
-  if (opcode >= kMirOpFirst) {
+  if (IsPseudoMirOp(opcode)) {
     str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
   } else {
     dalvik_format = Instruction::FormatOf(insn.opcode);
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index b04c16ea78..38cd5ee449 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -905,11 +905,11 @@ class MIRGraph {
     return backward_branches_ + forward_branches_;
   }
 
-  bool IsPseudoMirOp(Instruction::Code opcode) {
+  static bool IsPseudoMirOp(Instruction::Code opcode) {
     return static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst);
   }
 
-  bool IsPseudoMirOp(int opcode) {
+  static bool IsPseudoMirOp(int opcode) {
     return opcode >= static_cast<int>(kMirOpFirst);
   }
 
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 256686ebe1..23f2516899 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -129,17 +129,16 @@ MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
   BasicBlock* tbb = bb;
   mir = AdvanceMIR(&tbb, mir);
   while (mir != NULL) {
-    int opcode = mir->dalvikInsn.opcode;
     if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
       break;
     }
     // Keep going if pseudo op, otherwise terminate
-    if (opcode < kNumPackedOpcodes) {
-      mir = NULL;
-    } else {
+    if (IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       mir = AdvanceMIR(&tbb, mir);
+    } else {
+      mir = NULL;
     }
   }
   return mir;
@@ -866,7 +865,7 @@ bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
       struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
       for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
         tmir =tmir->next) {
-        if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
+        if (IsPseudoMirOp(tmir->dalvikInsn.opcode)) {
           continue;
         }
         // First non-pseudo should be MOVE_RESULT_OBJECT
@@ -1183,6 +1182,9 @@ void MIRGraph::InlineCalls(BasicBlock* bb) {
     return;
   }
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (IsPseudoMirOp(mir->dalvikInsn.opcode)) {
+      continue;
+    }
     if (!(Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke)) {
       continue;
     }
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 576e2424fa..fd67608e73 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -712,7 +712,7 @@ bool MirConverter::ConvertMIRNode(MIR* mir, BasicBlock* bb,
   int opt_flags = mir->optimization_flags;
 
   if (cu_->verbose) {
-    if (op_val < kMirOpFirst) {
+    if (!IsPseudoMirOp(op_val)) {
       LOG(INFO) << ".. " << Instruction::Name(opcode) << " 0x" << std::hex << op_val;
     } else {
       LOG(INFO) << mir_graph_->extended_mir_op_names_[op_val - kMirOpFirst] << " 0x" << std::hex << op_val;
@@ -1550,7 +1550,7 @@ void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb) {
   SetDexOffset(bb->start_offset);
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int opcode = mir->dalvikInsn.opcode;
-    if (opcode < kMirOpFirst) {
+    if (!IsPseudoMirOp(opcode)) {
       // Stop after first non-pseudo MIR op.
       continue;
     }
@@ -1759,7 +1759,7 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) {
       }
     }
 
-    if (opcode >= kMirOpFirst) {
+    if (IsPseudoMirOp(opcode)) {
       ConvertExtendedMIR(bb, mir, llvm_bb);
       continue;
     }
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 526c981ae9..6397208790 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -43,6 +43,7 @@ MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
 
 uint32_t GetInvokeReg(MIR* invoke, uint32_t arg) {
   DCHECK_LT(arg, invoke->dalvikInsn.vA);
+  DCHECK(!MIRGraph::IsPseudoMirOp(invoke->dalvikInsn.opcode));
   if (Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc) {
     return invoke->dalvikInsn.vC + arg;  // Non-range invoke.
   } else {
@@ -53,6 +54,7 @@ uint32_t GetInvokeReg(MIR* invoke, uint32_t arg) {
 
 bool WideArgIsInConsecutiveDalvikRegs(MIR* invoke, uint32_t arg) {
   DCHECK_LT(arg + 1, invoke->dalvikInsn.vA);
+  DCHECK(!MIRGraph::IsPseudoMirOp(invoke->dalvikInsn.opcode));
   return Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc ||
       invoke->dalvikInsn.arg[arg + 1u] == invoke->dalvikInsn.arg[arg] + 1u;
 }
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 9fc93d0a1a..df56820c4c 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1069,7 +1069,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
       work_half->meta.throw_insn = mir;
     }
 
-    if (opcode >= kMirOpFirst) {
+    if (MIRGraph::IsPseudoMirOp(opcode)) {
      HandleExtendedMethodMIR(bb, mir);
      continue;
    }
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index ee0225738a..092e68e6b4 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -869,7 +869,7 @@ void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
 
   for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int opcode = mir->dalvikInsn.opcode;
-    if (opcode >= kMirOpFirst) {
+    if (MIRGraph::IsPseudoMirOp(opcode)) {
       AnalyzeExtendedMIR(opcode, bb, mir);
     } else {
       AnalyzeMIR(opcode, bb, mir);
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index a4c62ade40..c4af9cb55c 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -233,8 +233,7 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
 
       // Special-case handling for format 35c/3rc invokes
       Instruction::Code opcode = mir->dalvikInsn.opcode;
-      int flags = (static_cast<int>(opcode) >= kNumPackedOpcodes)
-          ? 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
+      int flags = IsPseudoMirOp(opcode) ? 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
       if ((flags & Instruction::kInvoke) &&
           (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
         DCHECK_EQ(next, 0);
@@ -317,8 +316,7 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
        * The Phi set will include all low words or all high
        * words, so we have to treat them specially.
        */
-      bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) ==
-          kMirOpPhi);
+      bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi);
       RegLocation rl_temp = reg_location_[defs[0]];
       bool defined_fp = rl_temp.defined && rl_temp.fp;
       bool defined_core = rl_temp.defined && rl_temp.core;
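Taken together, these hunks route every pseudo-op test through MIRGraph::IsPseudoMirOp() instead of comparing raw opcode values against kMirOpFirst or kNumPackedOpcodes, and make the helper static so free functions such as GetInvokeReg() can DCHECK with it. Below is a minimal, self-contained C++ sketch of that pattern; the enum values and the main() driver are illustrative placeholders, not ART's real opcode numbering or API.

#include <cstdio>

// Illustrative stand-ins for ART's opcode space: real Dalvik opcodes occupy
// [0, kNumPackedOpcodes), and extended (pseudo) MIR opcodes start at kMirOpFirst.
enum MirOpcode {
  kNumPackedOpcodes = 256,           // placeholder value, not the real count
  kMirOpFirst = kNumPackedOpcodes,   // first pseudo MIR opcode
  kMirOpPhi,
  kMirOpCheck,
};

// Static helper mirroring MIRGraph::IsPseudoMirOp(): no instance is needed,
// so free functions (like the inliner's DCHECKs above) can call it directly.
static bool IsPseudoMirOp(int opcode) {
  return opcode >= static_cast<int>(kMirOpFirst);
}

int main() {
  // Flags/format lookups are only defined for real Dalvik opcodes, so pseudo
  // ops must be skipped (or asserted against) before querying them.
  const int samples[] = {42, kMirOpPhi};
  for (int opcode : samples) {
    std::printf("opcode %d is a pseudo MIR op: %d\n", opcode, IsPseudoMirOp(opcode));
  }
  return 0;
}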