author     Chao-ying Fu <chao-ying.fu@intel.com>   2014-11-11 16:48:40 -0800
committer  Chao-ying Fu <chao-ying.fu@intel.com>   2015-02-09 15:15:15 -0800
commit     72f53af0307b9109a1cfc0671675ce5d45c66d3a
tree       fc25359ca59f8f3b69a03a7d3726d615086ce1f4
parent     2a3611feeb12bd73ccdbb4692f9ca3705f925d56
ART: Remove MIRGraph::dex_pc_to_block_map_
This patch removes MIRGraph::dex_pc_to_block_map_, adds a local
variable dex_pc_to_block_map inside MIRGraph::InlineMethod(), and
updates several functions to pass dex_pc_to_block_map.
The goal is to limit the scope of dex_pc_to_block_map and the use of
FindBlock, so that compiler optimizations cannot rely on dex pc lookups
of basic blocks; this avoids problems caused by duplicated dex pcs.
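
In effect this is dependency injection of a build-time cache: the map that
used to be a long-lived member now exists only while the CFG is constructed
and is handed explicitly to each helper that needs it. A minimal sketch of
the pattern (illustrative types and names, not the actual ART classes):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for MIRGraph and its block-building helpers.
class Graph {
 public:
  void BuildFromBytecode(size_t code_units) {
    // The lookup table exists only while the CFG is being built; once
    // BuildFromBytecode returns, later passes cannot query blocks by dex pc.
    std::vector<uint16_t> dex_pc_to_block(code_units, 0);
    ProcessTryCatch(&dex_pc_to_block);  // helpers receive the map explicitly
  }

 private:
  void ProcessTryCatch(std::vector<uint16_t>* dex_pc_to_block) {
    // Reads and writes go through the parameter instead of a member cache.
    if (!dex_pc_to_block->empty()) {
      (*dex_pc_to_block)[0] = 0;  // e.g. map a handler's dex pc to a block id
    }
  }
};
```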
This patch also changes the quick targets to generate switch case
targets from successor blocks in Mir2Lir::InstallSwitchTables().
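
Rather than materializing a targets[] array of case labels ahead of time,
the installer can walk the switch instruction's successor list and derive
each (key, displacement) pair on the fly. A rough sketch of that loop shape,
with invented types standing in for SuccessorBlockInfo and the LIR label
offsets (not the real Mir2Lir API):

```cpp
#include <cstdint>
#include <vector>

// Hypothetical successor record: case key plus target basic-block id.
struct Successor {
  int key;
  uint16_t block;
};

struct CaseEntry {
  int key;
  int disp;  // label offset relative to the switch-table anchor
};

// Builds sparse-switch entries from successor blocks instead of a
// pre-built targets[] array of case labels.
std::vector<CaseEntry> BuildSparseTable(const std::vector<Successor>& successors,
                                        const std::vector<int>& label_offsets,
                                        int anchor_offset) {
  std::vector<CaseEntry> table;
  table.reserve(successors.size());
  for (const Successor& s : successors) {
    // Displacement of each case label is taken relative to the anchor.
    table.push_back({s.key, label_offsets[s.block] - anchor_offset});
  }
  return table;
}
```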
Change-Id: I9f571efebd2706b4e1606279bd61f3b406ecd1c4
Signed-off-by: Chao-ying Fu <chao-ying.fu@intel.com>
-rw-r--r--  compiler/dex/mir_graph.cc              | 114
-rw-r--r--  compiler/dex/mir_graph.h               |  20
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc     |  11
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc |  11
-rw-r--r--  compiler/dex/quick/codegen_util.cc     |  85
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc   |  12
-rw-r--r--  compiler/dex/quick/mir_to_lir.h        |   7
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc     |  84
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h   |   3

9 files changed, 127 insertions, 220 deletions
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 0f7d45df79..93a31e921a 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -113,7 +113,6 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
       entry_block_(NULL),
       exit_block_(NULL),
       current_code_item_(NULL),
-      dex_pc_to_block_map_(arena->Adapter()),
       m_units_(arena->Adapter()),
       method_stack_(arena->Adapter()),
       current_method_(kInvalidEntry),
@@ -268,31 +267,14 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
   DCHECK(insn != orig_block->first_mir_insn);
   DCHECK(insn == bottom_block->first_mir_insn);
   DCHECK_EQ(insn->offset, bottom_block->start_offset);
-  DCHECK_EQ(dex_pc_to_block_map_[insn->offset], orig_block->id);
 
   // Scan the "bottom" instructions, remapping them to the
   // newly created "bottom" block.
   MIR* p = insn;
   p->bb = bottom_block->id;
-  dex_pc_to_block_map_[p->offset] = bottom_block->id;
   while (p != bottom_block->last_mir_insn) {
     p = p->next;
     DCHECK(p != nullptr);
     p->bb = bottom_block->id;
-    int opcode = p->dalvikInsn.opcode;
-    /*
-     * Some messiness here to ensure that we only enter real opcodes and only the
-     * first half of a potentially throwing instruction that has been split into
-     * CHECK and work portions. Since the 2nd half of a split operation is always
-     * the first in a BasicBlock, we can't hit it here.
-     */
-    if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
-      BasicBlockId mapped_id = dex_pc_to_block_map_[p->offset];
-      // At first glance the instructions should all be mapped to orig_block.
-      // However, multiple instructions may correspond to the same dex, hence an earlier
-      // instruction may have already moved the mapping for dex to bottom_block.
-      DCHECK((mapped_id == orig_block->id) || (mapped_id == bottom_block->id));
-      dex_pc_to_block_map_[p->offset] = bottom_block->id;
-    }
   }
 
   return bottom_block;
@@ -307,12 +289,13 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
  * Utilizes a map for fast lookup of the typical cases.
  */
 BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
-                                BasicBlock** immed_pred_block_p) {
+                                BasicBlock** immed_pred_block_p,
+                                ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
   if (code_offset >= current_code_item_->insns_size_in_code_units_) {
     return nullptr;
   }
 
-  int block_id = dex_pc_to_block_map_[code_offset];
+  int block_id = (*dex_pc_to_block_map)[code_offset];
   BasicBlock* bb = GetBasicBlock(block_id);
 
   if ((bb != nullptr) && (bb->start_offset == code_offset)) {
@@ -327,19 +310,46 @@ BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
 
   if (bb != nullptr) {
     // The target exists somewhere in an existing block.
-    return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
+    BasicBlock* bottom_block = SplitBlock(code_offset, bb,
+                                          bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
+    DCHECK(bottom_block != nullptr);
+    MIR* p = bottom_block->first_mir_insn;
+    BasicBlock* orig_block = bb;
+    DCHECK_EQ((*dex_pc_to_block_map)[p->offset], orig_block->id);
+    // Scan the "bottom" instructions, remapping them to the
+    // newly created "bottom" block.
+    (*dex_pc_to_block_map)[p->offset] = bottom_block->id;
+    while (p != bottom_block->last_mir_insn) {
+      p = p->next;
+      DCHECK(p != nullptr);
+      int opcode = p->dalvikInsn.opcode;
+      /*
+       * Some messiness here to ensure that we only enter real opcodes and only the
+       * first half of a potentially throwing instruction that has been split into
+       * CHECK and work portions. Since the 2nd half of a split operation is always
+       * the first in a BasicBlock, we can't hit it here.
+       */
+      if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
+        BasicBlockId mapped_id = (*dex_pc_to_block_map)[p->offset];
+        // At first glance the instructions should all be mapped to orig_block.
+        // However, multiple instructions may correspond to the same dex, hence an earlier
+        // instruction may have already moved the mapping for dex to bottom_block.
+        DCHECK((mapped_id == orig_block->id) || (mapped_id == bottom_block->id));
+        (*dex_pc_to_block_map)[p->offset] = bottom_block->id;
+      }
+    }
+    return bottom_block;
   }
 
   // Create a new block.
   bb = CreateNewBB(kDalvikByteCode);
   bb->start_offset = code_offset;
-  dex_pc_to_block_map_[bb->start_offset] = bb->id;
+  (*dex_pc_to_block_map)[bb->start_offset] = bb->id;
   return bb;
 }
 
 /* Identify code range in try blocks and set up the empty catch blocks */
-void MIRGraph::ProcessTryCatchBlocks() {
+void MIRGraph::ProcessTryCatchBlocks(ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
   int tries_size = current_code_item_->tries_size_;
   DexOffset offset;
@@ -364,7 +374,7 @@ void MIRGraph::ProcessTryCatchBlocks() {
     CatchHandlerIterator iterator(handlers_ptr);
     for (; iterator.HasNext(); iterator.Next()) {
       uint32_t address = iterator.GetHandlerAddress();
-      FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr);
+      FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
     }
     handlers_ptr = iterator.EndDataPointer();
   }
@@ -439,7 +449,8 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
 /* Process instructions with the kBranch flag */
 BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                        int width, int flags, const uint16_t* code_ptr,
-                                       const uint16_t* code_end) {
+                                       const uint16_t* code_end,
+                                       ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
   DexOffset target = cur_offset;
   switch (insn->dalvikInsn.opcode) {
     case Instruction::GOTO:
@@ -470,7 +481,8 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
   }
   CountBranch(target);
   BasicBlock* taken_block = FindBlock(target, /* create */ true,
-                                      /* immed_pred_block_p */ &cur_block);
+                                      /* immed_pred_block_p */ &cur_block,
+                                      dex_pc_to_block_map);
   cur_block->taken = taken_block->id;
   taken_block->predecessors.push_back(cur_block->id);
@@ -480,18 +492,20 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
                                                /* create */
                                                true,
                                                /* immed_pred_block_p */
-                                               &cur_block);
+                                               &cur_block,
+                                               dex_pc_to_block_map);
     cur_block->fall_through = fallthrough_block->id;
     fallthrough_block->predecessors.push_back(cur_block->id);
   } else if (code_ptr < code_end) {
-    FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
+    FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
   }
   return cur_block;
 }
 
 /* Process instructions with the kSwitch flag */
 BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
-                                       int width, int flags) {
+                                       int width, int flags,
+                                       ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
   UNUSED(flags);
   const uint16_t* switch_data =
       reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
@@ -545,7 +559,8 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
   for (i = 0; i < size; i++) {
     BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
-                                       /* immed_pred_block_p */ &cur_block);
+                                       /* immed_pred_block_p */ &cur_block,
+                                       dex_pc_to_block_map);
     SuccessorBlockInfo* successor_block_info =
         static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
                                                        kArenaAllocSuccessor));
@@ -559,7 +574,8 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
   /* Fall-through case */
   BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
-                                            /* immed_pred_block_p */ nullptr);
+                                            /* immed_pred_block_p */ nullptr,
+                                            dex_pc_to_block_map);
   cur_block->fall_through = fallthrough_block->id;
   fallthrough_block->predecessors.push_back(cur_block->id);
   return cur_block;
@@ -568,7 +584,8 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
 /* Process instructions with the kThrow flag */
 BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                       int width, int flags, ArenaBitVector* try_block_addr,
-                                      const uint16_t* code_ptr, const uint16_t* code_end) {
+                                      const uint16_t* code_ptr, const uint16_t* code_end,
+                                      ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
   UNUSED(flags);
   bool in_try_block = try_block_addr->IsBitSet(cur_offset);
   bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
@@ -585,7 +602,8 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
     for (; iterator.HasNext(); iterator.Next()) {
       BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */,
-                                          nullptr /* immed_pred_block_p */);
+                                          nullptr /* immed_pred_block_p */,
+                                          dex_pc_to_block_map);
       if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
           IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
         // Don't allow monitor-exit to catch its own exception, http://b/15745363 .
@@ -620,7 +638,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
     cur_block->explicit_throw = true;
     if (code_ptr < code_end) {
       // Force creation of new block following THROW via side-effect.
-      FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
+      FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
     }
     if (!in_try_block) {
       // Don't split a THROW that can't rethrow - we're done.
@@ -652,7 +670,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
      * not automatically terminated after the work portion, and may
      * contain following instructions.
      *
-     * Note also that the dex_pc_to_block_map_ entry for the potentially
+     * Note also that the dex_pc_to_block_map entry for the potentially
      * throwing instruction will refer to the original basic block.
      */
     BasicBlock* new_block = CreateNewBB(kDalvikByteCode);
@@ -687,7 +705,11 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
   // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
   // TUNING: use better estimate of basic blocks for following resize.
   block_list_.reserve(block_list_.size() + current_code_item_->insns_size_in_code_units_);
-  dex_pc_to_block_map_.resize(dex_pc_to_block_map_.size() + current_code_item_->insns_size_in_code_units_);
+  // FindBlock lookup cache.
+  ScopedArenaAllocator allocator(&cu_->arena_stack);
+  ScopedArenaVector<uint16_t> dex_pc_to_block_map(allocator.Adapter());
+  dex_pc_to_block_map.resize(dex_pc_to_block_map.size() +
+                             current_code_item_->insns_size_in_code_units_);
 
   // TODO: replace with explicit resize routine. Using automatic extension side effect for now.
   try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
@@ -728,7 +750,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
   cur_block->predecessors.push_back(entry_block_->id);
 
   /* Identify code range in try blocks and set up the empty catch blocks */
-  ProcessTryCatchBlocks();
+  ProcessTryCatchBlocks(&dex_pc_to_block_map);
 
   uint64_t merged_df_flags = 0u;
@@ -777,20 +799,21 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
         DCHECK(cur_block->taken == NullBasicBlockId);
         // Unreachable instruction, mark for no continuation and end basic block.
         flags &= ~Instruction::kContinue;
-        FindBlock(current_offset_ + width, /* create */ true, /* immed_pred_block_p */ nullptr);
+        FindBlock(current_offset_ + width, /* create */ true,
+                  /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map);
       }
     } else {
       cur_block->AppendMIR(insn);
     }
 
     // Associate the starting dex_pc for this opcode with its containing basic block.
-    dex_pc_to_block_map_[insn->offset] = cur_block->id;
+    dex_pc_to_block_map[insn->offset] = cur_block->id;
 
     code_ptr += width;
 
     if (flags & Instruction::kBranch) {
       cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
-                                   width, flags, code_ptr, code_end);
+                                   width, flags, code_ptr, code_end, &dex_pc_to_block_map);
     } else if (flags & Instruction::kReturn) {
       cur_block->terminated_by_return = true;
       cur_block->fall_through = exit_block_->id;
@@ -804,13 +827,15 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
          * Create a fallthrough block for real instructions
          * (incl. NOP).
          */
-        FindBlock(current_offset_ + width, /* create */ true, /* immed_pred_block_p */ nullptr);
+        FindBlock(current_offset_ + width, /* create */ true,
+                  /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map);
       }
     } else if (flags & Instruction::kThrow) {
       cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
-                                  code_ptr, code_end);
+                                  code_ptr, code_end, &dex_pc_to_block_map);
     } else if (flags & Instruction::kSwitch) {
-      cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
+      cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width,
+                                   flags, &dex_pc_to_block_map);
     }
     if (verify_flags & Instruction::kVerifyVarArgRange ||
         verify_flags & Instruction::kVerifyVarArgRangeNonZero) {
@@ -828,7 +853,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
     }
     current_offset_ += width;
     BasicBlock* next_block = FindBlock(current_offset_, /* create */ false,
-                                       /* immed_pred_block_p */ nullptr);
+                                       /* immed_pred_block_p */ nullptr,
+                                       &dex_pc_to_block_map);
     if (next_block) {
       /*
        * The next instruction could be the target of a previously parsed
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 5def19128c..5914245f5b 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -542,8 +542,9 @@ class MIRGraph {
                     uint32_t method_idx, jobject class_loader, const DexFile& dex_file);
 
   /* Find existing block */
-  BasicBlock* FindBlock(DexOffset code_offset) {
-    return FindBlock(code_offset, false, NULL);
+  BasicBlock* FindBlock(DexOffset code_offset,
+                        ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
+    return FindBlock(code_offset, false, nullptr, dex_pc_to_block_map);
   }
 
   const uint16_t* GetCurrentInsns() const {
@@ -1249,16 +1250,20 @@ class MIRGraph {
   bool ContentIsInsn(const uint16_t* code_ptr);
   BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
                          BasicBlock** immed_pred_block_p);
-  BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p);
-  void ProcessTryCatchBlocks();
+  BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p,
+                        ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
+  void ProcessTryCatchBlocks(ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
   bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
   BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
-                               int flags, const uint16_t* code_ptr, const uint16_t* code_end);
+                               int flags, const uint16_t* code_ptr, const uint16_t* code_end,
+                               ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
   BasicBlock* ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
-                               int flags);
+                               int flags,
+                               ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
   BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                               int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
-                              const uint16_t* code_end);
+                              const uint16_t* code_end,
+                              ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
   int AddNewSReg(int v_reg);
   void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
   void DataFlowSSAFormat35C(MIR* mir);
@@ -1391,7 +1396,6 @@ class MIRGraph {
   BasicBlock* entry_block_;
   BasicBlock* exit_block_;
   const DexFile::CodeItem* current_code_item_;
-  ArenaVector<uint16_t> dex_pc_to_block_map_;  // FindBlock lookup cache.
   ArenaVector<DexCompilationUnit*> m_units_;   // List of methods included in this graph
   typedef std::pair<int, int> MIRLocation;     // Insert point, (m_unit_ index, offset)
   ArenaVector<MIRLocation> method_stack_;      // Include stack
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index f15b727857..1b5dde2a3b 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -52,16 +52,13 @@ namespace art {
  */
 void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  if (cu_->verbose) {
-    DumpSparseSwitchTable(table);
-  }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+  tab_rec->switch_mir = mir;
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint32_t size = table[1];
-  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
   switch_tables_.push_back(tab_rec);
 
   // Get the switch value
@@ -100,17 +97,13 @@ void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocati
 void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  if (cu_->verbose) {
-    DumpPackedSwitchTable(table);
-  }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+  tab_rec->switch_mir = mir;
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint32_t size = table[1];
-  tab_rec->targets =
-      static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
   switch_tables_.push_back(tab_rec);
 
   // Get the switch value
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 6492442b94..f5407ae9ba 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -51,16 +51,13 @@ namespace art {
  */
 void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  if (cu_->verbose) {
-    DumpSparseSwitchTable(table);
-  }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+  tab_rec->switch_mir = mir;
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint32_t size = table[1];
-  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
   switch_tables_.push_back(tab_rec);
 
   // Get the switch value
@@ -103,17 +100,13 @@ void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLoca
 void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  if (cu_->verbose) {
-    DumpPackedSwitchTable(table);
-  }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+  tab_rec->switch_mir = mir;
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint32_t size = table[1];
-  tab_rec->targets =
-      static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
   switch_tables_.push_back(tab_rec);
 
   // Get the switch value
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 04113dba81..88a4605a6b 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -557,29 +557,49 @@ void Mir2Lir::InstallSwitchTables() {
       LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
     }
     if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
-      const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2]));
-      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
-        int disp = tab_rec->targets[elems]->offset - bx_offset;
+      DCHECK(tab_rec->switch_mir != nullptr);
+      BasicBlock* bb = mir_graph_->GetBasicBlock(tab_rec->switch_mir->bb);
+      DCHECK(bb != nullptr);
+      int elems = 0;
+      for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
+        int key = successor_block_info->key;
+        int target = successor_block_info->block;
+        LIR* boundary_lir = InsertCaseLabel(target, key);
+        DCHECK(boundary_lir != nullptr);
+        int disp = boundary_lir->offset - bx_offset;
+        Push32(code_buffer_, key);
+        Push32(code_buffer_, disp);
         if (cu_->verbose) {
           LOG(INFO) << "  Case[" << elems << "] key: 0x"
-                    << std::hex << keys[elems] << ", disp: 0x"
+                    << std::hex << key << ", disp: 0x"
                     << std::hex << disp;
         }
-        Push32(code_buffer_, keys[elems]);
-        Push32(code_buffer_,
-               tab_rec->targets[elems]->offset - bx_offset);
+        elems++;
       }
+      DCHECK_EQ(elems, tab_rec->table[1]);
     } else {
       DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                 static_cast<int>(Instruction::kPackedSwitchSignature));
-      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
-        int disp = tab_rec->targets[elems]->offset - bx_offset;
+      DCHECK(tab_rec->switch_mir != nullptr);
+      BasicBlock* bb = mir_graph_->GetBasicBlock(tab_rec->switch_mir->bb);
+      DCHECK(bb != nullptr);
+      int elems = 0;
+      int low_key = s4FromSwitchData(&tab_rec->table[2]);
+      for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
+        int key = successor_block_info->key;
+        DCHECK_EQ(elems + low_key, key);
+        int target = successor_block_info->block;
+        LIR* boundary_lir = InsertCaseLabel(target, key);
+        DCHECK(boundary_lir != nullptr);
+        int disp = boundary_lir->offset - bx_offset;
+        Push32(code_buffer_, disp);
         if (cu_->verbose) {
           LOG(INFO) << "  Case[" << elems << "] disp: 0x" << std::hex << disp;
         }
-        Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
+        elems++;
       }
+      DCHECK_EQ(elems, tab_rec->table[1]);
     }
   }
 }
@@ -830,13 +850,15 @@ int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
  * branch table during the assembly phase. All resource flags
  * are set to prevent code motion. KeyVal is just there for debugging.
  */
-LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
-  LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
+LIR* Mir2Lir::InsertCaseLabel(uint32_t bbid, int keyVal) {
+  LIR* boundary_lir = &block_label_list_[bbid];
   LIR* res = boundary_lir;
   if (cu_->verbose) {
     // Only pay the expense if we're pretty-printing.
     LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
-    new_label->dalvik_offset = vaddr;
+    BasicBlock* bb = mir_graph_->GetBasicBlock(bbid);
+    DCHECK(bb != nullptr);
+    new_label->dalvik_offset = bb->start_offset;
    new_label->opcode = kPseudoCaseLabel;
     new_label->operands[0] = keyVal;
     new_label->flags.fixup = kFixupLabel;
@@ -848,40 +870,6 @@ LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
   return res;
 }
 
-void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
-  const uint16_t* table = tab_rec->table;
-  DexOffset base_vaddr = tab_rec->vaddr;
-  const int32_t *targets = reinterpret_cast<const int32_t*>(&table[4]);
-  int entries = table[1];
-  int low_key = s4FromSwitchData(&table[2]);
-  for (int i = 0; i < entries; i++) {
-    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
-  }
-}
-
-void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
-  const uint16_t* table = tab_rec->table;
-  DexOffset base_vaddr = tab_rec->vaddr;
-  int entries = table[1];
-  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
-  const int32_t* targets = &keys[entries];
-  for (int i = 0; i < entries; i++) {
-    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
-  }
-}
-
-void Mir2Lir::ProcessSwitchTables() {
-  for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
-    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
-      MarkPackedCaseLabels(tab_rec);
-    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
-      MarkSparseCaseLabels(tab_rec);
-    } else {
-      LOG(FATAL) << "Invalid switch table";
-    }
-  }
-}
-
 void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
   /*
    * Sparse switch data format:
@@ -1032,9 +1020,6 @@ void Mir2Lir::Materialize() {
 
   /* Method is not empty */
   if (first_lir_insn_) {
-    // mark the targets of switch statement case labels
-    ProcessSwitchTables();
-
    /* Convert LIR into machine code. */
    AssembleLIR();
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index ccfdaf60bb..0719b52309 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -68,17 +68,13 @@ bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& s
  */
 void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  if (cu_->verbose) {
-    DumpSparseSwitchTable(table);
-  }
   // Add the table to the list - we'll process it later
   SwitchTable* tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+  tab_rec->switch_mir = mir;
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int elements = table[1];
-  tab_rec->targets =
-      static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), kArenaAllocLIR));
   switch_tables_.push_back(tab_rec);
 
   // The table is composed of 8-byte key/disp pairs
@@ -145,17 +141,13 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
  */
 void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
   const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  if (cu_->verbose) {
-    DumpPackedSwitchTable(table);
-  }
   // Add the table to the list - we'll process it later
   SwitchTable* tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+  tab_rec->switch_mir = mir;
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
-                                                      kArenaAllocLIR));
   switch_tables_.push_back(tab_rec);
 
   // Get the switch value
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 888c34eb24..9f1a497a7b 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -224,7 +224,7 @@ class Mir2Lir {
 
   struct SwitchTable : EmbeddedData {
     LIR* anchor;                // Reference instruction for relative offsets.
-    LIR** targets;              // Array of case targets.
+    MIR* switch_mir;            // The switch mir.
   };
 
   /* Static register use counts */
@@ -653,7 +653,6 @@ class Mir2Lir {
     LIR* ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx);
     LIR* AddWordData(LIR* *constant_list_p, int value);
     LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
-    void ProcessSwitchTables();
     void DumpSparseSwitchTable(const uint16_t* table);
     void DumpPackedSwitchTable(const uint16_t* table);
     void MarkBoundary(DexOffset offset, const char* inst_str);
@@ -671,9 +670,7 @@ class Mir2Lir {
     int AssignLiteralOffset(CodeOffset offset);
     int AssignSwitchTablesOffset(CodeOffset offset);
    int AssignFillArrayDataOffset(CodeOffset offset);
-    virtual LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
-    virtual void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
-    void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
+    LIR* InsertCaseLabel(uint32_t bbid, int keyVal);
 
     // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated.
     virtual RegLocation NarrowRegLoc(RegLocation loc);
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index aa0972f861..284e8f6c0a 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -37,84 +37,6 @@ void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocat
 }
 
 /*
- * We override InsertCaseLabel, because the first parameter represents
- * a basic block id, instead of a dex offset.
- */
-LIR* X86Mir2Lir::InsertCaseLabel(DexOffset bbid, int keyVal) {
-  LIR* boundary_lir = &block_label_list_[bbid];
-  LIR* res = boundary_lir;
-  if (cu_->verbose) {
-    // Only pay the expense if we're pretty-printing.
-    LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
-    BasicBlock* bb = mir_graph_->GetBasicBlock(bbid);
-    DCHECK(bb != nullptr);
-    new_label->dalvik_offset = bb->start_offset;;
-    new_label->opcode = kPseudoCaseLabel;
-    new_label->operands[0] = keyVal;
-    new_label->flags.fixup = kFixupLabel;
-    DCHECK(!new_label->flags.use_def_invalid);
-    new_label->u.m.def_mask = &kEncodeAll;
-    InsertLIRAfter(boundary_lir, new_label);
-    res = new_label;
-  }
-  return res;
-}
-
-void X86Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
-  const uint16_t* table = tab_rec->table;
-  const int32_t *targets = reinterpret_cast<const int32_t*>(&table[4]);
-  int entries = table[1];
-  int low_key = s4FromSwitchData(&table[2]);
-  for (int i = 0; i < entries; i++) {
-    // The value at targets[i] is a basic block id, instead of a dex offset.
-    tab_rec->targets[i] = InsertCaseLabel(targets[i], i + low_key);
-  }
-}
-
-/*
- * We convert and create a new packed switch table that stores
- * basic block ids to targets[] by examining successor blocks.
- * Note that the original packed switch table stores dex offsets to targets[].
- */
-const uint16_t* X86Mir2Lir::ConvertPackedSwitchTable(MIR* mir, const uint16_t* table) {
-  /*
-   * The original packed switch data format:
-   *  ushort ident = 0x0100   magic value
-   *  ushort size             number of entries in the table
-   *  int first_key           first (and lowest) switch case value
-   *  int targets[size]       branch targets, relative to switch opcode
-   *
-   * Total size is (4+size*2) 16-bit code units.
-   *
-   * Note that the new packed switch data format is the same as the original
-   * format, except that targets[] are basic block ids.
-   *
-   */
-  BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
-  DCHECK(bb != nullptr);
-  // Get the number of entries.
-  int entries = table[1];
-  const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]);
-  int32_t starting_key = as_int32[0];
-  // Create a new table.
-  int size = sizeof(uint16_t) * (4 + entries * 2);
-  uint16_t* new_table = reinterpret_cast<uint16_t*>(arena_->Alloc(size, kArenaAllocMisc));
-  // Copy ident, size, and first_key to the new table.
-  memcpy(new_table, table, sizeof(uint16_t) * 4);
-  // Get the new targets.
-  int32_t* new_targets = reinterpret_cast<int32_t*>(&new_table[4]);
-  // Find out targets for each entry.
-  int i = 0;
-  for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
-    DCHECK_EQ(starting_key + i, successor_block_info->key);
-    // Save target basic block id.
-    new_targets[i++] = successor_block_info->block;
-  }
-  DCHECK_EQ(i, entries);
-  return new_table;
-}
-
-/*
  * Code pattern will look something like:
  *
  *     mov  r_val, ..
@@ -131,16 +53,14 @@ const uint16_t* X86Mir2Lir::ConvertPackedSwitchTable(MIR* mir, const uint16_t* t
  * done:
  */
 void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* old_table = mir_graph_->GetTable(mir, table_offset);
-  const uint16_t* table = ConvertPackedSwitchTable(mir, old_table);
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   // Add the table to the list - we'll process it later
   SwitchTable* tab_rec =
       static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+  tab_rec->switch_mir = mir;
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
-                                                      kArenaAllocLIR));
   switch_tables_.push_back(tab_rec);
 
   // Get the switch value
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 811d4f5d7b..ca60400059 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -271,11 +271,8 @@ class X86Mir2Lir : public Mir2Lir {
                         int first_bit, int second_bit) OVERRIDE;
   void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
   void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-  const uint16_t* ConvertPackedSwitchTable(MIR* mir, const uint16_t* table);
   void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
   void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
-  LIR* InsertCaseLabel(DexOffset vaddr, int keyVal) OVERRIDE;
-  void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) OVERRIDE;
 
   /**
    * @brief Implement instanceof a final class with x86 specific code.