summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVladimir Marko <vmarko@google.com>2014-09-30 18:09:14 +0100
committerVladimir Marko <vmarko@google.com>2014-10-17 15:16:08 +0100
commit415ac88a6471792a28cf2b457fe4ba9dc099396e (patch)
tree1a83ac3a5f224568af19fc4bf148d352a1a4e49c
parent02e7d4e802248574cee7224fea3352b6e558e4ee (diff)
downloadart-415ac88a6471792a28cf2b457fe4ba9dc099396e.tar.gz
art-415ac88a6471792a28cf2b457fe4ba9dc099396e.tar.bz2
art-415ac88a6471792a28cf2b457fe4ba9dc099396e.zip
Quick: In GVN, apply modifications early if outside loop.
To improve GVN performance, apply modifications to blocks outside loops during the initial convergence phase. During the post processing phase, apply modifications only to the blocks belonging to loops. Also clean up the check whether to run the LVN and add the capability to limit the maximum number of nested loops we allow the GVN to process. Change-Id: Ie7f1254f91a442397c06a325d5d314d8f58e5012
-rw-r--r--compiler/dex/bb_optimizations.h2
-rw-r--r--compiler/dex/dataflow_iterator-inl.h24
-rw-r--r--compiler/dex/dataflow_iterator.h45
-rw-r--r--compiler/dex/frontend.cc1
-rw-r--r--compiler/dex/frontend.h1
-rw-r--r--compiler/dex/global_value_numbering.cc34
-rw-r--r--compiler/dex/global_value_numbering.h28
-rw-r--r--compiler/dex/global_value_numbering_test.cc7
-rw-r--r--compiler/dex/local_value_numbering.cc4
-rw-r--r--compiler/dex/local_value_numbering_test.cc4
-rw-r--r--compiler/dex/mir_graph.cc2
-rw-r--r--compiler/dex/mir_graph.h5
-rw-r--r--compiler/dex/mir_optimization.cc35
-rw-r--r--compiler/dex/pass_driver_me.h3
-rw-r--r--compiler/dex/pass_me.h1
15 files changed, 108 insertions, 88 deletions
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index fba086369..764a4cf08 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -178,7 +178,7 @@ class NullCheckElimination : public PassME {
class TypeInference : public PassME {
public:
TypeInference()
- : PassME("TypeInference", kRepeatingTopologicalSortTraversal, "4_post_type_cfg") {
+ : PassME("TypeInference", kRepeatingPreOrderDFSTraversal, "4_post_type_cfg") {
}
bool Worker(PassDataHolder* data) const {
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 89f2b5c69..6e25db6f0 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -115,6 +115,30 @@ inline BasicBlock* AllNodesIterator::Next(bool had_change) {
return res;
}
+inline BasicBlock* TopologicalSortIterator::Next(bool had_change) {
+ // Update changed: if had_change is true, we remember it for the whole iteration.
+ changed_ |= had_change;
+
+ while (loop_head_stack_->size() != 0u &&
+ (*loop_ends_)[loop_head_stack_->back().first] == idx_) {
+ loop_head_stack_->pop_back();
+ }
+
+ if (idx_ == end_idx_) {
+ return nullptr;
+ }
+
+ // Get next block and return it.
+ BasicBlockId idx = idx_;
+ idx_ += 1;
+ BasicBlock* bb = mir_graph_->GetBasicBlock((*block_id_list_)[idx]);
+ DCHECK(bb != nullptr);
+ if ((*loop_ends_)[idx] != 0u) {
+ loop_head_stack_->push_back(std::make_pair(idx, false)); // Not recalculating.
+ }
+ return bb;
+}
+
inline BasicBlock* LoopRepeatingTopologicalSortIterator::Next(bool had_change) {
if (idx_ != 0) {
// Mark last processed block visited.
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 188f1d9ff..9f17a3ec8 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -333,7 +333,9 @@ namespace art {
* @param mir_graph The MIRGraph considered.
*/
explicit TopologicalSortIterator(MIRGraph* mir_graph)
- : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()) {
+ : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()),
+ loop_ends_(&mir_graph->GetTopologicalSortOrderLoopEnds()),
+ loop_head_stack_(mir_graph_->GetTopologicalSortOrderLoopHeadStack()) {
// Extra setup for TopologicalSortIterator.
idx_ = start_idx_;
block_id_list_ = &mir_graph->GetTopologicalSortOrder();
@@ -344,44 +346,11 @@ namespace art {
* @param had_change did the user of the iteration change the previous BasicBlock.
* @return the next BasicBlock following the iteration order, 0 if finished.
*/
- virtual BasicBlock* Next(bool had_change = false) {
- // Update changed: if had_changed is true, we remember it for the whole iteration.
- changed_ |= had_change;
-
- return ForwardSingleNext();
- }
- };
-
- /**
- * @class RepeatingTopologicalSortIterator
- * @brief Used to perform a Topological Sort Iteration of a MIRGraph.
- * @details If there is a change during an iteration, the iteration starts over at the end of the
- * iteration.
- */
- class RepeatingTopologicalSortIterator : public DataflowIterator {
- public:
- /**
- * @brief The constructor, using all of the reachable blocks of the MIRGraph.
- * @param mir_graph The MIRGraph considered.
- */
- explicit RepeatingTopologicalSortIterator(MIRGraph* mir_graph)
- : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()) {
- // Extra setup for RepeatingTopologicalSortIterator.
- idx_ = start_idx_;
- block_id_list_ = &mir_graph->GetTopologicalSortOrder();
- }
+ virtual BasicBlock* Next(bool had_change = false) OVERRIDE;
- /**
- * @brief Get the next BasicBlock depending on iteration order.
- * @param had_change did the user of the iteration change the previous BasicBlock.
- * @return the next BasicBlock following the iteration order, 0 if finished.
- */
- virtual BasicBlock* Next(bool had_change = false) {
- // Update changed: if had_changed is true, we remember it for the whole iteration.
- changed_ |= had_change;
-
- return ForwardRepeatNext();
- }
+ private:
+ const ArenaVector<BasicBlockId>* const loop_ends_;
+ ArenaVector<std::pair<uint16_t, bool>>* const loop_head_stack_;
};
/**
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 07f30334f..2e21d052e 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -41,6 +41,7 @@ static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimi
// (1 << kNullCheckElimination) |
// (1 << kClassInitCheckElimination) |
// (1 << kGlobalValueNumbering) |
+ // (1 << kLocalValueNumbering) |
// (1 << kPromoteRegs) |
// (1 << kTrackLiveTemps) |
// (1 << kSafeOptimizations) |
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 51b6d68dc..bed3b9730 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -41,6 +41,7 @@ enum opt_control_vector {
kNullCheckElimination,
kClassInitCheckElimination,
kGlobalValueNumbering,
+ kLocalValueNumbering,
kPromoteRegs,
kTrackLiveTemps,
kSafeOptimizations,
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index af57529a0..f0f7a7051 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -20,14 +20,16 @@
namespace art {
-GlobalValueNumbering::GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
+GlobalValueNumbering::GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator,
+ Mode mode)
: cu_(cu),
mir_graph_(cu->mir_graph.get()),
allocator_(allocator),
bbs_processed_(0u),
max_bbs_to_process_(kMaxBbsToProcessMultiplyFactor * mir_graph_->GetNumReachableBlocks()),
last_value_(0u),
- modifications_allowed_(false),
+ modifications_allowed_(true),
+ mode_(mode),
global_value_map_(std::less<uint64_t>(), allocator->Adapter()),
field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
field_index_reverse_map_(allocator->Adapter()),
@@ -48,19 +50,19 @@ LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
if (UNLIKELY(!Good())) {
return nullptr;
}
- if (UNLIKELY(bb->data_flow_info == nullptr)) {
- return nullptr;
- }
- if (UNLIKELY(bb->block_type == kExitBlock)) {
+ if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock) {
DCHECK(bb->first_mir_insn == nullptr);
return nullptr;
}
- if (UNLIKELY(bbs_processed_ == max_bbs_to_process_)) {
+ if (mode_ == kModeGvn && UNLIKELY(bbs_processed_ == max_bbs_to_process_)) {
// If we're still trying to converge, stop now. Otherwise, proceed to apply optimizations.
- if (!modifications_allowed_) {
- last_value_ = kNoValue; // Make bad.
- return nullptr;
- }
+ last_value_ = kNoValue; // Make bad.
+ return nullptr;
+ }
+ if (mode_ == kModeGvnPostProcessing &&
+ mir_graph_->GetTopologicalSortOrderLoopHeadStack()->empty()) {
+ // Modifications outside loops are performed during the main phase.
+ return nullptr;
}
if (allocator == nullptr) {
allocator = allocator_;
@@ -74,6 +76,7 @@ LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
uint16_t value_name = work_lvn_->GetSRegValueName(this_reg);
work_lvn_->SetValueNameNullChecked(value_name);
}
+ DCHECK(bb->first_mir_insn == nullptr); // modifications_allowed_ is irrelevant.
} else {
// To avoid repeated allocation on the ArenaStack, reuse a single vector kept as a member.
DCHECK(merge_lvns_.empty());
@@ -83,19 +86,18 @@ LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
// topological order, or we're recalculating a loop head and need to merge all incoming
// LVNs. When we're not at a loop head (including having an empty loop head stack) all
// predecessors should be preceding blocks and we shall merge all of them anyway.
- //
- // If we're running the modification phase of the full GVN, the loop head stack will be
- // empty and we need to merge all incoming LVNs. If we're running just a simple LVN,
- // the loop head stack will also be empty and there will be nothing to merge anyway.
bool use_all_predecessors = true;
uint16_t loop_head_idx = 0u; // Used only if !use_all_predecessors.
- if (mir_graph_->GetTopologicalSortOrderLoopHeadStack()->size() != 0) {
+ if (mode_ == kModeGvn && mir_graph_->GetTopologicalSortOrderLoopHeadStack()->size() != 0) {
// Full GVN inside a loop, see if we're at the loop head for the first time.
+ modifications_allowed_ = false;
auto top = mir_graph_->GetTopologicalSortOrderLoopHeadStack()->back();
loop_head_idx = top.first;
bool recalculating = top.second;
use_all_predecessors = recalculating ||
loop_head_idx != mir_graph_->GetTopologicalSortOrderIndexes()[bb->id];
+ } else {
+ modifications_allowed_ = true;
}
for (BasicBlockId pred_id : bb->predecessors) {
DCHECK_NE(pred_id, NullBasicBlockId);
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index 27183bfe4..df554cdad 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -28,7 +28,18 @@ class MirFieldInfo;
class GlobalValueNumbering {
public:
- GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator);
+ enum Mode {
+ kModeGvn,
+ kModeGvnPostProcessing,
+ kModeLvn
+ };
+
+ static bool Skip(CompilationUnit* cu) {
+ return (cu->disable_opt & (1u << kGlobalValueNumbering)) != 0u ||
+ cu->mir_graph->GetMaxNestedLoops() > kMaxAllowedNestedLoops;
+ }
+
+ GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator, Mode mode);
~GlobalValueNumbering();
// Prepare LVN for the basic block.
@@ -44,13 +55,13 @@ class GlobalValueNumbering {
}
// Allow modifications.
- void AllowModifications() {
+ void StartPostProcessing() {
DCHECK(Good());
- modifications_allowed_ = true;
+ DCHECK_EQ(mode_, kModeGvn);
+ mode_ = kModeGvnPostProcessing;
}
bool CanModify() const {
- // TODO: DCHECK(Good()), see AllowModifications() and NewValueName().
return modifications_allowed_ && Good();
}
@@ -67,8 +78,7 @@ class GlobalValueNumbering {
// Allocate a new value name.
uint16_t NewValueName() {
- // TODO: No new values should be needed once we allow modifications.
- // DCHECK(!modifications_allowed_);
+ DCHECK_NE(mode_, kModeGvnPostProcessing);
++last_value_;
return last_value_;
}
@@ -208,6 +218,9 @@ class GlobalValueNumbering {
MIRGraph* mir_graph_;
ScopedArenaAllocator* const allocator_;
+ // The maximum number of nested loops that we accept for GVN.
+ static constexpr size_t kMaxAllowedNestedLoops = 6u;
+
// The number of BBs that we need to process grows exponentially with the number
// of nested loops. Don't allow excessive processing for too many nested loops or
// otherwise expensive methods.
@@ -225,6 +238,9 @@ class GlobalValueNumbering {
// LVN once for each BasicBlock.
bool modifications_allowed_;
+ // Specifies the mode of operation.
+ Mode mode_;
+
ValueMap global_value_map_;
FieldIndexMap field_index_map_;
ScopedArenaVector<const FieldIndexMap::value_type*> field_index_reverse_map_;
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 1d9920d24..82a11a55b 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -284,8 +284,8 @@ class GlobalValueNumberingTest : public testing::Test {
cu_.mir_graph->ComputeTopologicalSortOrder();
cu_.mir_graph->SSATransformationEnd();
ASSERT_TRUE(gvn_ == nullptr);
- gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get()));
- ASSERT_FALSE(gvn_->CanModify());
+ gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
+ GlobalValueNumbering::kModeGvn));
value_names_.resize(mir_count_, 0xffffu);
IteratorType iterator(cu_.mir_graph.get());
bool change = false;
@@ -304,8 +304,7 @@ class GlobalValueNumberingTest : public testing::Test {
void PerformGVNCodeModifications() {
ASSERT_TRUE(gvn_ != nullptr);
ASSERT_TRUE(gvn_->Good());
- ASSERT_FALSE(gvn_->CanModify());
- gvn_->AllowModifications();
+ gvn_->StartPostProcessing();
TopologicalSortIterator iterator(cu_.mir_graph.get());
for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 0fb5e4885..8b7ae20b9 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -1413,8 +1413,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::MONITOR_EXIT:
HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
// If we're running GVN and CanModify(), uneliminated null check indicates bytecode error.
- if ((gvn_->GetCompilationUnit()->disable_opt & (1u << kGlobalValueNumbering)) == 0u &&
- gvn_->CanModify() && (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0) {
+ if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 &&
+ gvn_->work_lvn_ != nullptr && gvn_->CanModify()) {
LOG(WARNING) << "Bytecode error: MONITOR_EXIT is still null checked at 0x" << std::hex
<< mir->offset << " in " << PrettyMethod(gvn_->cu_->method_idx, *gvn_->cu_->dex_file);
}
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 067bea28b..33d6c14ba 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -195,9 +195,9 @@ class LocalValueNumberingTest : public testing::Test {
value_names_() {
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
- gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get()));
+ gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
+ GlobalValueNumbering::kModeLvn));
lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u, allocator_.get()));
- gvn_->AllowModifications();
}
ArenaPool pool_;
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 8dded79aa..3fa80b9c0 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -94,6 +94,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
topological_order_loop_ends_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
+ max_nested_loops_(0u),
i_dom_list_(NULL),
temp_scoped_alloc_(),
temp_insn_data_(nullptr),
@@ -1976,6 +1977,7 @@ void MIRGraph::ComputeTopologicalSortOrder() {
// Prepare the loop head stack for iteration.
topological_order_loop_head_stack_.clear();
topological_order_loop_head_stack_.reserve(max_nested_loops);
+ max_nested_loops_ = max_nested_loops;
}
bool BasicBlock::IsExceptionBlock() const {
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 80303f675..a405af16a 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -708,6 +708,10 @@ class MIRGraph {
return &topological_order_loop_head_stack_;
}
+ size_t GetMaxNestedLoops() const {
+ return max_nested_loops_;
+ }
+
bool IsConst(int32_t s_reg) const {
return is_constant_v_->IsBitSet(s_reg);
}
@@ -1265,6 +1269,7 @@ class MIRGraph {
ArenaVector<uint16_t> topological_order_indexes_;
// Stack of the loop head indexes and recalculation flags for RepeatingTopologicalSortIterator.
ArenaVector<std::pair<uint16_t, bool>> topological_order_loop_head_stack_;
+ size_t max_nested_loops_;
int* i_dom_list_;
std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
uint16_t* temp_insn_data_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 00528e5f4..96505ab72 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -408,14 +408,14 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
if (bb->block_type == kDead) {
return true;
}
- // Don't do a separate LVN if we did the GVN.
- bool use_lvn = bb->use_lvn && (cu_->disable_opt & (1u << kGlobalValueNumbering)) != 0u;
+ bool use_lvn = bb->use_lvn && (cu_->disable_opt & (1u << kLocalValueNumbering)) == 0u;
std::unique_ptr<ScopedArenaAllocator> allocator;
std::unique_ptr<GlobalValueNumbering> global_valnum;
std::unique_ptr<LocalValueNumbering> local_valnum;
if (use_lvn) {
allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- global_valnum.reset(new (allocator.get()) GlobalValueNumbering(cu_, allocator.get()));
+ global_valnum.reset(new (allocator.get()) GlobalValueNumbering(cu_, allocator.get(),
+ GlobalValueNumbering::kModeLvn));
local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
allocator.get()));
}
@@ -1297,7 +1297,7 @@ void MIRGraph::EliminateClassInitChecksEnd() {
}
bool MIRGraph::ApplyGlobalValueNumberingGate() {
- if ((cu_->disable_opt & (1u << kGlobalValueNumbering)) != 0u) {
+ if (GlobalValueNumbering::Skip(cu_)) {
return false;
}
@@ -1305,7 +1305,8 @@ bool MIRGraph::ApplyGlobalValueNumberingGate() {
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
DCHECK(temp_gvn_ == nullptr);
temp_gvn_.reset(
- new (temp_scoped_alloc_.get()) GlobalValueNumbering(cu_, temp_scoped_alloc_.get()));
+ new (temp_scoped_alloc_.get()) GlobalValueNumbering(cu_, temp_scoped_alloc_.get(),
+ GlobalValueNumbering::kModeGvn));
return true;
}
@@ -1324,19 +1325,23 @@ bool MIRGraph::ApplyGlobalValueNumbering(BasicBlock* bb) {
void MIRGraph::ApplyGlobalValueNumberingEnd() {
// Perform modifications.
if (temp_gvn_->Good()) {
- temp_gvn_->AllowModifications();
- PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- ScopedArenaAllocator allocator(&cu_->arena_stack); // Reclaim memory after each LVN.
- LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb, &allocator);
- if (lvn != nullptr) {
- for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- lvn->GetValueNumber(mir);
+ if (max_nested_loops_ != 0u) {
+ temp_gvn_->StartPostProcessing();
+ TopologicalSortIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ ScopedArenaAllocator allocator(&cu_->arena_stack); // Reclaim memory after each LVN.
+ LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb, &allocator);
+ if (lvn != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ lvn->GetValueNumber(mir);
+ }
+ bool change = temp_gvn_->FinishBasicBlock(bb);
+ DCHECK(!change) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
}
- bool change = temp_gvn_->FinishBasicBlock(bb);
- DCHECK(!change) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
}
}
+ // GVN was successful, running the LVN would be useless.
+ cu_->disable_opt |= (1u << kLocalValueNumbering);
} else {
LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
}
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 537ceb665..7bfaf820d 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -67,9 +67,6 @@ class PassDriverME: public PassDriver<PassDriverType> {
case kTopologicalSortTraversal:
DoWalkBasicBlocks<TopologicalSortIterator>(&pass_me_data_holder_, me_pass);
break;
- case kRepeatingTopologicalSortTraversal:
- DoWalkBasicBlocks<RepeatingTopologicalSortIterator>(&pass_me_data_holder_, me_pass);
- break;
case kLoopRepeatingTopologicalSortTraversal:
DoWalkBasicBlocks<LoopRepeatingTopologicalSortIterator>(&pass_me_data_holder_, me_pass);
break;
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
index 8242cb879..2f3c8b221 100644
--- a/compiler/dex/pass_me.h
+++ b/compiler/dex/pass_me.h
@@ -55,7 +55,6 @@ enum DataFlowAnalysisMode {
kRepeatingReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
kPostOrderDOMTraversal, /**< @brief Dominator tree / Post-Order. */
kTopologicalSortTraversal, /**< @brief Topological Order traversal. */
- kRepeatingTopologicalSortTraversal, /**< @brief Repeating Topological Order traversal. */
kLoopRepeatingTopologicalSortTraversal, /**< @brief Loop-repeating Topological Order traversal. */
kNoNodes, /**< @brief Skip BasicBlock traversal. */
};