| author | buzbee <buzbee@google.com> | 2013-10-11 10:49:56 -0700 |
|---|---|---|
| committer | buzbee <buzbee@google.com> | 2013-10-11 10:56:31 -0700 |
| commit | 409fe94ad529d9334587be80b9f6a3d166805508 (patch) | |
| tree | 1bd570273fcdf0f99d3ef273ad1b5a4b47acd3d6 | /compiler/dex/quick/local_optimizations.cc |
| parent | 29f86e5d7631e58fbe6fbd7888cfb2b0851417b4 (diff) | |
Quick assembler fix
This CL reinstates the select pattern optimization disabled by
CL 374310 and fixes the underlying problem: improper handling of
the kPseudoBarrier LIR opcode. The bug was introduced in the
recent assembler restructuring. In short, LIR pseudo opcodes (which
have values < 0) should always have size 0, and thus cause no
bits to be emitted during assembly. In this case, bad logic caused
us to set the size of a kPseudoBarrier opcode via a lookup in the
EncodingMap.
Because all pseudo opcodes are < 0, this lookup was an array underflow
load, picking up whatever garbage happened to be located before the
EncodingMap. This explains why the error only showed up recently: we had
previously just gotten a lucky memory layout.
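For illustration, a minimal self-contained sketch of the failure mode and the corrected size logic; the enum values, array contents, and helper names below are simplified stand-ins, not the verbatim ART definitions:

```cpp
#include <cassert>

// Stand-ins: in ART, pseudo LIR opcodes (kPseudoBarrier, etc.) are the
// negative values of the opcode enum, and EncodingMap is indexed by the
// non-negative "real" opcodes only.
enum Opcode { kPseudoBarrier = -2, kPseudoTargetLabel = -1, kRealOp = 0, kNumRealOps };

struct Encoding { int size; };
Encoding EncodingMap[kNumRealOps] = {{4}};  // one real opcode, 4 bytes

bool IsPseudoLirOp(int opcode) { return opcode < 0; }

// Buggy logic: unconditional lookup. EncodingMap[kPseudoBarrier] reads
// memory *before* the array, an underflow load of arbitrary garbage.
int BuggySize(int opcode) { return EncodingMap[opcode].size; }

// Fixed logic: pseudo ops always get size 0 and emit no bits.
int FixedSize(int opcode) {
  return IsPseudoLirOp(opcode) ? 0 : EncodingMap[opcode].size;
}

int main() {
  assert(FixedSize(kPseudoBarrier) == 0);  // no bits emitted for pseudo ops
  assert(FixedSize(kRealOp) == 4);         // real ops still use EncodingMap
  return 0;
}
```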
This CL corrects the faulty logic and adds DCHECKs to uses of
the EncodingMap to ensure that we never index it with a
pseudo op. Additionally, the existing is_pseudo_opcode() macro is
replaced with IsPseudoLirOp(), named similarly to the existing
IsPseudoMirOp().
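A hedged sketch of the shape of that change; the macro form and the guarded method body are approximations, with assert() standing in for ART's DCHECK():

```cpp
#include <cassert>

// Old form (approximate), a bare macro:
//   #define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)

// New form, a named function matching the existing IsPseudoMirOp():
constexpr bool IsPseudoLirOp(int opcode) { return opcode < 0; }

// Pattern of the added guards at EncodingMap call sites:
int GetTargetInstFlags(int opcode) {
  assert(!IsPseudoLirOp(opcode));  // pseudo ops must never index EncodingMap
  return 0;  // stub body; the real method reads flags out of EncodingMap
}

int main() {
  // A real (non-negative) opcode passes the guard; a pseudo op would
  // trip the assert instead of silently underflowing the array.
  return GetTargetInstFlags(0);
}
```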
Change-Id: I46761a0275a923d85b545664cadf052e1ab120dc
Diffstat (limited to 'compiler/dex/quick/local_optimizations.cc')
-rw-r--r-- | compiler/dex/quick/local_optimizations.cc | 12
1 file changed, 6 insertions, 6 deletions
```diff
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index f915779e75..0f29578c4e 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -78,7 +78,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
   }
 
   for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
-    if (is_pseudo_opcode(this_lir->opcode)) {
+    if (IsPseudoLirOp(this_lir->opcode)) {
       continue;
     }
@@ -135,7 +135,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
        * Skip already dead instructions (whose dataflow information is
        * outdated and misleading).
        */
-      if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) {
+      if (check_lir->flags.is_nop || IsPseudoLirOp(check_lir->opcode)) {
         continue;
       }
@@ -285,7 +285,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
   /* Start from the second instruction */
   for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
-    if (is_pseudo_opcode(this_lir->opcode)) {
+    if (IsPseudoLirOp(this_lir->opcode)) {
       continue;
     }
@@ -362,7 +362,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
        * Store the dependent or non-pseudo/indepedent instruction to the
        * list.
        */
-      if (stop_here || !is_pseudo_opcode(check_lir->opcode)) {
+      if (stop_here || !IsPseudoLirOp(check_lir->opcode)) {
         prev_inst_list[next_slot++] = check_lir;
         if (next_slot == MAX_HOIST_DISTANCE) {
           break;
@@ -393,7 +393,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
       int slot;
       LIR* dep_lir = prev_inst_list[next_slot-1];
       /* If there is ld-ld dependency, wait LDLD_DISTANCE cycles */
-      if (!is_pseudo_opcode(dep_lir->opcode) &&
+      if (!IsPseudoLirOp(dep_lir->opcode) &&
           (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
         first_slot -= LDLD_DISTANCE;
       }
@@ -434,7 +434,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
        * Try to find two instructions with load/use dependency until
        * the remaining instructions are less than LD_LATENCY.
        */
-      bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
+      bool prev_is_load = IsPseudoLirOp(prev_lir->opcode) ? false :
           (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
       if (((cur_lir->u.m.use_mask & prev_lir->u.m.def_mask) && prev_is_load) ||
           (slot < LD_LATENCY)) {
         break;
```