summaryrefslogtreecommitdiffstats
path: root/compiler/dex/quick/local_optimizations.cc
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/dex/quick/local_optimizations.cc')
-rw-r--r--  compiler/dex/quick/local_optimizations.cc | 38
1 file changed, 19 insertions, 19 deletions
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index cb7694de68..f915779e75 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -21,8 +21,8 @@ namespace art {
#define DEBUG_OPT(X)
/* Check RAW, WAR, and WAW dependency on the register operands */
-#define CHECK_REG_DEP(use, def, check) ((def & check->use_mask) || \
- ((use | def) & check->def_mask))
+#define CHECK_REG_DEP(use, def, check) ((def & check->u.m.use_mask) || \
+ ((use | def) & check->u.m.def_mask))
/* Scheduler heuristics */
#define MAX_HOIST_DISTANCE 20
@@ -30,10 +30,10 @@ namespace art {
#define LD_LATENCY 2
static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) {
- int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info);
- int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info);
- int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info);
- int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->alias_info);
+ int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->flags.alias_info);
+ int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->flags.alias_info);
+ int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->flags.alias_info);
+ int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->flags.alias_info);
return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
}
@@ -106,7 +106,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
bool is_this_lir_load = target_flags & IS_LOAD;
LIR* check_lir;
/* Use the mem mask to determine the rough memory location */
- uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
+ uint64_t this_mem_mask = (this_lir->u.m.use_mask | this_lir->u.m.def_mask) & ENCODE_MEM;
/*
* Currently only eliminate redundant ld/st for constant and Dalvik
@@ -116,10 +116,10 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
continue;
}
- uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+ uint64_t stop_def_reg_mask = this_lir->u.m.def_mask & ~ENCODE_MEM;
uint64_t stop_use_reg_mask;
if (cu_->instruction_set == kX86) {
- stop_use_reg_mask = (IS_BRANCH | this_lir->use_mask) & ~ENCODE_MEM;
+ stop_use_reg_mask = (IS_BRANCH | this_lir->u.m.use_mask) & ~ENCODE_MEM;
} else {
/*
* Add pc to the resource mask to prevent this instruction
@@ -127,7 +127,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
* region bits since stop_mask is used to check data/control
* dependencies.
*/
- stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
+ stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->u.m.use_mask) & ~ENCODE_MEM;
}
for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
@@ -139,7 +139,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
continue;
}
- uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
+ uint64_t check_mem_mask = (check_lir->u.m.use_mask | check_lir->u.m.def_mask) & ENCODE_MEM;
uint64_t alias_condition = this_mem_mask & check_mem_mask;
bool stop_here = false;
@@ -159,7 +159,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
*/
DCHECK(!(check_flags & IS_STORE));
/* Same value && same register type */
- if (check_lir->alias_info == this_lir->alias_info &&
+ if (check_lir->flags.alias_info == this_lir->flags.alias_info &&
SameRegType(check_lir->operands[0], native_reg_id)) {
/*
* Different destination register - insert
@@ -172,7 +172,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
}
} else if (alias_condition == ENCODE_DALVIK_REG) {
/* Must alias */
- if (check_lir->alias_info == this_lir->alias_info) {
+ if (check_lir->flags.alias_info == this_lir->flags.alias_info) {
/* Only optimize compatible registers */
bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
if ((is_this_lir_load && is_check_lir_load) ||
@@ -297,7 +297,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
continue;
}
- uint64_t stop_use_all_mask = this_lir->use_mask;
+ uint64_t stop_use_all_mask = this_lir->u.m.use_mask;
if (cu_->instruction_set != kX86) {
/*
@@ -313,7 +313,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
/* Similar as above, but just check for pure register dependency */
uint64_t stop_use_reg_mask = stop_use_all_mask & ~ENCODE_MEM;
- uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+ uint64_t stop_def_reg_mask = this_lir->u.m.def_mask & ~ENCODE_MEM;
int next_slot = 0;
bool stop_here = false;
@@ -328,7 +328,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
continue;
}
- uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
+ uint64_t check_mem_mask = check_lir->u.m.def_mask & ENCODE_MEM;
uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
stop_here = false;
@@ -337,7 +337,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
/* We can fully disambiguate Dalvik references */
if (alias_condition == ENCODE_DALVIK_REG) {
/* Must alias or partially overlap */
- if ((check_lir->alias_info == this_lir->alias_info) ||
+ if ((check_lir->flags.alias_info == this_lir->flags.alias_info) ||
IsDalvikRegisterClobbered(this_lir, check_lir)) {
stop_here = true;
}
@@ -406,7 +406,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
LIR* prev_lir = prev_inst_list[slot+1];
/* Check the highest instruction */
- if (prev_lir->def_mask == ENCODE_ALL) {
+ if (prev_lir->u.m.def_mask == ENCODE_ALL) {
/*
* If the first instruction is a load, don't hoist anything
* above it since it is unlikely to be beneficial.
@@ -436,7 +436,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
*/
bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
(GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
- if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
+ if (((cur_lir->u.m.use_mask & prev_lir->u.m.def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
break;
}
}