Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc              4
-rw-r--r--  compiler/dex/local_value_numbering.cc           79
-rw-r--r--  compiler/dex/local_value_numbering.h             2
-rw-r--r--  compiler/dex/local_value_numbering_test.cc      78
-rw-r--r--  compiler/dex/quick/codegen_util.cc               1
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc              6
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc          9
-rw-r--r--  compiler/dex/quick/quick_compiler.cc             2
-rw-r--r--  compiler/optimizing/code_generator.cc           42
-rw-r--r--  compiler/optimizing/code_generator.h            10
-rw-r--r--  compiler/optimizing/code_generator_arm.h         2
-rw-r--r--  compiler/optimizing/code_generator_arm64.h       2
-rw-r--r--  compiler/optimizing/code_generator_x86.h         2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h      2
-rw-r--r--  compiler/utils/arm/assembler_arm.cc             31
-rw-r--r--  compiler/utils/arm/assembler_arm32.cc            2
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc           4
-rw-r--r--  compiler/utils/x86/assembler_x86.cc              4
-rw-r--r--  compiler/utils/x86/assembler_x86.h               8
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc       10
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h        14
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc  35
22 files changed, 238 insertions(+), 111 deletions(-)
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index f7968c225a..7e916bee4a 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -238,7 +238,7 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
bool is_volatile;
bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
&field_offset, &is_volatile);
- if (fast_path && !is_volatile && IsUint(16, field_offset.Int32Value())) {
+ if (fast_path && !is_volatile && IsUint<16>(field_offset.Int32Value())) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< " to " << Instruction::Name(new_opcode)
<< " by replacing field index " << field_idx
@@ -274,7 +274,7 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst,
&target_method, &vtable_idx,
&direct_code, &direct_method);
if (fast_path && original_invoke_type == invoke_type) {
- if (vtable_idx >= 0 && IsUint(16, vtable_idx)) {
+ if (vtable_idx >= 0 && IsUint<16>(vtable_idx)) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< "(" << PrettyMethod(method_idx, GetDexFile(), true) << ")"
<< " to " << Instruction::Name(new_opcode)
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index d677680777..99b6683b26 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -1214,6 +1214,31 @@ uint16_t LocalValueNumbering::HandlePhi(MIR* mir) {
return value_name;
}
+uint16_t LocalValueNumbering::HandleConst(MIR* mir, uint32_t value) {
+ RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
+ uint16_t res;
+ if (value == 0u && raw_dest.ref) {
+ res = GlobalValueNumbering::kNullValue;
+ } else {
+ Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
+ res = gvn_->LookupValue(op, Low16Bits(value), High16Bits(value), 0);
+ }
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ return res;
+}
+
+uint16_t LocalValueNumbering::HandleConstWide(MIR* mir, uint64_t value) {
+ RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
+ Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
+ uint32_t low_word = Low32Bits(value);
+ uint32_t high_word = High32Bits(value);
+ uint16_t low_res = gvn_->LookupValue(op, Low16Bits(low_word), High16Bits(low_word), 1);
+ uint16_t high_res = gvn_->LookupValue(op, Low16Bits(high_word), High16Bits(high_word), 2);
+ uint16_t res = gvn_->LookupValue(op, low_res, high_res, 3);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ return res;
+}
+
uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, array);
@@ -1652,58 +1677,28 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
break;
case Instruction::CONST_HIGH16:
- if (mir->dalvikInsn.vB != 0) {
- res = gvn_->LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
- SetOperandValue(mir->ssa_rep->defs[0], res);
- break;
- }
- FALLTHROUGH_INTENDED;
+ res = HandleConst(mir, mir->dalvikInsn.vB << 16);
+ break;
case Instruction::CONST:
case Instruction::CONST_4:
case Instruction::CONST_16:
- if (mir->dalvikInsn.vB == 0 && gvn_->GetMirGraph()->GetRawDest(mir).ref) {
- res = GlobalValueNumbering::kNullValue;
- } else {
- res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
- High16Bits(mir->dalvikInsn.vB), 0);
- }
- SetOperandValue(mir->ssa_rep->defs[0], res);
+ res = HandleConst(mir, mir->dalvikInsn.vB);
break;
case Instruction::CONST_WIDE_16:
- case Instruction::CONST_WIDE_32: {
- uint16_t low_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
- High16Bits(mir->dalvikInsn.vB), 1);
- uint16_t high_res;
- if (mir->dalvikInsn.vB & 0x80000000) {
- high_res = gvn_->LookupValue(Instruction::CONST, 0xffff, 0xffff, 2);
- } else {
- high_res = gvn_->LookupValue(Instruction::CONST, 0, 0, 2);
- }
- res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
- SetOperandValueWide(mir->ssa_rep->defs[0], res);
- }
+ case Instruction::CONST_WIDE_32:
+ res = HandleConstWide(
+ mir,
+ mir->dalvikInsn.vB +
+ ((mir->dalvikInsn.vB & 0x80000000) != 0 ? UINT64_C(0xffffffff00000000) : 0u));
break;
- case Instruction::CONST_WIDE: {
- uint32_t low_word = Low32Bits(mir->dalvikInsn.vB_wide);
- uint32_t high_word = High32Bits(mir->dalvikInsn.vB_wide);
- uint16_t low_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(low_word),
- High16Bits(low_word), 1);
- uint16_t high_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(high_word),
- High16Bits(high_word), 2);
- res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
- SetOperandValueWide(mir->ssa_rep->defs[0], res);
- }
+ case Instruction::CONST_WIDE:
+ res = HandleConstWide(mir, mir->dalvikInsn.vB_wide);
break;
- case Instruction::CONST_WIDE_HIGH16: {
- uint16_t low_res = gvn_->LookupValue(Instruction::CONST, 0, 0, 1);
- uint16_t high_res = gvn_->LookupValue(Instruction::CONST, 0,
- Low16Bits(mir->dalvikInsn.vB), 2);
- res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
- SetOperandValueWide(mir->ssa_rep->defs[0], res);
- }
+ case Instruction::CONST_WIDE_HIGH16:
+ res = HandleConstWide(mir, static_cast<uint64_t>(mir->dalvikInsn.vB) << 48);
break;
case Instruction::ARRAY_LENGTH: {
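
In the CONST_WIDE_16/CONST_WIDE_32 case above, the 32-bit vB payload is sign-extended to 64 bits inline before the new HandleConstWide() helper is called. A constexpr restatement of that widening expression (SignExtendVBTo64 is an illustrative name, not an ART function):

  #include <cstdint>

  // Hedged sketch mirroring the inline expression in the hunk above:
  // widen the 32-bit payload to 64 bits, replicating the sign bit.
  constexpr uint64_t SignExtendVBTo64(uint32_t vB) {
    return static_cast<uint64_t>(vB) +
           (((vB & 0x80000000u) != 0u) ? UINT64_C(0xffffffff00000000) : UINT64_C(0));
  }

  static_assert(SignExtendVBTo64(0x00000001u) == UINT64_C(0x0000000000000001),
                "positive payloads keep a zero high word");
  static_assert(SignExtendVBTo64(0xffff0000u) == UINT64_C(0xffffffffffff0000),
                "negative payloads get an all-ones high word");

Note also that HandleConst()/HandleConstWide() key the value lookup on CONST_HIGH16 for FP destinations and CONST for core destinations, which is what keeps bit-identical core and FP constants distinct in the new tests below.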
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index f51b886112..bfacf8ea53 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -308,6 +308,8 @@ class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
void HandleEscapingRef(uint16_t base);
void HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn);
uint16_t HandlePhi(MIR* mir);
+ uint16_t HandleConst(MIR* mir, uint32_t value);
+ uint16_t HandleConstWide(MIR* mir, uint64_t value);
uint16_t HandleAGet(MIR* mir, uint16_t opcode);
void HandleAPut(MIR* mir, uint16_t opcode);
uint16_t HandleIGet(MIR* mir, uint16_t opcode);
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 4490f0657b..d1c3a6b4ba 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -780,6 +780,7 @@ TEST_F(LocalValueNumberingTest, DivZeroCheck) {
TEST_F(LocalValueNumberingTest, ConstWide) {
static const MIRDef mirs[] = {
+ // Core reg constants.
DEF_CONST(Instruction::CONST_WIDE_16, 0u, 0),
DEF_CONST(Instruction::CONST_WIDE_16, 1u, 1),
DEF_CONST(Instruction::CONST_WIDE_16, 2u, -1),
@@ -801,9 +802,86 @@ TEST_F(LocalValueNumberingTest, ConstWide) {
DEF_CONST(Instruction::CONST_WIDE, 18u, (INT64_C(1) << 48) - 1),
DEF_CONST(Instruction::CONST_WIDE, 19u, (INT64_C(-1) << 48) + 1),
DEF_CONST(Instruction::CONST_WIDE, 20u, (INT64_C(-1) << 48) - 1),
+ // FP reg constants.
+ DEF_CONST(Instruction::CONST_WIDE_16, 21u, 0),
+ DEF_CONST(Instruction::CONST_WIDE_16, 22u, 1),
+ DEF_CONST(Instruction::CONST_WIDE_16, 23u, -1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 24u, 1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 25u, -1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 26u, (1 << 16) + 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 27u, (1 << 16) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 28u, -(1 << 16) + 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 29u, -(1 << 16) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 30u, INT64_C(1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 31u, INT64_C(-1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 32u, (INT64_C(1) << 32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 33u, (INT64_C(1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 34u, (INT64_C(-1) << 32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 35u, (INT64_C(-1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_HIGH16, 36u, 1), // Effectively 1 << 48.
+ DEF_CONST(Instruction::CONST_WIDE_HIGH16, 37u, 0xffff), // Effectively -1 << 48.
+ DEF_CONST(Instruction::CONST_WIDE, 38u, (INT64_C(1) << 48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 39u, (INT64_C(1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 40u, (INT64_C(-1) << 48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 41u, (INT64_C(-1) << 48) - 1),
};
PrepareMIRs(mirs);
+ for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs); ++i) {
+ cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
+ }
+ PerformLVN();
+ for (size_t i = 0u; i != mir_count_; ++i) {
+ for (size_t j = i + 1u; j != mir_count_; ++j) {
+ EXPECT_NE(value_names_[i], value_names_[j]) << i << " " << j;
+ }
+ }
+}
+
+TEST_F(LocalValueNumberingTest, Const) {
+ static const MIRDef mirs[] = {
+ // Core reg constants.
+ DEF_CONST(Instruction::CONST_4, 0u, 0),
+ DEF_CONST(Instruction::CONST_4, 1u, 1),
+ DEF_CONST(Instruction::CONST_4, 2u, -1),
+ DEF_CONST(Instruction::CONST_16, 3u, 1 << 4),
+ DEF_CONST(Instruction::CONST_16, 4u, -1 << 4),
+ DEF_CONST(Instruction::CONST_16, 5u, (1 << 4) + 1),
+ DEF_CONST(Instruction::CONST_16, 6u, (1 << 4) - 1),
+ DEF_CONST(Instruction::CONST_16, 7u, -(1 << 4) + 1),
+ DEF_CONST(Instruction::CONST_16, 8u, -(1 << 4) - 1),
+ DEF_CONST(Instruction::CONST_HIGH16, 9u, 1), // Effectively 1 << 16.
+ DEF_CONST(Instruction::CONST_HIGH16, 10u, 0xffff), // Effectively -1 << 16.
+ DEF_CONST(Instruction::CONST, 11u, (1 << 16) + 1),
+ DEF_CONST(Instruction::CONST, 12u, (1 << 16) - 1),
+ DEF_CONST(Instruction::CONST, 13u, (-1 << 16) + 1),
+ DEF_CONST(Instruction::CONST, 14u, (-1 << 16) - 1),
+ // FP reg constants.
+ DEF_CONST(Instruction::CONST_4, 15u, 0),
+ DEF_CONST(Instruction::CONST_4, 16u, 1),
+ DEF_CONST(Instruction::CONST_4, 17u, -1),
+ DEF_CONST(Instruction::CONST_16, 18u, 1 << 4),
+ DEF_CONST(Instruction::CONST_16, 19u, -1 << 4),
+ DEF_CONST(Instruction::CONST_16, 20u, (1 << 4) + 1),
+ DEF_CONST(Instruction::CONST_16, 21u, (1 << 4) - 1),
+ DEF_CONST(Instruction::CONST_16, 22u, -(1 << 4) + 1),
+ DEF_CONST(Instruction::CONST_16, 23u, -(1 << 4) - 1),
+ DEF_CONST(Instruction::CONST_HIGH16, 24u, 1), // Effectively 1 << 16.
+ DEF_CONST(Instruction::CONST_HIGH16, 25u, 0xffff), // Effectively -1 << 16.
+ DEF_CONST(Instruction::CONST, 26u, (1 << 16) + 1),
+ DEF_CONST(Instruction::CONST, 27u, (1 << 16) - 1),
+ DEF_CONST(Instruction::CONST, 28u, (-1 << 16) + 1),
+ DEF_CONST(Instruction::CONST, 29u, (-1 << 16) - 1),
+ // null reference constant.
+ DEF_CONST(Instruction::CONST_4, 30u, 0),
+ };
+
+ PrepareMIRs(mirs);
+ static_assert((arraysize(mirs) & 1) != 0, "missing null or unmatched fp/core");
+ cu_.mir_graph->reg_location_[arraysize(mirs) - 1].ref = true;
+ for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs) - 1; ++i) {
+ cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
+ }
PerformLVN();
for (size_t i = 0u; i != mir_count_; ++i) {
for (size_t j = i + 1u; j != mir_count_; ++j) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 88a4605a6b..055c39f70c 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -865,7 +865,6 @@ LIR* Mir2Lir::InsertCaseLabel(uint32_t bbid, int keyVal) {
DCHECK(!new_label->flags.use_def_invalid);
new_label->u.m.def_mask = &kEncodeAll;
InsertLIRAfter(boundary_lir, new_label);
- res = new_label;
}
return res;
}
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 9f36e35f5e..db844bcde9 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -44,7 +44,9 @@ LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) {
void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
if (rl_dest.fp) {
int pmap_index = SRegToPMap(rl_dest.s_reg_low);
- if (promotion_map_[pmap_index].fp_location == kLocPhysReg) {
+ const bool is_fp_promoted = promotion_map_[pmap_index].fp_location == kLocPhysReg;
+ const bool is_core_promoted = promotion_map_[pmap_index].core_location == kLocPhysReg;
+ if (is_fp_promoted || is_core_promoted) {
// Now, determine if this vreg is ever used as a reference. If not, we're done.
bool used_as_reference = false;
int base_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
@@ -61,7 +63,7 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
temp_reg = AllocTemp();
LoadConstant(temp_reg, 0);
}
- if (promotion_map_[pmap_index].core_location == kLocPhysReg) {
+ if (is_core_promoted) {
// Promoted - just copy in a zero
OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg);
} else {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 6f6bf68fea..ec6edabdbd 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -56,7 +56,8 @@ LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
}
bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
- return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768)));
+ // For encodings, see LoadConstantNoClobber below.
+ return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
}
bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
@@ -96,9 +97,11 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
/* See if the value can be constructed cheaply */
if (value == 0) {
res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
- } else if ((value > 0) && (value <= 65535)) {
+ } else if (IsUint<16>(value)) {
+ // Use OR with (unsigned) immediate to encode 16b unsigned int.
res = NewLIR3(kMipsOri, r_dest.GetReg(), rZERO, value);
- } else if ((value < 0) && (value >= -32768)) {
+ } else if (IsInt<16>(value)) {
+ // Use ADD with (signed) immediate to encode 16b signed int.
res = NewLIR3(kMipsAddiu, r_dest.GetReg(), rZERO, value);
} else {
res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
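
The MIPS change above makes InexpensiveConstantInt() agree with the encodings LoadConstantNoClobber() actually emits: zero comes from $zero via a move, a 16-bit unsigned value fits ORI's zero-extended immediate, a 16-bit signed value fits ADDIU's sign-extended immediate, and everything else starts with LUI (with the low halfword presumably filled in separately, outside this hunk). A constexpr restatement of that decision, using illustrative names only:

  #include <cstdint>

  enum class MipsConstLoad { kMove, kOri, kAddiu, kLuiOri };

  constexpr bool FitsUint16(int32_t v) { return v >= 0 && v <= 0xffff; }
  constexpr bool FitsInt16(int32_t v) { return v >= -0x8000 && v <= 0x7fff; }

  // Hedged sketch of the if/else chain in LoadConstantNoClobber() above.
  constexpr MipsConstLoad Classify(int32_t value) {
    return value == 0        ? MipsConstLoad::kMove
         : FitsUint16(value) ? MipsConstLoad::kOri
         : FitsInt16(value)  ? MipsConstLoad::kAddiu
         :                     MipsConstLoad::kLuiOri;
  }

  static_assert(Classify(0) == MipsConstLoad::kMove, "zero register copy");
  static_assert(Classify(0xffff) == MipsConstLoad::kOri, "16-bit unsigned max");
  static_assert(Classify(-1) == MipsConstLoad::kAddiu, "small negatives stay cheap");
  static_assert(Classify(0x12345678) == MipsConstLoad::kLuiOri, "needs more than one instruction");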
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index f39942973a..19c2a5a3a3 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -560,7 +560,7 @@ static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimi
// (1 << kNullCheckElimination) |
// (1 << kClassInitCheckElimination) |
// (1 << kGlobalValueNumbering) |
- // (1 << kGvnDeadCodeElimination) |
+ (1 << kGvnDeadCodeElimination) |
// (1 << kLocalValueNumbering) |
// (1 << kPromoteRegs) |
// (1 << kTrackLiveTemps) |
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d0739a6de2..bf3ed14b48 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -40,6 +40,16 @@ size_t CodeGenerator::GetCacheOffset(uint32_t index) {
return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}
+static bool IsSingleGoto(HBasicBlock* block) {
+ HLoopInformation* loop_info = block->GetLoopInformation();
+ // TODO: Remove the null check b/19084197.
+ return (block->GetFirstInstruction() != nullptr)
+ && (block->GetFirstInstruction() == block->GetLastInstruction())
+ && block->GetLastInstruction()->IsGoto()
+ // Back edges generate the suspend check.
+ && (loop_info == nullptr || !loop_info->IsBackEdge(block));
+}
+
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
Initialize();
if (!is_leaf) {
@@ -56,12 +66,38 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
CompileInternal(allocator, /* is_baseline */ true);
}
+bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
+ DCHECK_EQ(block_order_->Get(current_block_index_), current);
+ return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
+}
+
+HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
+ for (size_t i = current_block_index_ + 1; i < block_order_->Size(); ++i) {
+ HBasicBlock* block = block_order_->Get(i);
+ if (!IsSingleGoto(block)) {
+ return block;
+ }
+ }
+ return nullptr;
+}
+
+HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
+ while (IsSingleGoto(block)) {
+ block = block->GetSuccessors().Get(0);
+ }
+ return block;
+}
+
void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
DCHECK_EQ(current_block_index_, 0u);
GenerateFrameEntry();
for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
HBasicBlock* block = block_order_->Get(current_block_index_);
+ // Don't generate code for an empty block. Its predecessors will branch to its successor
+ // directly. Also, the label of that block will not be emitted, so this helps catch
+ // errors where we reference that label.
+ if (IsSingleGoto(block)) continue;
Bind(block);
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
@@ -338,12 +374,6 @@ void CodeGenerator::AllocateLocations(HInstruction* instruction) {
}
}
-bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
- DCHECK_EQ(block_order_->Get(current_block_index_), current);
- return (current_block_index_ < block_order_->Size() - 1)
- && (block_order_->Get(current_block_index_ + 1) == next);
-}
-
CodeGenerator* CodeGenerator::Create(HGraph* graph,
InstructionSet instruction_set,
const InstructionSetFeatures& isa_features,
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index efd0c84797..6c78f10500 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -91,6 +91,8 @@ class CodeGenerator {
HGraph* GetGraph() const { return graph_; }
+ HBasicBlock* GetNextBlockToEmit() const;
+ HBasicBlock* FirstNonEmptyBlock(HBasicBlock* block) const;
bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;
size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
@@ -314,6 +316,14 @@ class CodeGenerator {
return GetFrameSize() == (CallPushesPC() ? GetWordSize() : 0);
}
+ // Arm64 has its own type for a label, so we need to templatize this method
+ // to share the logic.
+ template <typename T>
+ T* CommonGetLabelOf(T* raw_pointer_to_labels_array, HBasicBlock* block) const {
+ block = FirstNonEmptyBlock(block);
+ return raw_pointer_to_labels_array + block->GetBlockId();
+ }
+
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 47d81ff984..f1a3729c13 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -252,7 +252,7 @@ class CodeGeneratorARM : public CodeGenerator {
void MarkGCCard(Register temp, Register card, Register object, Register value);
Label* GetLabelOf(HBasicBlock* block) const {
- return block_labels_.GetRawStorage() + block->GetBlockId();
+ return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
}
void Initialize() OVERRIDE {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 2e937e2c0f..afb7fc3718 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -214,7 +214,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
vixl::Label* GetLabelOf(HBasicBlock* block) const {
- return block_labels_ + block->GetBlockId();
+ return CommonGetLabelOf<vixl::Label>(block_labels_, block);
}
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 107ddafea4..f5a9b7d1f7 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -234,7 +234,7 @@ class CodeGeneratorX86 : public CodeGenerator {
void LoadCurrentMethod(Register reg);
Label* GetLabelOf(HBasicBlock* block) const {
- return block_labels_.GetRawStorage() + block->GetBlockId();
+ return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
}
void Initialize() OVERRIDE {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index dbdbf869db..707c9992c0 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -232,7 +232,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void LoadCurrentMethod(CpuRegister reg);
Label* GetLabelOf(HBasicBlock* block) const {
- return block_labels_.GetRawStorage() + block->GetBlockId();
+ return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
}
void Initialize() OVERRIDE {
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 1f44f19b23..a52e6eb30f 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -166,7 +166,7 @@ uint32_t ShifterOperand::encodingThumb() const {
}
uint32_t Address::encodingArm() const {
- CHECK(IsAbsoluteUint(12, offset_));
+ CHECK(IsAbsoluteUint<12>(offset_));
uint32_t encoding;
if (is_immed_offset_) {
if (offset_ < 0) {
@@ -278,11 +278,12 @@ uint32_t Address::encoding3() const {
// Encoding for vfp load/store addressing.
uint32_t Address::vencoding() const {
+ CHECK(IsAbsoluteUint<10>(offset_)); // In the range -1020 to +1020.
+ CHECK_ALIGNED(offset_, 2); // Multiple of 4.
+
const uint32_t offset_mask = (1 << 12) - 1;
uint32_t encoding = encodingArm();
uint32_t offset = encoding & offset_mask;
- CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020.
- CHECK_ALIGNED(offset, 2); // Multiple of 4.
CHECK((am_ == Offset) || (am_ == NegOffset));
uint32_t vencoding_value = (encoding & (0xf << kRnShift)) | (offset >> 2);
if (am_ == Offset) {
@@ -298,13 +299,13 @@ bool Address::CanHoldLoadOffsetArm(LoadOperandType type, int offset) {
case kLoadSignedHalfword:
case kLoadUnsignedHalfword:
case kLoadWordPair:
- return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ return IsAbsoluteUint<8>(offset); // Addressing mode 3.
case kLoadUnsignedByte:
case kLoadWord:
- return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ return IsAbsoluteUint<12>(offset); // Addressing mode 2.
case kLoadSWord:
case kLoadDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -316,13 +317,13 @@ bool Address::CanHoldStoreOffsetArm(StoreOperandType type, int offset) {
switch (type) {
case kStoreHalfword:
case kStoreWordPair:
- return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ return IsAbsoluteUint<8>(offset); // Addressing mode 3.
case kStoreByte:
case kStoreWord:
- return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ return IsAbsoluteUint<12>(offset); // Addressing mode 2.
case kStoreSWord:
case kStoreDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -336,12 +337,12 @@ bool Address::CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
case kLoadUnsignedHalfword:
case kLoadUnsignedByte:
case kLoadWord:
- return IsAbsoluteUint(12, offset);
+ return IsAbsoluteUint<12>(offset);
case kLoadSWord:
case kLoadDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
case kLoadWordPair:
- return IsAbsoluteUint(10, offset);
+ return IsAbsoluteUint<10>(offset);
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -354,12 +355,12 @@ bool Address::CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
case kStoreHalfword:
case kStoreByte:
case kStoreWord:
- return IsAbsoluteUint(12, offset);
+ return IsAbsoluteUint<12>(offset);
case kStoreSWord:
case kStoreDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
case kStoreWordPair:
- return IsAbsoluteUint(10, offset);
+ return IsAbsoluteUint<10>(offset);
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index 8d1fb60725..95796916b4 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1254,7 +1254,7 @@ void Arm32Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR
void Arm32Assembler::svc(uint32_t imm24) {
- CHECK(IsUint(24, imm24)) << imm24;
+ CHECK(IsUint<24>(imm24)) << imm24;
int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
Emit(encoding);
}
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 5383c28f82..6d0571e263 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2080,7 +2080,7 @@ void Thumb2Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR.
void Thumb2Assembler::svc(uint32_t imm8) {
- CHECK(IsUint(8, imm8)) << imm8;
+ CHECK(IsUint<8>(imm8)) << imm8;
int16_t encoding = B15 | B14 | B12 |
B11 | B10 | B9 | B8 |
imm8;
@@ -2089,7 +2089,7 @@ void Thumb2Assembler::svc(uint32_t imm8) {
void Thumb2Assembler::bkpt(uint16_t imm8) {
- CHECK(IsUint(8, imm8)) << imm8;
+ CHECK(IsUint<8>(imm8)) << imm8;
int16_t encoding = B15 | B13 | B12 |
B11 | B10 | B9 |
imm8;
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 03744e4149..8f4208b417 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1290,7 +1290,7 @@ void X86Assembler::j(Condition condition, Label* label) {
static const int kLongSize = 6;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0x70 + condition);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
@@ -1325,7 +1325,7 @@ void X86Assembler::jmp(Label* label) {
static const int kLongSize = 5;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0xEB);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 3a44ace649..2dde90744e 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -35,10 +35,10 @@ class Immediate : public ValueObject {
int32_t value() const { return value_; }
- bool is_int8() const { return IsInt(8, value_); }
- bool is_uint8() const { return IsUint(8, value_); }
- bool is_int16() const { return IsInt(16, value_); }
- bool is_uint16() const { return IsUint(16, value_); }
+ bool is_int8() const { return IsInt<8>(value_); }
+ bool is_uint8() const { return IsUint<8>(value_); }
+ bool is_int16() const { return IsInt<16>(value_); }
+ bool is_uint16() const { return IsUint<16>(value_); }
private:
const int32_t value_;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 556fa9b38f..f2704b72a4 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1515,7 +1515,7 @@ void X86_64Assembler::imull(CpuRegister reg, const Immediate& imm) {
// See whether imm can be represented as a sign-extended 8bit value.
int32_t v32 = static_cast<int32_t>(imm.value());
- if (IsInt32(8, v32)) {
+ if (IsInt<8>(v32)) {
// Sign-extension works.
EmitUint8(0x6B);
EmitOperand(reg.LowBits(), Operand(reg));
@@ -1555,7 +1555,7 @@ void X86_64Assembler::imulq(CpuRegister reg, const Immediate& imm) {
// See whether imm can be represented as a sign-extended 8bit value.
int64_t v64 = imm.value();
- if (IsInt64(8, v64)) {
+ if (IsInt<8>(v64)) {
// Sign-extension works.
EmitUint8(0x6B);
EmitOperand(reg.LowBits(), Operand(reg));
@@ -1705,7 +1705,7 @@ void X86_64Assembler::notq(CpuRegister reg) {
void X86_64Assembler::enter(const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xC8);
- CHECK(imm.is_uint16());
+ CHECK(imm.is_uint16()) << imm.value();
EmitUint8(imm.value() & 0xFF);
EmitUint8((imm.value() >> 8) & 0xFF);
EmitUint8(0x00);
@@ -1759,7 +1759,7 @@ void X86_64Assembler::j(Condition condition, Label* label) {
static const int kLongSize = 6;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0x70 + condition);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
@@ -1796,7 +1796,7 @@ void X86_64Assembler::jmp(Label* label) {
static const int kLongSize = 5;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0xEB);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index a1c704e94c..5dfcf4541b 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -42,15 +42,11 @@ class Immediate : public ValueObject {
int64_t value() const { return value_; }
- bool is_int8() const { return IsInt(8, value_); }
- bool is_uint8() const { return IsUint(8, value_); }
- bool is_int16() const { return IsInt(16, value_); }
- bool is_uint16() const { return IsUint(16, value_); }
- bool is_int32() const {
- // This does not work on 32b machines: return IsInt(32, value_);
- int64_t limit = static_cast<int64_t>(1) << 31;
- return (-limit <= value_) && (value_ < limit);
- }
+ bool is_int8() const { return IsInt<8>(value_); }
+ bool is_uint8() const { return IsUint<8>(value_); }
+ bool is_int16() const { return IsInt<16>(value_); }
+ bool is_uint16() const { return IsUint<16>(value_); }
+ bool is_int32() const { return IsInt<32>(value_); }
private:
const int64_t value_;
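
The hand-rolled is_int32() above existed only because, as its comment noted, IsInt(32, value_) with a runtime width did not work on 32-bit hosts; the templated IsInt<32>() takes the value as a 64-bit integer, so the manual limit computation can go. A sketch of such a check under the illustrative name IsIntSketch (not the runtime's actual implementation):

  #include <cstddef>
  #include <cstdint>

  // Hedged sketch: does |value| fit in a signed two's-complement field of
  // kBits bits?
  template <size_t kBits>
  constexpr bool IsIntSketch(int64_t value) {
    static_assert(kBits > 0 && kBits < 64, "bit width out of range for this sketch");
    return value >= -(INT64_C(1) << (kBits - 1)) &&
           value <   (INT64_C(1) << (kBits - 1));
  }

  static_assert(IsIntSketch<32>(INT64_C(0x7fffffff)), "INT32_MAX fits");
  static_assert(!IsIntSketch<32>(INT64_C(0x80000000)), "INT32_MAX + 1 does not");
  static_assert(IsIntSketch<8>(-128) && !IsIntSketch<8>(128), "8-bit bounds");

This matches the new x86-64 test expectations below, where 0x1200000010 and 0x8000000000000001 are rejected by is_int8(), is_int16(), and is_int32().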
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 6df4144004..00f508b23f 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -44,10 +44,10 @@ static constexpr size_t kRandomIterations = 100000; // Hosts are pretty powerfu
TEST(AssemblerX86_64, SignExtension) {
// 32bit.
for (int32_t i = 0; i < 128; i++) {
- EXPECT_TRUE(IsInt32(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int32_t i = 128; i < 255; i++) {
- EXPECT_FALSE(IsInt32(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some higher ones randomly.
std::random_device rd;
@@ -55,54 +55,65 @@ TEST(AssemblerX86_64, SignExtension) {
std::uniform_int_distribution<int32_t> uniform_dist(256, INT32_MAX);
for (size_t i = 0; i < kRandomIterations; i++) {
int32_t value = uniform_dist(e1);
- EXPECT_FALSE(IsInt32(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
// Negative ones.
for (int32_t i = -1; i >= -128; i--) {
- EXPECT_TRUE(IsInt32(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int32_t i = -129; i > -256; i--) {
- EXPECT_FALSE(IsInt32(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some lower ones randomly.
std::uniform_int_distribution<int32_t> uniform_dist2(INT32_MIN, -256);
for (size_t i = 0; i < 100; i++) {
int32_t value = uniform_dist2(e1);
- EXPECT_FALSE(IsInt32(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
// 64bit.
for (int64_t i = 0; i < 128; i++) {
- EXPECT_TRUE(IsInt64(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int32_t i = 128; i < 255; i++) {
- EXPECT_FALSE(IsInt64(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some higher ones randomly.
std::uniform_int_distribution<int64_t> uniform_dist3(256, INT64_MAX);
for (size_t i = 0; i < 100; i++) {
int64_t value = uniform_dist3(e1);
- EXPECT_FALSE(IsInt64(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
// Negative ones.
for (int64_t i = -1; i >= -128; i--) {
- EXPECT_TRUE(IsInt64(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int64_t i = -129; i > -256; i--) {
- EXPECT_FALSE(IsInt64(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some lower ones randomly.
std::uniform_int_distribution<int64_t> uniform_dist4(INT64_MIN, -256);
for (size_t i = 0; i < kRandomIterations; i++) {
int64_t value = uniform_dist4(e1);
- EXPECT_FALSE(IsInt64(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
+
+ int64_t value = INT64_C(0x1200000010);
+ x86_64::Immediate imm(value);
+ EXPECT_FALSE(imm.is_int8());
+ EXPECT_FALSE(imm.is_int16());
+ EXPECT_FALSE(imm.is_int32());
+ value = INT64_C(0x8000000000000001);
+ x86_64::Immediate imm2(value);
+ EXPECT_FALSE(imm2.is_int8());
+ EXPECT_FALSE(imm2.is_int16());
+ EXPECT_FALSE(imm2.is_int32());
}
struct X86_64CpuRegisterCompare {