summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc4
-rw-r--r--compiler/dex/local_value_numbering.cc79
-rw-r--r--compiler/dex/local_value_numbering.h2
-rw-r--r--compiler/dex/local_value_numbering_test.cc78
-rw-r--r--compiler/dex/quick/codegen_util.cc1
-rw-r--r--compiler/dex/quick/gen_loadstore.cc6
-rw-r--r--compiler/dex/quick/mips/utility_mips.cc9
-rw-r--r--compiler/dex/quick/quick_compiler.cc2
-rw-r--r--compiler/optimizing/code_generator.cc42
-rw-r--r--compiler/optimizing/code_generator.h10
-rw-r--r--compiler/optimizing/code_generator_arm.h2
-rw-r--r--compiler/optimizing/code_generator_arm64.h2
-rw-r--r--compiler/optimizing/code_generator_x86.h2
-rw-r--r--compiler/optimizing/code_generator_x86_64.h2
-rw-r--r--compiler/utils/arm/assembler_arm.cc31
-rw-r--r--compiler/utils/arm/assembler_arm32.cc2
-rw-r--r--compiler/utils/arm/assembler_thumb2.cc4
-rw-r--r--compiler/utils/x86/assembler_x86.cc4
-rw-r--r--compiler/utils/x86/assembler_x86.h8
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc10
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h14
-rw-r--r--compiler/utils/x86_64/assembler_x86_64_test.cc35
-rw-r--r--runtime/arch/arch_test.cc6
-rw-r--r--runtime/class_linker.cc10
-rw-r--r--runtime/dex_file.cc6
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc6
-rw-r--r--runtime/native/java_lang_Runtime.cc2
-rw-r--r--runtime/thread.cc4
-rw-r--r--runtime/utils.h66
-rwxr-xr-xtest/030-bad-finalizer/check20
-rwxr-xr-xtest/059-finalizer-throw/check20
-rwxr-xr-xtest/099-vmdebug/check20
-rw-r--r--test/Android.run-test.mk13
-rwxr-xr-xtest/etc/run-test-jar2
34 files changed, 375 insertions, 149 deletions
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index f7968c225a..7e916bee4a 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -238,7 +238,7 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
bool is_volatile;
bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
&field_offset, &is_volatile);
- if (fast_path && !is_volatile && IsUint(16, field_offset.Int32Value())) {
+ if (fast_path && !is_volatile && IsUint<16>(field_offset.Int32Value())) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< " to " << Instruction::Name(new_opcode)
<< " by replacing field index " << field_idx
@@ -274,7 +274,7 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst,
&target_method, &vtable_idx,
&direct_code, &direct_method);
if (fast_path && original_invoke_type == invoke_type) {
- if (vtable_idx >= 0 && IsUint(16, vtable_idx)) {
+ if (vtable_idx >= 0 && IsUint<16>(vtable_idx)) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< "(" << PrettyMethod(method_idx, GetDexFile(), true) << ")"
<< " to " << Instruction::Name(new_opcode)
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index d677680777..99b6683b26 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -1214,6 +1214,31 @@ uint16_t LocalValueNumbering::HandlePhi(MIR* mir) {
return value_name;
}
+uint16_t LocalValueNumbering::HandleConst(MIR* mir, uint32_t value) {
+ RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
+ uint16_t res;
+ if (value == 0u && raw_dest.ref) {
+ res = GlobalValueNumbering::kNullValue;
+ } else {
+ Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
+ res = gvn_->LookupValue(op, Low16Bits(value), High16Bits(value), 0);
+ }
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ return res;
+}
+
+uint16_t LocalValueNumbering::HandleConstWide(MIR* mir, uint64_t value) {
+ RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
+ Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
+ uint32_t low_word = Low32Bits(value);
+ uint32_t high_word = High32Bits(value);
+ uint16_t low_res = gvn_->LookupValue(op, Low16Bits(low_word), High16Bits(low_word), 1);
+ uint16_t high_res = gvn_->LookupValue(op, Low16Bits(high_word), High16Bits(high_word), 2);
+ uint16_t res = gvn_->LookupValue(op, low_res, high_res, 3);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ return res;
+}
+
uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, array);
@@ -1652,58 +1677,28 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
break;
case Instruction::CONST_HIGH16:
- if (mir->dalvikInsn.vB != 0) {
- res = gvn_->LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
- SetOperandValue(mir->ssa_rep->defs[0], res);
- break;
- }
- FALLTHROUGH_INTENDED;
+ res = HandleConst(mir, mir->dalvikInsn.vB << 16);
+ break;
case Instruction::CONST:
case Instruction::CONST_4:
case Instruction::CONST_16:
- if (mir->dalvikInsn.vB == 0 && gvn_->GetMirGraph()->GetRawDest(mir).ref) {
- res = GlobalValueNumbering::kNullValue;
- } else {
- res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
- High16Bits(mir->dalvikInsn.vB), 0);
- }
- SetOperandValue(mir->ssa_rep->defs[0], res);
+ res = HandleConst(mir, mir->dalvikInsn.vB);
break;
case Instruction::CONST_WIDE_16:
- case Instruction::CONST_WIDE_32: {
- uint16_t low_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
- High16Bits(mir->dalvikInsn.vB), 1);
- uint16_t high_res;
- if (mir->dalvikInsn.vB & 0x80000000) {
- high_res = gvn_->LookupValue(Instruction::CONST, 0xffff, 0xffff, 2);
- } else {
- high_res = gvn_->LookupValue(Instruction::CONST, 0, 0, 2);
- }
- res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
- SetOperandValueWide(mir->ssa_rep->defs[0], res);
- }
+ case Instruction::CONST_WIDE_32:
+ res = HandleConstWide(
+ mir,
+ mir->dalvikInsn.vB +
+ ((mir->dalvikInsn.vB & 0x80000000) != 0 ? UINT64_C(0xffffffff00000000) : 0u));
break;
- case Instruction::CONST_WIDE: {
- uint32_t low_word = Low32Bits(mir->dalvikInsn.vB_wide);
- uint32_t high_word = High32Bits(mir->dalvikInsn.vB_wide);
- uint16_t low_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(low_word),
- High16Bits(low_word), 1);
- uint16_t high_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(high_word),
- High16Bits(high_word), 2);
- res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
- SetOperandValueWide(mir->ssa_rep->defs[0], res);
- }
+ case Instruction::CONST_WIDE:
+ res = HandleConstWide(mir, mir->dalvikInsn.vB_wide);
break;
- case Instruction::CONST_WIDE_HIGH16: {
- uint16_t low_res = gvn_->LookupValue(Instruction::CONST, 0, 0, 1);
- uint16_t high_res = gvn_->LookupValue(Instruction::CONST, 0,
- Low16Bits(mir->dalvikInsn.vB), 2);
- res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
- SetOperandValueWide(mir->ssa_rep->defs[0], res);
- }
+ case Instruction::CONST_WIDE_HIGH16:
+ res = HandleConstWide(mir, static_cast<uint64_t>(mir->dalvikInsn.vB) << 48);
break;
case Instruction::ARRAY_LENGTH: {
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index f51b886112..bfacf8ea53 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -308,6 +308,8 @@ class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
void HandleEscapingRef(uint16_t base);
void HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn);
uint16_t HandlePhi(MIR* mir);
+ uint16_t HandleConst(MIR* mir, uint32_t value);
+ uint16_t HandleConstWide(MIR* mir, uint64_t value);
uint16_t HandleAGet(MIR* mir, uint16_t opcode);
void HandleAPut(MIR* mir, uint16_t opcode);
uint16_t HandleIGet(MIR* mir, uint16_t opcode);
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 4490f0657b..d1c3a6b4ba 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -780,6 +780,7 @@ TEST_F(LocalValueNumberingTest, DivZeroCheck) {
TEST_F(LocalValueNumberingTest, ConstWide) {
static const MIRDef mirs[] = {
+ // Core reg constants.
DEF_CONST(Instruction::CONST_WIDE_16, 0u, 0),
DEF_CONST(Instruction::CONST_WIDE_16, 1u, 1),
DEF_CONST(Instruction::CONST_WIDE_16, 2u, -1),
@@ -801,9 +802,86 @@ TEST_F(LocalValueNumberingTest, ConstWide) {
DEF_CONST(Instruction::CONST_WIDE, 18u, (INT64_C(1) << 48) - 1),
DEF_CONST(Instruction::CONST_WIDE, 19u, (INT64_C(-1) << 48) + 1),
DEF_CONST(Instruction::CONST_WIDE, 20u, (INT64_C(-1) << 48) - 1),
+ // FP reg constants.
+ DEF_CONST(Instruction::CONST_WIDE_16, 21u, 0),
+ DEF_CONST(Instruction::CONST_WIDE_16, 22u, 1),
+ DEF_CONST(Instruction::CONST_WIDE_16, 23u, -1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 24u, 1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 25u, -1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 26u, (1 << 16) + 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 27u, (1 << 16) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 28u, -(1 << 16) + 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 29u, -(1 << 16) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 30u, INT64_C(1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 31u, INT64_C(-1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 32u, (INT64_C(1) << 32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 33u, (INT64_C(1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 34u, (INT64_C(-1) << 32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 35u, (INT64_C(-1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_HIGH16, 36u, 1), // Effectively 1 << 48.
+ DEF_CONST(Instruction::CONST_WIDE_HIGH16, 37u, 0xffff), // Effectively -1 << 48.
+ DEF_CONST(Instruction::CONST_WIDE, 38u, (INT64_C(1) << 48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 39u, (INT64_C(1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 40u, (INT64_C(-1) << 48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 41u, (INT64_C(-1) << 48) - 1),
};
PrepareMIRs(mirs);
+ for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs); ++i) {
+ cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
+ }
+ PerformLVN();
+ for (size_t i = 0u; i != mir_count_; ++i) {
+ for (size_t j = i + 1u; j != mir_count_; ++j) {
+ EXPECT_NE(value_names_[i], value_names_[j]) << i << " " << j;
+ }
+ }
+}
+
+TEST_F(LocalValueNumberingTest, Const) {
+ static const MIRDef mirs[] = {
+ // Core reg constants.
+ DEF_CONST(Instruction::CONST_4, 0u, 0),
+ DEF_CONST(Instruction::CONST_4, 1u, 1),
+ DEF_CONST(Instruction::CONST_4, 2u, -1),
+ DEF_CONST(Instruction::CONST_16, 3u, 1 << 4),
+ DEF_CONST(Instruction::CONST_16, 4u, -1 << 4),
+ DEF_CONST(Instruction::CONST_16, 5u, (1 << 4) + 1),
+ DEF_CONST(Instruction::CONST_16, 6u, (1 << 4) - 1),
+ DEF_CONST(Instruction::CONST_16, 7u, -(1 << 4) + 1),
+ DEF_CONST(Instruction::CONST_16, 8u, -(1 << 4) - 1),
+ DEF_CONST(Instruction::CONST_HIGH16, 9u, 1), // Effectively 1 << 16.
+ DEF_CONST(Instruction::CONST_HIGH16, 10u, 0xffff), // Effectively -1 << 16.
+ DEF_CONST(Instruction::CONST, 11u, (1 << 16) + 1),
+ DEF_CONST(Instruction::CONST, 12u, (1 << 16) - 1),
+ DEF_CONST(Instruction::CONST, 13u, (-1 << 16) + 1),
+ DEF_CONST(Instruction::CONST, 14u, (-1 << 16) - 1),
+ // FP reg constants.
+ DEF_CONST(Instruction::CONST_4, 15u, 0),
+ DEF_CONST(Instruction::CONST_4, 16u, 1),
+ DEF_CONST(Instruction::CONST_4, 17u, -1),
+ DEF_CONST(Instruction::CONST_16, 18u, 1 << 4),
+ DEF_CONST(Instruction::CONST_16, 19u, -1 << 4),
+ DEF_CONST(Instruction::CONST_16, 20u, (1 << 4) + 1),
+ DEF_CONST(Instruction::CONST_16, 21u, (1 << 4) - 1),
+ DEF_CONST(Instruction::CONST_16, 22u, -(1 << 4) + 1),
+ DEF_CONST(Instruction::CONST_16, 23u, -(1 << 4) - 1),
+ DEF_CONST(Instruction::CONST_HIGH16, 24u, 1), // Effectively 1 << 16.
+ DEF_CONST(Instruction::CONST_HIGH16, 25u, 0xffff), // Effectively -1 << 16.
+ DEF_CONST(Instruction::CONST, 26u, (1 << 16) + 1),
+ DEF_CONST(Instruction::CONST, 27u, (1 << 16) - 1),
+ DEF_CONST(Instruction::CONST, 28u, (-1 << 16) + 1),
+ DEF_CONST(Instruction::CONST, 29u, (-1 << 16) - 1),
+ // null reference constant.
+ DEF_CONST(Instruction::CONST_4, 30u, 0),
+ };
+
+ PrepareMIRs(mirs);
+ static_assert((arraysize(mirs) & 1) != 0, "missing null or unmatched fp/core");
+ cu_.mir_graph->reg_location_[arraysize(mirs) - 1].ref = true;
+ for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs) - 1; ++i) {
+ cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
+ }
PerformLVN();
for (size_t i = 0u; i != mir_count_; ++i) {
for (size_t j = i + 1u; j != mir_count_; ++j) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 88a4605a6b..055c39f70c 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -865,7 +865,6 @@ LIR* Mir2Lir::InsertCaseLabel(uint32_t bbid, int keyVal) {
DCHECK(!new_label->flags.use_def_invalid);
new_label->u.m.def_mask = &kEncodeAll;
InsertLIRAfter(boundary_lir, new_label);
- res = new_label;
}
return res;
}
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 9f36e35f5e..db844bcde9 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -44,7 +44,9 @@ LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) {
void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
if (rl_dest.fp) {
int pmap_index = SRegToPMap(rl_dest.s_reg_low);
- if (promotion_map_[pmap_index].fp_location == kLocPhysReg) {
+ const bool is_fp_promoted = promotion_map_[pmap_index].fp_location == kLocPhysReg;
+ const bool is_core_promoted = promotion_map_[pmap_index].core_location == kLocPhysReg;
+ if (is_fp_promoted || is_core_promoted) {
// Now, determine if this vreg is ever used as a reference. If not, we're done.
bool used_as_reference = false;
int base_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
@@ -61,7 +63,7 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
temp_reg = AllocTemp();
LoadConstant(temp_reg, 0);
}
- if (promotion_map_[pmap_index].core_location == kLocPhysReg) {
+ if (is_core_promoted) {
// Promoted - just copy in a zero
OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg);
} else {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 6f6bf68fea..ec6edabdbd 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -56,7 +56,8 @@ LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
}
bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
- return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768)));
+ // For encodings, see LoadConstantNoClobber below.
+ return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
}
bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
@@ -96,9 +97,11 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
/* See if the value can be constructed cheaply */
if (value == 0) {
res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
- } else if ((value > 0) && (value <= 65535)) {
+ } else if (IsUint<16>(value)) {
+ // Use OR with (unsigned) immediate to encode 16b unsigned int.
res = NewLIR3(kMipsOri, r_dest.GetReg(), rZERO, value);
- } else if ((value < 0) && (value >= -32768)) {
+ } else if (IsInt<16>(value)) {
+ // Use ADD with (signed) immediate to encode 16b signed int.
res = NewLIR3(kMipsAddiu, r_dest.GetReg(), rZERO, value);
} else {
res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index f39942973a..19c2a5a3a3 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -560,7 +560,7 @@ static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimi
// (1 << kNullCheckElimination) |
// (1 << kClassInitCheckElimination) |
// (1 << kGlobalValueNumbering) |
- // (1 << kGvnDeadCodeElimination) |
+ (1 << kGvnDeadCodeElimination) |
// (1 << kLocalValueNumbering) |
// (1 << kPromoteRegs) |
// (1 << kTrackLiveTemps) |
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d0739a6de2..bf3ed14b48 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -40,6 +40,16 @@ size_t CodeGenerator::GetCacheOffset(uint32_t index) {
return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}
+static bool IsSingleGoto(HBasicBlock* block) {
+ HLoopInformation* loop_info = block->GetLoopInformation();
+ // TODO: Remove the null check b/19084197.
+ return (block->GetFirstInstruction() != nullptr)
+ && (block->GetFirstInstruction() == block->GetLastInstruction())
+ && block->GetLastInstruction()->IsGoto()
+ // Back edges generate the suspend check.
+ && (loop_info == nullptr || !loop_info->IsBackEdge(block));
+}
+
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
Initialize();
if (!is_leaf) {
@@ -56,12 +66,38 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
CompileInternal(allocator, /* is_baseline */ true);
}
+bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
+ DCHECK_EQ(block_order_->Get(current_block_index_), current);
+ return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
+}
+
+HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
+ for (size_t i = current_block_index_ + 1; i < block_order_->Size(); ++i) {
+ HBasicBlock* block = block_order_->Get(i);
+ if (!IsSingleGoto(block)) {
+ return block;
+ }
+ }
+ return nullptr;
+}
+
+HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
+ while (IsSingleGoto(block)) {
+ block = block->GetSuccessors().Get(0);
+ }
+ return block;
+}
+
void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
DCHECK_EQ(current_block_index_, 0u);
GenerateFrameEntry();
for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
HBasicBlock* block = block_order_->Get(current_block_index_);
+ // Don't generate code for an empty block. Its predecessors will branch to its successor
+ // directly. Also, the label of that block will not be emitted, so this helps catch
+ // errors where we reference that label.
+ if (IsSingleGoto(block)) continue;
Bind(block);
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
@@ -338,12 +374,6 @@ void CodeGenerator::AllocateLocations(HInstruction* instruction) {
}
}
-bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
- DCHECK_EQ(block_order_->Get(current_block_index_), current);
- return (current_block_index_ < block_order_->Size() - 1)
- && (block_order_->Get(current_block_index_ + 1) == next);
-}
-
CodeGenerator* CodeGenerator::Create(HGraph* graph,
InstructionSet instruction_set,
const InstructionSetFeatures& isa_features,
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index efd0c84797..6c78f10500 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -91,6 +91,8 @@ class CodeGenerator {
HGraph* GetGraph() const { return graph_; }
+ HBasicBlock* GetNextBlockToEmit() const;
+ HBasicBlock* FirstNonEmptyBlock(HBasicBlock* block) const;
bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;
size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
@@ -314,6 +316,14 @@ class CodeGenerator {
return GetFrameSize() == (CallPushesPC() ? GetWordSize() : 0);
}
+ // Arm64 has its own type for a label, so we need to templatize this method
+ // to share the logic.
+ template <typename T>
+ T* CommonGetLabelOf(T* raw_pointer_to_labels_array, HBasicBlock* block) const {
+ block = FirstNonEmptyBlock(block);
+ return raw_pointer_to_labels_array + block->GetBlockId();
+ }
+
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 47d81ff984..f1a3729c13 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -252,7 +252,7 @@ class CodeGeneratorARM : public CodeGenerator {
void MarkGCCard(Register temp, Register card, Register object, Register value);
Label* GetLabelOf(HBasicBlock* block) const {
- return block_labels_.GetRawStorage() + block->GetBlockId();
+ return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
}
void Initialize() OVERRIDE {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 2e937e2c0f..afb7fc3718 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -214,7 +214,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
vixl::Label* GetLabelOf(HBasicBlock* block) const {
- return block_labels_ + block->GetBlockId();
+ return CommonGetLabelOf<vixl::Label>(block_labels_, block);
}
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 107ddafea4..f5a9b7d1f7 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -234,7 +234,7 @@ class CodeGeneratorX86 : public CodeGenerator {
void LoadCurrentMethod(Register reg);
Label* GetLabelOf(HBasicBlock* block) const {
- return block_labels_.GetRawStorage() + block->GetBlockId();
+ return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
}
void Initialize() OVERRIDE {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index dbdbf869db..707c9992c0 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -232,7 +232,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void LoadCurrentMethod(CpuRegister reg);
Label* GetLabelOf(HBasicBlock* block) const {
- return block_labels_.GetRawStorage() + block->GetBlockId();
+ return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
}
void Initialize() OVERRIDE {
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 1f44f19b23..a52e6eb30f 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -166,7 +166,7 @@ uint32_t ShifterOperand::encodingThumb() const {
}
uint32_t Address::encodingArm() const {
- CHECK(IsAbsoluteUint(12, offset_));
+ CHECK(IsAbsoluteUint<12>(offset_));
uint32_t encoding;
if (is_immed_offset_) {
if (offset_ < 0) {
@@ -278,11 +278,12 @@ uint32_t Address::encoding3() const {
// Encoding for vfp load/store addressing.
uint32_t Address::vencoding() const {
+ CHECK(IsAbsoluteUint<10>(offset_)); // In the range -1020 to +1020.
+ CHECK_ALIGNED(offset_, 2); // Multiple of 4.
+
const uint32_t offset_mask = (1 << 12) - 1;
uint32_t encoding = encodingArm();
uint32_t offset = encoding & offset_mask;
- CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020.
- CHECK_ALIGNED(offset, 2); // Multiple of 4.
CHECK((am_ == Offset) || (am_ == NegOffset));
uint32_t vencoding_value = (encoding & (0xf << kRnShift)) | (offset >> 2);
if (am_ == Offset) {
@@ -298,13 +299,13 @@ bool Address::CanHoldLoadOffsetArm(LoadOperandType type, int offset) {
case kLoadSignedHalfword:
case kLoadUnsignedHalfword:
case kLoadWordPair:
- return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ return IsAbsoluteUint<8>(offset); // Addressing mode 3.
case kLoadUnsignedByte:
case kLoadWord:
- return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ return IsAbsoluteUint<12>(offset); // Addressing mode 2.
case kLoadSWord:
case kLoadDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -316,13 +317,13 @@ bool Address::CanHoldStoreOffsetArm(StoreOperandType type, int offset) {
switch (type) {
case kStoreHalfword:
case kStoreWordPair:
- return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ return IsAbsoluteUint<8>(offset); // Addressing mode 3.
case kStoreByte:
case kStoreWord:
- return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ return IsAbsoluteUint<12>(offset); // Addressing mode 2.
case kStoreSWord:
case kStoreDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -336,12 +337,12 @@ bool Address::CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
case kLoadUnsignedHalfword:
case kLoadUnsignedByte:
case kLoadWord:
- return IsAbsoluteUint(12, offset);
+ return IsAbsoluteUint<12>(offset);
case kLoadSWord:
case kLoadDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
case kLoadWordPair:
- return IsAbsoluteUint(10, offset);
+ return IsAbsoluteUint<10>(offset);
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -354,12 +355,12 @@ bool Address::CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
case kStoreHalfword:
case kStoreByte:
case kStoreWord:
- return IsAbsoluteUint(12, offset);
+ return IsAbsoluteUint<12>(offset);
case kStoreSWord:
case kStoreDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
case kStoreWordPair:
- return IsAbsoluteUint(10, offset);
+ return IsAbsoluteUint<10>(offset);
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index 8d1fb60725..95796916b4 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1254,7 +1254,7 @@ void Arm32Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR
void Arm32Assembler::svc(uint32_t imm24) {
- CHECK(IsUint(24, imm24)) << imm24;
+ CHECK(IsUint<24>(imm24)) << imm24;
int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
Emit(encoding);
}
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 5383c28f82..6d0571e263 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2080,7 +2080,7 @@ void Thumb2Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR.
void Thumb2Assembler::svc(uint32_t imm8) {
- CHECK(IsUint(8, imm8)) << imm8;
+ CHECK(IsUint<8>(imm8)) << imm8;
int16_t encoding = B15 | B14 | B12 |
B11 | B10 | B9 | B8 |
imm8;
@@ -2089,7 +2089,7 @@ void Thumb2Assembler::svc(uint32_t imm8) {
void Thumb2Assembler::bkpt(uint16_t imm8) {
- CHECK(IsUint(8, imm8)) << imm8;
+ CHECK(IsUint<8>(imm8)) << imm8;
int16_t encoding = B15 | B13 | B12 |
B11 | B10 | B9 |
imm8;
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 03744e4149..8f4208b417 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1290,7 +1290,7 @@ void X86Assembler::j(Condition condition, Label* label) {
static const int kLongSize = 6;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0x70 + condition);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
@@ -1325,7 +1325,7 @@ void X86Assembler::jmp(Label* label) {
static const int kLongSize = 5;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0xEB);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 3a44ace649..2dde90744e 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -35,10 +35,10 @@ class Immediate : public ValueObject {
int32_t value() const { return value_; }
- bool is_int8() const { return IsInt(8, value_); }
- bool is_uint8() const { return IsUint(8, value_); }
- bool is_int16() const { return IsInt(16, value_); }
- bool is_uint16() const { return IsUint(16, value_); }
+ bool is_int8() const { return IsInt<8>(value_); }
+ bool is_uint8() const { return IsUint<8>(value_); }
+ bool is_int16() const { return IsInt<16>(value_); }
+ bool is_uint16() const { return IsUint<16>(value_); }
private:
const int32_t value_;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 556fa9b38f..f2704b72a4 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1515,7 +1515,7 @@ void X86_64Assembler::imull(CpuRegister reg, const Immediate& imm) {
// See whether imm can be represented as a sign-extended 8bit value.
int32_t v32 = static_cast<int32_t>(imm.value());
- if (IsInt32(8, v32)) {
+ if (IsInt<8>(v32)) {
// Sign-extension works.
EmitUint8(0x6B);
EmitOperand(reg.LowBits(), Operand(reg));
@@ -1555,7 +1555,7 @@ void X86_64Assembler::imulq(CpuRegister reg, const Immediate& imm) {
// See whether imm can be represented as a sign-extended 8bit value.
int64_t v64 = imm.value();
- if (IsInt64(8, v64)) {
+ if (IsInt<8>(v64)) {
// Sign-extension works.
EmitUint8(0x6B);
EmitOperand(reg.LowBits(), Operand(reg));
@@ -1705,7 +1705,7 @@ void X86_64Assembler::notq(CpuRegister reg) {
void X86_64Assembler::enter(const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xC8);
- CHECK(imm.is_uint16());
+ CHECK(imm.is_uint16()) << imm.value();
EmitUint8(imm.value() & 0xFF);
EmitUint8((imm.value() >> 8) & 0xFF);
EmitUint8(0x00);
@@ -1759,7 +1759,7 @@ void X86_64Assembler::j(Condition condition, Label* label) {
static const int kLongSize = 6;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0x70 + condition);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
@@ -1796,7 +1796,7 @@ void X86_64Assembler::jmp(Label* label) {
static const int kLongSize = 5;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0xEB);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index a1c704e94c..5dfcf4541b 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -42,15 +42,11 @@ class Immediate : public ValueObject {
int64_t value() const { return value_; }
- bool is_int8() const { return IsInt(8, value_); }
- bool is_uint8() const { return IsUint(8, value_); }
- bool is_int16() const { return IsInt(16, value_); }
- bool is_uint16() const { return IsUint(16, value_); }
- bool is_int32() const {
- // This does not work on 32b machines: return IsInt(32, value_);
- int64_t limit = static_cast<int64_t>(1) << 31;
- return (-limit <= value_) && (value_ < limit);
- }
+ bool is_int8() const { return IsInt<8>(value_); }
+ bool is_uint8() const { return IsUint<8>(value_); }
+ bool is_int16() const { return IsInt<16>(value_); }
+ bool is_uint16() const { return IsUint<16>(value_); }
+ bool is_int32() const { return IsInt<32>(value_); }
private:
const int64_t value_;
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 6df4144004..00f508b23f 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -44,10 +44,10 @@ static constexpr size_t kRandomIterations = 100000; // Hosts are pretty powerfu
TEST(AssemblerX86_64, SignExtension) {
// 32bit.
for (int32_t i = 0; i < 128; i++) {
- EXPECT_TRUE(IsInt32(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int32_t i = 128; i < 255; i++) {
- EXPECT_FALSE(IsInt32(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some higher ones randomly.
std::random_device rd;
@@ -55,54 +55,65 @@ TEST(AssemblerX86_64, SignExtension) {
std::uniform_int_distribution<int32_t> uniform_dist(256, INT32_MAX);
for (size_t i = 0; i < kRandomIterations; i++) {
int32_t value = uniform_dist(e1);
- EXPECT_FALSE(IsInt32(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
// Negative ones.
for (int32_t i = -1; i >= -128; i--) {
- EXPECT_TRUE(IsInt32(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int32_t i = -129; i > -256; i--) {
- EXPECT_FALSE(IsInt32(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some lower ones randomly.
std::uniform_int_distribution<int32_t> uniform_dist2(INT32_MIN, -256);
for (size_t i = 0; i < 100; i++) {
int32_t value = uniform_dist2(e1);
- EXPECT_FALSE(IsInt32(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
// 64bit.
for (int64_t i = 0; i < 128; i++) {
- EXPECT_TRUE(IsInt64(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int32_t i = 128; i < 255; i++) {
- EXPECT_FALSE(IsInt64(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some higher ones randomly.
std::uniform_int_distribution<int64_t> uniform_dist3(256, INT64_MAX);
for (size_t i = 0; i < 100; i++) {
int64_t value = uniform_dist3(e1);
- EXPECT_FALSE(IsInt64(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
// Negative ones.
for (int64_t i = -1; i >= -128; i--) {
- EXPECT_TRUE(IsInt64(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int64_t i = -129; i > -256; i--) {
- EXPECT_FALSE(IsInt64(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some lower ones randomly.
std::uniform_int_distribution<int64_t> uniform_dist4(INT64_MIN, -256);
for (size_t i = 0; i < kRandomIterations; i++) {
int64_t value = uniform_dist4(e1);
- EXPECT_FALSE(IsInt64(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
+
+ int64_t value = INT64_C(0x1200000010);
+ x86_64::Immediate imm(value);
+ EXPECT_FALSE(imm.is_int8());
+ EXPECT_FALSE(imm.is_int16());
+ EXPECT_FALSE(imm.is_int32());
+ value = INT64_C(0x8000000000000001);
+ x86_64::Immediate imm2(value);
+ EXPECT_FALSE(imm2.is_int8());
+ EXPECT_FALSE(imm2.is_int16());
+ EXPECT_FALSE(imm2.is_int32());
}
struct X86_64CpuRegisterCompare {
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index ab6b00bddf..5733ab6361 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -24,6 +24,12 @@ namespace art {
class ArchTest : public CommonRuntimeTest {
protected:
+ void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ // Use 64-bit ISA for runtime setup to make method size potentially larger
+ // than necessary (rather than smaller) during CreateCalleeSaveMethod
+ options->push_back(std::make_pair("imageinstructionset", "x86_64"));
+ }
+
static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
Runtime* r = Runtime::Current();
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3592d2cb63..f554c61e36 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3511,7 +3511,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
StringPrintf("Rejecting class %s that attempts to sub-class erroneous class %s",
PrettyDescriptor(klass.Get()).c_str(),
PrettyDescriptor(super.Get()).c_str()));
- LOG(ERROR) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
+ LOG(WARNING) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
if (cause.Get() != nullptr) {
self->ClearException();
@@ -3584,7 +3584,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
}
}
} else {
- LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(klass.Get())
+ LOG(WARNING) << "Verification failed on class " << PrettyDescriptor(klass.Get())
<< " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< " because: " << error_msg;
self->AssertNoPendingException();
@@ -4812,7 +4812,7 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
if (klass->IsInterface()) {
// No vtable.
size_t count = klass->NumVirtualMethods();
- if (!IsUint(16, count)) {
+ if (!IsUint<16>(count)) {
ThrowClassFormatError(klass.Get(), "Too many methods on interface: %zd", count);
return false;
}
@@ -5033,7 +5033,7 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
local_method->SetMethodIndex(actual_count);
++actual_count;
}
- if (!IsUint(16, actual_count)) {
+ if (!IsUint<16>(actual_count)) {
ThrowClassFormatError(klass.Get(), "Too many methods defined on class: %zd", actual_count);
return false;
}
@@ -5049,7 +5049,7 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
klass->SetVTable(vtable.Get());
} else {
CHECK_EQ(klass.Get(), GetClassRoot(kJavaLangObject));
- if (!IsUint(16, num_virtual_methods)) {
+ if (!IsUint<16>(num_virtual_methods)) {
ThrowClassFormatError(klass.Get(), "Too many methods: %d",
static_cast<int>(num_virtual_methods));
return false;
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 94d62db9e6..19a4bd0b6f 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -1164,15 +1164,15 @@ void EncodedStaticFieldValueIterator::Next() {
break;
case kByte:
jval_.i = ReadSignedInt(ptr_, value_arg);
- CHECK(IsInt(8, jval_.i));
+ CHECK(IsInt<8>(jval_.i));
break;
case kShort:
jval_.i = ReadSignedInt(ptr_, value_arg);
- CHECK(IsInt(16, jval_.i));
+ CHECK(IsInt<16>(jval_.i));
break;
case kChar:
jval_.i = ReadUnsignedInt(ptr_, value_arg, false);
- CHECK(IsUint(16, jval_.i));
+ CHECK(IsUint<16>(jval_.i));
break;
case kInt:
jval_.i = ReadSignedInt(ptr_, value_arg);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 85a0b99ab1..01c17acdcc 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -25,6 +25,12 @@ namespace art {
class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
protected:
+ void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ // Use 64-bit ISA for runtime setup to make method size potentially larger
+ // than necessary (rather than smaller) during CreateCalleeSaveMethod
+ options->push_back(std::make_pair("imageinstructionset", "x86_64"));
+ }
+
static mirror::ArtMethod* CreateCalleeSaveMethod(InstructionSet isa,
Runtime::CalleeSaveType type)
NO_THREAD_SAFETY_ANALYSIS {
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index dc0cb7bad6..97b17bfd45 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -65,7 +65,7 @@ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job
Fn android_update_LD_LIBRARY_PATH = reinterpret_cast<Fn>(sym);
(*android_update_LD_LIBRARY_PATH)(ldLibraryPath.c_str());
} else {
- LOG(ERROR) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
+ LOG(WARNING) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
}
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 16edab397f..cb6ed64f60 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1854,7 +1854,7 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
}
void Thread::ThrowOutOfMemoryError(const char* msg) {
- LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
+ LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
ThrowLocation throw_location = GetCurrentLocationForThrow();
if (!tls32_.throwing_OutOfMemoryError) {
@@ -1862,7 +1862,7 @@ void Thread::ThrowOutOfMemoryError(const char* msg) {
ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
tls32_.throwing_OutOfMemoryError = false;
} else {
- Dump(LOG(ERROR)); // The pre-allocated OOME has no stack, so help out and log one.
+ Dump(LOG(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
}
}
diff --git a/runtime/utils.h b/runtime/utils.h
index 1c2576c2d4..0fbc9df5a7 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -22,6 +22,7 @@
#include <limits>
#include <memory>
#include <string>
+#include <type_traits>
#include <vector>
#include "arch/instruction_set.h"
@@ -115,32 +116,45 @@ static inline bool IsInt(int N, intptr_t value) {
return (-limit <= value) && (value < limit);
}
-static inline bool IsInt32(int N, int32_t value) {
- CHECK_LT(0, N);
- CHECK_LT(static_cast<size_t>(N), 8 * sizeof(int32_t));
- int32_t limit = static_cast<int32_t>(1) << (N - 1);
- return (-limit <= value) && (value < limit);
-}
-
-static inline bool IsInt64(int N, int64_t value) {
- CHECK_LT(0, N);
- CHECK_LT(static_cast<size_t>(N), 8 * sizeof(int64_t));
- int64_t limit = static_cast<int64_t>(1) << (N - 1);
- return (-limit <= value) && (value < limit);
-}
-
-static inline bool IsUint(int N, intptr_t value) {
- CHECK_LT(0, N);
- CHECK_LT(N, kBitsPerIntPtrT);
- intptr_t limit = static_cast<intptr_t>(1) << N;
- return (0 <= value) && (value < limit);
-}
-
-static inline bool IsAbsoluteUint(int N, intptr_t value) {
- CHECK_LT(0, N);
- CHECK_LT(N, kBitsPerIntPtrT);
- if (value < 0) value = -value;
- return IsUint(N, value);
+template <typename T>
+static constexpr T GetIntLimit(size_t bits) {
+ return
+ DCHECK_CONSTEXPR(bits > 0, "bits cannot be zero", 0)
+      DCHECK_CONSTEXPR(bits < kBitsPerByte * sizeof(T), "bits must be < max.", 0)
+ static_cast<T>(1) << (bits - 1);
+}
+
+template <size_t kBits, typename T>
+static constexpr bool IsInt(T value) {
+ static_assert(kBits > 0, "kBits cannot be zero.");
+ static_assert(kBits <= kBitsPerByte * sizeof(T), "kBits must be <= max.");
+ static_assert(std::is_signed<T>::value, "Needs a signed type.");
+ // Corner case for "use all bits." Can't use the limits, as they would overflow, but it is
+ // trivially true.
+ return (kBits == kBitsPerByte * sizeof(T)) ?
+ true :
+ (-GetIntLimit<T>(kBits) <= value) && (value < GetIntLimit<T>(kBits));
+}
+
+template <size_t kBits, typename T>
+static constexpr bool IsUint(T value) {
+ static_assert(kBits > 0, "kBits cannot be zero.");
+ static_assert(kBits <= kBitsPerByte * sizeof(T), "kBits must be <= max.");
+ static_assert(std::is_integral<T>::value, "Needs an integral type.");
+ // Corner case for "use all bits." Can't use the limits, as they would overflow, but it is
+ // trivially true.
+ return (0 <= value) &&
+ (kBits == kBitsPerByte * sizeof(T) ||
+ (static_cast<typename std::make_unsigned<T>::type>(value) <=
+ GetIntLimit<typename std::make_unsigned<T>::type>(kBits + 1) - 1));
+}
+
+template <size_t kBits, typename T>
+static constexpr bool IsAbsoluteUint(T value) {
+  static_assert(kBits <= kBitsPerByte * sizeof(T), "kBits must be <= max.");
+ return (kBits == kBitsPerByte * sizeof(T)) ?
+ true :
+ IsUint<kBits, T>(value < 0 ? -value : value);
}
static inline uint16_t Low16Bits(uint32_t value) {
diff --git a/test/030-bad-finalizer/check b/test/030-bad-finalizer/check
new file mode 100755
index 0000000000..e5d5c4eef7
--- /dev/null
+++ b/test/030-bad-finalizer/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip timeout logging. These are "E/System" messages.
+sed -e '/^E\/System/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/059-finalizer-throw/check b/test/059-finalizer-throw/check
new file mode 100755
index 0000000000..8bc59c6bf4
--- /dev/null
+++ b/test/059-finalizer-throw/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip uncaught exception logging. These are "E/System" messages.
+sed -e '/^E\/System/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/099-vmdebug/check b/test/099-vmdebug/check
new file mode 100755
index 0000000000..57111bcc99
--- /dev/null
+++ b/test/099-vmdebug/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip the process pids and line numbers from exact error messages.
+sed -e '/^art E.*\] /d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index a8f2001930..c8e0ec5216 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -271,6 +271,19 @@ endif
TEST_ART_BROKEN_FALLBACK_RUN_TESTS :=
+# This test dynamically enables tracing to force a deoptimization. This makes the test meaningless
+# when already tracing, and writes an error message that we do not want to check for.
+TEST_ART_BROKEN_TRACING_RUN_TESTS := \
+ 802-deoptimization
+
+ifneq (,$(filter trace,$(TRACE_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),trace,$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
+ $(PICTEST_TYPES),$(TEST_ART_BROKEN_TRACING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_TRACING_RUN_TESTS :=
+
# The following tests use libarttest.so, which is linked against libartd.so, so will
# not work when libart.so is the one loaded.
# TODO: Find a way to run these tests in ndebug mode.
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 04eea4e64c..0c49674ff1 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -370,7 +370,7 @@ else
if [ "$DEV_MODE" = "y" ]; then
export ANDROID_LOG_TAGS='*:d'
else
- export ANDROID_LOG_TAGS='*:s'
+ export ANDROID_LOG_TAGS='*:e'
fi
export ANDROID_DATA="$DEX_LOCATION"
export ANDROID_ROOT="${ANDROID_ROOT}"