-rw-r--r--  compiler/dex/global_value_numbering.cc  28
-rw-r--r--  compiler/dex/global_value_numbering.h  4
-rw-r--r--  compiler/dex/gvn_dead_code_elimination.cc  5
-rw-r--r--  compiler/dex/gvn_dead_code_elimination_test.cc  85
-rw-r--r--  compiler/dex/mir_graph.cc  11
-rw-r--r--  compiler/dex/mir_graph.h  15
-rw-r--r--  compiler/dex/mir_optimization.cc  26
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc  11
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc  12
-rw-r--r--  compiler/driver/compiler_driver-inl.h  25
-rw-r--r--  compiler/driver/compiler_driver.cc  21
-rw-r--r--  compiler/driver/compiler_driver.h  10
-rw-r--r--  compiler/optimizing/builder.cc  16
-rw-r--r--  compiler/optimizing/code_generator.cc  1
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  2
-rw-r--r--  compiler/optimizing/inliner.cc  18
-rw-r--r--  compiler/optimizing/inliner.h  2
-rw-r--r--  compiler/optimizing/ssa_builder.cc  40
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S  5
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S  3
-rw-r--r--  runtime/art_method.cc  4
-rw-r--r--  runtime/class_linker.cc  3
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc  1
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  2
-rw-r--r--  runtime/entrypoints_order_test.cc  10
-rw-r--r--  runtime/fault_handler.cc  4
-rw-r--r--  runtime/instrumentation.cc  2
-rw-r--r--  runtime/interpreter/interpreter_common.cc  43
-rw-r--r--  runtime/interpreter/interpreter_common.h  5
-rw-r--r--  runtime/jvalue.h  2
-rw-r--r--  runtime/native/java_lang_Class.cc  20
-rw-r--r--  runtime/oat.h  2
-rw-r--r--  runtime/quick_exception_handler.cc  179
-rw-r--r--  runtime/thread.cc  113
-rw-r--r--  runtime/thread.h  77
-rw-r--r--  test/044-proxy/src/ReturnsAndArgPassing.java  2
-rw-r--r--  test/492-checker-inline-invoke-interface/expected.txt  0
-rw-r--r--  test/492-checker-inline-invoke-interface/info.txt  1
-rw-r--r--  test/492-checker-inline-invoke-interface/src/Main.java  29
-rw-r--r--  test/498-type-propagation/expected.txt  1
-rw-r--r--  test/498-type-propagation/info.txt  2
-rw-r--r--  test/498-type-propagation/smali/TypePropagation.smali  30
-rw-r--r--  test/498-type-propagation/src/Main.java  29
-rw-r--r--  test/501-regression-packed-switch/expected.txt  0
-rw-r--r--  test/501-regression-packed-switch/info.txt  2
-rw-r--r--  test/501-regression-packed-switch/smali/Test.smali  29
-rw-r--r--  test/501-regression-packed-switch/src/Main.java  33
-rw-r--r--  test/504-regression-baseline-entry/expected.txt  0
-rw-r--r--  test/504-regression-baseline-entry/info.txt  2
-rw-r--r--  test/504-regression-baseline-entry/smali/Test.smali  30
-rw-r--r--  test/504-regression-baseline-entry/src/Main.java  33
-rw-r--r--  test/800-smali/expected.txt  3
-rw-r--r--  test/800-smali/smali/b_21614284.smali  22
-rw-r--r--  test/800-smali/smali/b_21873167.smali  18
-rw-r--r--  test/800-smali/smali/b_21902684.smali  17
-rw-r--r--  test/800-smali/src/Main.java  4
-rwxr-xr-x  tools/generate-operator-out.py  10
57 files changed, 832 insertions, 272 deletions
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index e2b99871c8..94ba4fad2a 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -160,20 +160,10 @@ uint16_t GlobalValueNumbering::GetArrayLocation(uint16_t base, uint16_t index) {
return location;
}
-bool GlobalValueNumbering::HasNullCheckLastInsn(const BasicBlock* pred_bb,
- BasicBlockId succ_id) {
- if (pred_bb->block_type != kDalvikByteCode || pred_bb->last_mir_insn == nullptr) {
- return false;
- }
- Instruction::Code last_opcode = pred_bb->last_mir_insn->dalvikInsn.opcode;
- return ((last_opcode == Instruction::IF_EQZ && pred_bb->fall_through == succ_id) ||
- (last_opcode == Instruction::IF_NEZ && pred_bb->taken == succ_id));
-}
-
bool GlobalValueNumbering::NullCheckedInAllPredecessors(
const ScopedArenaVector<uint16_t>& merge_names) const {
// Implicit parameters:
- // - *work_lvn: the LVN for which we're checking predecessors.
+ // - *work_lvn_: the LVN for which we're checking predecessors.
// - merge_lvns_: the predecessor LVNs.
DCHECK_EQ(merge_lvns_.size(), merge_names.size());
for (size_t i = 0, size = merge_lvns_.size(); i != size; ++i) {
@@ -198,7 +188,7 @@ bool GlobalValueNumbering::NullCheckedInAllPredecessors(
bool GlobalValueNumbering::DivZeroCheckedInAllPredecessors(
const ScopedArenaVector<uint16_t>& merge_names) const {
// Implicit parameters:
- // - *work_lvn: the LVN for which we're checking predecessors.
+ // - *work_lvn_: the LVN for which we're checking predecessors.
// - merge_lvns_: the predecessor LVNs.
DCHECK_EQ(merge_lvns_.size(), merge_names.size());
for (size_t i = 0, size = merge_lvns_.size(); i != size; ++i) {
@@ -217,15 +207,11 @@ bool GlobalValueNumbering::IsBlockEnteredOnTrue(uint16_t cond, BasicBlockId bb_i
if (bb->predecessors.size() == 1u) {
BasicBlockId pred_id = bb->predecessors[0];
BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
- if (pred_bb->last_mir_insn != nullptr) {
- Instruction::Code opcode = pred_bb->last_mir_insn->dalvikInsn.opcode;
- if ((opcode == Instruction::IF_NEZ && pred_bb->taken == bb_id) ||
- (opcode == Instruction::IF_EQZ && pred_bb->fall_through == bb_id)) {
- DCHECK(lvns_[pred_id] != nullptr);
- uint16_t operand = lvns_[pred_id]->GetSregValue(pred_bb->last_mir_insn->ssa_rep->uses[0]);
- if (operand == cond) {
- return true;
- }
+ if (pred_bb->BranchesToSuccessorOnlyIfNotZero(bb_id)) {
+ DCHECK(lvns_[pred_id] != nullptr);
+ uint16_t operand = lvns_[pred_id]->GetSregValue(pred_bb->last_mir_insn->ssa_rep->uses[0]);
+ if (operand == cond) {
+ return true;
}
}
}
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index bd2f187d17..c514f75dcc 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -194,7 +194,9 @@ class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
return mir_graph_->GetBasicBlock(bb_id);
}
- static bool HasNullCheckLastInsn(const BasicBlock* pred_bb, BasicBlockId succ_id);
+ static bool HasNullCheckLastInsn(const BasicBlock* pred_bb, BasicBlockId succ_id) {
+ return pred_bb->BranchesToSuccessorOnlyIfNotZero(succ_id);
+ }
bool NullCheckedInAllPredecessors(const ScopedArenaVector<uint16_t>& merge_names) const;
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index 6d8a7dab2b..b1f5d870d4 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -1003,7 +1003,6 @@ bool GvnDeadCodeElimination::BackwardPassTryToKillLastMIR() {
vreg_chains_.GetMIRData(kill_heads_[v_reg])->PrevChange(v_reg));
}
}
- unused_vregs_->Union(vregs_to_kill_);
for (auto it = changes_to_kill_.rbegin(), end = changes_to_kill_.rend(); it != end; ++it) {
MIRData* data = vreg_chains_.GetMIRData(*it);
DCHECK(!data->must_keep);
@@ -1012,6 +1011,10 @@ bool GvnDeadCodeElimination::BackwardPassTryToKillLastMIR() {
KillMIR(data);
}
+ // Each dependent register not in vregs_to_kill_ is either already marked unused or
+ // it's one word of a wide register where the other word has been overwritten.
+ unused_vregs_->UnionIfNotIn(dependent_vregs_, vregs_to_kill_);
+
vreg_chains_.RemoveTrailingNops();
return true;
}
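
The UnionIfNotIn call added above computes, word by word, unused |= (dependent & ~to_kill). A minimal self-contained sketch of that semantics, using plain uint32_t word vectors rather than ART's ArenaBitVector (names here are illustrative only):

#include <cstdint>
#include <vector>

// Sketch only: mark as unused every dependent vreg that is not being killed.
// Assumes all three vectors have the same length; ART's ArenaBitVector
// version operates on arena-allocated storage instead.
void UnionIfNotIn(std::vector<uint32_t>* unused,
                  const std::vector<uint32_t>& dependent,
                  const std::vector<uint32_t>& to_kill) {
  for (size_t i = 0; i != unused->size(); ++i) {
    (*unused)[i] |= dependent[i] & ~to_kill[i];
  }
}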
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index de591d0edb..461c844a60 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -137,6 +137,8 @@ class GvnDeadCodeEliminationTest : public testing::Test {
{ bb, opcode, 0u, 0u, 1, { src1 }, 1, { result } }
#define DEF_BINOP(bb, opcode, result, src1, src2) \
{ bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
+#define DEF_BINOP_WIDE(bb, opcode, result, src1, src2) \
+ { bb, opcode, 0u, 0u, 4, { src1, src1 + 1, src2, src2 + 1 }, 2, { result, result + 1 } }
void DoPrepareIFields(const IFieldDef* defs, size_t count) {
cu_.mir_graph->ifield_lowering_infos_.clear();
@@ -1936,7 +1938,7 @@ TEST_F(GvnDeadCodeEliminationTestSimple, MixedOverlaps1) {
DEF_CONST(3, Instruction::CONST, 0u, 1000u),
DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
DEF_CONST(3, Instruction::CONST, 2u, 2000u),
- { 3, Instruction::INT_TO_LONG, 0, 0u, 1, { 2u }, 2, { 3u, 4u} },
+ { 3, Instruction::INT_TO_LONG, 0, 0u, 1, { 2u }, 2, { 3u, 4u } },
DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 5u, 3u),
DEF_CONST(3, Instruction::CONST, 7u, 3000u),
DEF_CONST(3, Instruction::CONST, 8u, 4000u),
@@ -1983,4 +1985,85 @@ TEST_F(GvnDeadCodeEliminationTestSimple, MixedOverlaps1) {
EXPECT_EQ(0u, int_to_long->dalvikInsn.vB);
}
+TEST_F(GvnDeadCodeEliminationTestSimple, UnusedRegs1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+ DEF_CONST(3, Instruction::CONST, 1u, 2000u),
+ DEF_BINOP(3, Instruction::ADD_INT, 2u, 1u, 0u),
+ DEF_CONST(3, Instruction::CONST, 3u, 1000u), // NOT killed (b/21702651).
+ DEF_BINOP(3, Instruction::ADD_INT, 4u, 1u, 3u), // Killed (RecordPass)
+ DEF_CONST(3, Instruction::CONST, 5u, 2000u), // Killed with 9u (BackwardPass)
+ DEF_BINOP(3, Instruction::ADD_INT, 6u, 5u, 0u), // Killed (RecordPass)
+ DEF_CONST(3, Instruction::CONST, 7u, 4000u),
+ DEF_MOVE(3, Instruction::MOVE, 8u, 0u), // Killed with 6u (BackwardPass)
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 1, 2, 3, 0, 3, 0, 3, 4, 0 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 7 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[3]);
+ EXPECT_EQ(value_names_[2], value_names_[4]);
+ EXPECT_EQ(value_names_[1], value_names_[5]);
+ EXPECT_EQ(value_names_[2], value_names_[6]);
+ EXPECT_EQ(value_names_[0], value_names_[8]);
+
+ static const bool eliminated[] = {
+ false, false, false, false, true, true, true, false, true,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, UnusedRegs2) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+ DEF_CONST(3, Instruction::CONST, 1u, 2000u),
+ DEF_BINOP(3, Instruction::ADD_INT, 2u, 1u, 0u),
+ DEF_CONST(3, Instruction::CONST, 3u, 1000u), // Killed (BackwardPass; b/21702651)
+ DEF_BINOP(3, Instruction::ADD_INT, 4u, 1u, 3u), // Killed (RecordPass)
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 5u, 4000u),
+ { 3, Instruction::LONG_TO_INT, 0, 0u, 2, { 5u, 6u }, 1, { 7u } },
+ DEF_BINOP(3, Instruction::ADD_INT, 8u, 7u, 0u),
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 9u, 4000u), // Killed with 12u (BackwardPass)
+ DEF_CONST(3, Instruction::CONST, 11u, 6000u),
+ { 3, Instruction::LONG_TO_INT, 0, 0u, 2, { 9u, 10u }, 1, { 12u } }, // Killed with 9u (BP)
+ };
+
+ static const int32_t sreg_to_vreg_map[] = {
+ 2, 3, 4, 1, 4, 5, 6 /* high word */, 0, 7, 0, 1 /* high word */, 8, 0
+ };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 5, 9 };
+ MarkAsWideSRegs(wide_sregs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 5, 6, 7, 9 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[3]);
+ EXPECT_EQ(value_names_[2], value_names_[4]);
+ EXPECT_EQ(value_names_[5], value_names_[8]);
+ EXPECT_EQ(value_names_[6], value_names_[10]);
+
+ static const bool eliminated[] = {
+ false, false, false, true, true, false, false, false, true, false, true,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 9fa5148ced..920be0b4e6 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -398,12 +398,13 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
DCHECK(monitor_exit->Opcode() == Instruction::MONITOR_EXIT);
int monitor_reg = monitor_exit->VRegA_11x();
const Instruction* check_insn = Instruction::At(current_code_item_->insns_ + catch_offset);
- DCHECK(check_insn->Opcode() == Instruction::MOVE_EXCEPTION);
- if (check_insn->VRegA_11x() == monitor_reg) {
- // Unexpected move-exception to the same register. Probably not the pattern we're looking for.
- return false;
+ if (check_insn->Opcode() == Instruction::MOVE_EXCEPTION) {
+ if (check_insn->VRegA_11x() == monitor_reg) {
+ // Unexpected move-exception to the same register. Probably not the pattern we're looking for.
+ return false;
+ }
+ check_insn = check_insn->Next();
}
- check_insn = check_insn->Next();
while (true) {
int dest = -1;
bool wide = false;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index f038397e1e..dbe906280f 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -452,6 +452,21 @@ class BasicBlock : public DeletableArenaObject<kArenaAllocBB> {
MIR* GetFirstNonPhiInsn();
/**
+ * @brief Checks whether the block ends with an if-nez or if-eqz that branches to
+ * the given successor only if the register is not zero.
+ */
+ bool BranchesToSuccessorOnlyIfNotZero(BasicBlockId succ_id) const {
+ if (last_mir_insn == nullptr) {
+ return false;
+ }
+ Instruction::Code last_opcode = last_mir_insn->dalvikInsn.opcode;
+ return ((last_opcode == Instruction::IF_EQZ && fall_through == succ_id) ||
+ (last_opcode == Instruction::IF_NEZ && taken == succ_id)) &&
+ // Make sure the other successor isn't the same (empty if), b/21614284.
+ (fall_through != taken);
+ }
+
+ /**
* @brief Used to obtain the next MIR that follows unconditionally.
* @details The implementation does not guarantee that a MIR does not
* follow even if this method returns nullptr.
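
A self-contained model of the predicate added above, with a hypothetical MiniBlock standing in for BasicBlock (the real check additionally requires last_mir_insn to be non-null), showing why the empty-if case from b/21614284 must be rejected:

#include <cassert>

enum class Op { kIfEqz, kIfNez, kOther };
struct MiniBlock { Op last_op; int taken; int fall_through; };

bool BranchesOnlyIfNotZero(const MiniBlock& bb, int succ_id) {
  return ((bb.last_op == Op::kIfEqz && bb.fall_through == succ_id) ||
          (bb.last_op == Op::kIfNez && bb.taken == succ_id)) &&
         bb.taken != bb.fall_through;  // Reject the "empty if" (b/21614284).
}

int main() {
  // Normal diamond: "if-eqz vX" falls through to block 7 only when vX != 0.
  assert(BranchesOnlyIfNotZero({Op::kIfEqz, 9, 7}, 7));
  // Empty if: both successors are block 5, so reaching 5 proves nothing
  // about vX and the predicate must be false.
  assert(!BranchesOnlyIfNotZero({Op::kIfEqz, 5, 5}, 5));
  return 0;
}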
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 645511ed9f..5bb0ce3ba5 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -978,18 +978,12 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
BasicBlock* pred_bb = GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
MIR* null_check_insn = nullptr;
- if (pred_bb->block_type == kDalvikByteCode) {
- // Check to see if predecessor had an explicit null-check.
- MIR* last_insn = pred_bb->last_mir_insn;
- if (last_insn != nullptr) {
- Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
- if ((last_opcode == Instruction::IF_EQZ && pred_bb->fall_through == bb->id) ||
- (last_opcode == Instruction::IF_NEZ && pred_bb->taken == bb->id)) {
- // Remember the null check insn if there's no other predecessor requiring null check.
- if (!copied_first || !vregs_to_check->IsBitSet(last_insn->dalvikInsn.vA)) {
- null_check_insn = last_insn;
- }
- }
+ // Check to see if predecessor had an explicit null-check.
+ if (pred_bb->BranchesToSuccessorOnlyIfNotZero(bb->id)) {
+ // Remember the null check insn if there's no other predecessor requiring null check.
+ if (!copied_first || !vregs_to_check->IsBitSet(pred_bb->last_mir_insn->dalvikInsn.vA)) {
+ null_check_insn = pred_bb->last_mir_insn;
+ DCHECK(null_check_insn != nullptr);
}
}
if (!copied_first) {
@@ -1673,15 +1667,9 @@ void MIRGraph::StringChange() {
if (opcode == Instruction::NEW_INSTANCE) {
uint32_t type_idx = mir->dalvikInsn.vB;
if (cu_->compiler_driver->IsStringTypeIndex(type_idx, cu_->dex_file)) {
- // Change NEW_INSTANCE and throwing half of the insn (if it exists) into CONST_4 of 0
+ // Change NEW_INSTANCE into CONST_4 of 0
mir->dalvikInsn.opcode = Instruction::CONST_4;
mir->dalvikInsn.vB = 0;
- MIR* check_mir = GetBasicBlock(bb->predecessors[0])->last_mir_insn;
- if (check_mir != nullptr &&
- static_cast<int>(check_mir->dalvikInsn.opcode) == kMirOpCheck) {
- check_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
- check_mir->dalvikInsn.vB = 0;
- }
}
} else if ((opcode == Instruction::INVOKE_DIRECT) ||
(opcode == Instruction::INVOKE_DIRECT_RANGE)) {
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 6d30e72f86..cf0188456d 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -471,13 +471,18 @@ void ArmMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_src.GetReg());
} else {
// Handle overlap
- if (r_src.GetHighReg() == r_dest.GetLowReg()) {
- DCHECK_NE(r_src.GetLowReg(), r_dest.GetHighReg());
+ if (r_src.GetHighReg() != r_dest.GetLowReg()) {
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ } else if (r_src.GetLowReg() != r_dest.GetHighReg()) {
OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
OpRegCopy(r_dest.GetLow(), r_src.GetLow());
} else {
+ RegStorage r_tmp = AllocTemp();
+ OpRegCopy(r_tmp, r_src.GetHigh());
OpRegCopy(r_dest.GetLow(), r_src.GetLow());
- OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ OpRegCopy(r_dest.GetHigh(), r_tmp);
+ FreeTemp(r_tmp);
}
}
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 9319c64784..f5ad7c7c33 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -258,13 +258,19 @@ void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
}
} else {
// Here if both src and dest are core registers.
- // Handle overlap.
- if (r_src.GetHighReg() == r_dest.GetLowReg()) {
+ // Handle overlap
+ if (r_src.GetHighReg() != r_dest.GetLowReg()) {
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ } else if (r_src.GetLowReg() != r_dest.GetHighReg()) {
OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
OpRegCopy(r_dest.GetLow(), r_src.GetLow());
} else {
+ RegStorage r_tmp = AllocTemp();
+ OpRegCopy(r_tmp, r_src.GetHigh());
OpRegCopy(r_dest.GetLow(), r_src.GetLow());
- OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ OpRegCopy(r_dest.GetHigh(), r_tmp);
+ FreeTemp(r_tmp);
}
}
}
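
Both the ARM and MIPS hunks above fix the same latent bug: when the source and destination core-register pairs fully overlap (a swap), neither copy order alone is safe and a temporary is required. A minimal sketch with plain ints standing in for register contents (dl/dh/sl/sh are register numbers; this is not ART's RegStorage API):

// r[] holds register contents; dl/dh and sl/sh are the dest and src pairs.
void CopyWidePair(int r[], int dl, int dh, int sl, int sh) {
  if (sh != dl) {
    r[dl] = r[sl];  // Low first: it cannot clobber the source high word.
    r[dh] = r[sh];
  } else if (sl != dh) {
    r[dh] = r[sh];  // High first: writing low first would clobber r[sh].
    r[dl] = r[sl];
  } else {
    // Full swap (sl == dh && sh == dl): either order clobbers, so use a
    // temporary, mirroring the AllocTemp()/FreeTemp() pair in the fixed code.
    int tmp = r[sh];
    r[dl] = r[sl];
    r[dh] = tmp;
  }
}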
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index b25e967609..e0c56fcc82 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -233,11 +233,32 @@ inline bool CompilerDriver::IsStaticFieldInReferrerClass(mirror::Class* referrer
return referrer_class == fields_class;
}
+inline bool CompilerDriver::CanAssumeClassIsInitialized(mirror::Class* klass) {
+ // Being loaded is a pre-requisite for being initialized but let's do the cheap check first.
+ //
+ // NOTE: When AOT compiling an app, we eagerly initialize app classes (and potentially their
+ // super classes in the boot image) but only those that have a trivial initialization, i.e.
+ // without <clinit>() or static values in the dex file for that class or any of its super
+ // classes. So while we could see the klass as initialized during AOT compilation and have
+ // it only loaded at runtime, the needed initialization would have to be trivial and
+ // unobservable from Java, so we may as well treat it as initialized.
+ if (!klass->IsInitialized()) {
+ return false;
+ }
+ return CanAssumeClassIsLoaded(klass);
+}
+
+inline bool CompilerDriver::CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class,
+ mirror::Class* klass) {
+ return (referrer_class != nullptr && referrer_class->IsSubClass(klass)) ||
+ CanAssumeClassIsInitialized(klass);
+}
+
inline bool CompilerDriver::IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
ArtField* resolved_field) {
DCHECK(resolved_field->IsStatic());
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- return fields_class == referrer_class || fields_class->IsInitialized();
+ return CanReferrerAssumeClassIsInitialized(referrer_class, fields_class);
}
inline ArtMethod* CompilerDriver::ResolveMethod(
@@ -394,7 +415,7 @@ inline bool CompilerDriver::IsMethodsClassInitialized(mirror::Class* referrer_cl
return true;
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
- return methods_class == referrer_class || methods_class->IsInitialized();
+ return CanReferrerAssumeClassIsInitialized(referrer_class, methods_class);
}
} // namespace art
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7b8b5b0238..e5fa54e989 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -659,7 +659,8 @@ void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const De
bool CompilerDriver::IsImageClass(const char* descriptor) const {
if (!IsImage()) {
- return true;
+ // NOTE: Currently unreachable, all callers check IsImage().
+ return false;
} else {
return image_classes_->find(descriptor) != image_classes_->end();
}
@@ -992,6 +993,24 @@ void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
}
}
+bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) {
+ Runtime* runtime = Runtime::Current();
+ if (!runtime->IsAotCompiler()) {
+ DCHECK(runtime->UseJit());
+ // Having the klass reference here implies that the klass is already loaded.
+ return true;
+ }
+ if (!IsImage()) {
+ // Assume loaded only if klass is in the boot image. App classes cannot be assumed
+ // loaded because we don't even know what class loader will be used to load them.
+ bool class_in_image = runtime->GetHeap()->FindSpaceFromObject(klass, false)->IsImageSpace();
+ return class_in_image;
+ }
+ std::string temp;
+ const char* descriptor = klass->GetDescriptor(&temp);
+ return IsImageClass(descriptor);
+}
+
bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx) {
if (IsImage() &&
IsImageClass(dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_))) {
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 68c905eb22..f737007308 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -501,6 +501,16 @@ class CompilerDriver {
uint32_t field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Can we assume that the klass is initialized?
+ bool CanAssumeClassIsInitialized(mirror::Class* klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can we assume that the klass is loaded?
+ bool CanAssumeClassIsLoaded(mirror::Class* klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
// The only external contract is that unresolved method has flags 0 and resolved non-0.
enum {
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 58416ee93b..4e747df43f 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -657,7 +657,7 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
(target_method.dex_method_index == dex_compilation_unit_->GetDexMethodIndex());
DCHECK(!is_recursive || (target_method.dex_file == dex_compilation_unit_->GetDexFile()));
- if (optimized_invoke_type == kStatic) {
+ if (optimized_invoke_type == kStatic && !is_string_init) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<4> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(
@@ -1175,14 +1175,20 @@ bool HGraphBuilder::NeedsAccessCheck(uint32_t type_index) const {
}
void HGraphBuilder::BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc) {
+ // Verifier guarantees that the payload for PackedSwitch contains:
+ // (a) number of entries (may be zero)
+ // (b) first and lowest switch case value (entry 0, always present)
+ // (c) list of target pcs (entries 1 <= i <= N)
SwitchTable table(instruction, dex_pc, false);
// Value to test against.
HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
+ // Retrieve number of entries.
uint16_t num_entries = table.GetNumEntries();
- // There should be at least one entry here.
- DCHECK_GT(num_entries, 0U);
+ if (num_entries == 0) {
+ return;
+ }
// Chained cmp-and-branch, starting from starting_key.
int32_t starting_key = table.GetEntryAt(0);
@@ -1194,6 +1200,10 @@ void HGraphBuilder::BuildPackedSwitch(const Instruction& instruction, uint32_t d
}
void HGraphBuilder::BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc) {
+ // Verifier guarantees that the payload for SparseSwitch contains:
+ // (a) number of entries (may be zero)
+ // (b) sorted key values (entries 0 <= i < N)
+ // (c) target pcs corresponding to the switch values (entries N <= i < 2*N)
SwitchTable table(instruction, dex_pc, true);
// Value to test against.
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a5d5305836..508e770494 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -236,7 +236,6 @@ void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
const GrowableArray<HBasicBlock*>& block_order) {
block_order_ = &block_order;
DCHECK(block_order_->Get(0) == GetGraph()->GetEntryBlock());
- DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), block_order_->Get(1)));
ComputeSpillMask();
first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b0174b9b16..d767dfe3cc 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -372,7 +372,7 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
if (invoke->IsStringInit()) {
// temp = thread->string_init_entrypoint
- __ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
+ __ gs()->movq(temp, Address::Absolute(invoke->GetStringInitOffset(), true));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index e51732396d..b984ef7a42 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -55,7 +55,7 @@ void HInliner::Run() {
if (call != nullptr && call->GetIntrinsic() == Intrinsics::kNone) {
// We use the original invoke type to ensure the resolution of the called method
// works properly.
- if (!TryInline(call, call->GetDexMethodIndex(), call->GetOriginalInvokeType())) {
+ if (!TryInline(call, call->GetDexMethodIndex())) {
if (kIsDebugBuild) {
std::string callee_name =
PrettyMethod(call->GetDexMethodIndex(), *outer_compilation_unit_.GetDexFile());
@@ -69,22 +69,18 @@ void HInliner::Run() {
}
}
-bool HInliner::TryInline(HInvoke* invoke_instruction,
- uint32_t method_index,
- InvokeType invoke_type) const {
+bool HInliner::TryInline(HInvoke* invoke_instruction, uint32_t method_index) const {
ScopedObjectAccess soa(Thread::Current());
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
VLOG(compiler) << "Try inlining " << PrettyMethod(method_index, caller_dex_file);
- StackHandleScope<3> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(caller_compilation_unit_.GetClassLinker()->FindDexCache(caller_dex_file)));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader*>(caller_compilation_unit_.GetClassLoader())));
- ArtMethod* resolved_method(compiler_driver_->ResolveMethod(
- soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type));
+ ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
+ // We can query the dex cache directly. The verifier has populated it already.
+ ArtMethod* resolved_method = class_linker->FindDexCache(caller_dex_file)->GetResolvedMethod(
+ method_index, class_linker->GetImagePointerSize());
if (resolved_method == nullptr) {
+ // Method cannot be resolved if it is in another dex file we do not have access to.
VLOG(compiler) << "Method cannot be resolved " << PrettyMethod(method_index, caller_dex_file);
return false;
}
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 831bdf22a0..4602e77743 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -47,7 +47,7 @@ class HInliner : public HOptimization {
static constexpr const char* kInlinerPassName = "inliner";
private:
- bool TryInline(HInvoke* invoke_instruction, uint32_t method_index, InvokeType invoke_type) const;
+ bool TryInline(HInvoke* invoke_instruction, uint32_t method_index) const;
bool TryBuildAndInline(ArtMethod* resolved_method,
HInvoke* invoke_instruction,
uint32_t method_index,
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 59a2852735..9236f7c585 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -184,22 +184,24 @@ void SsaBuilder::FixNullConstantType() {
}
HInstruction* left = equality_instr->InputAt(0);
HInstruction* right = equality_instr->InputAt(1);
- HInstruction* null_instr = nullptr;
+ HInstruction* int_operand = nullptr;
- if ((left->GetType() == Primitive::kPrimNot) && right->IsIntConstant()) {
- null_instr = right;
- } else if ((right->GetType() == Primitive::kPrimNot) && left->IsIntConstant()) {
- null_instr = left;
+ if ((left->GetType() == Primitive::kPrimNot) && (right->GetType() == Primitive::kPrimInt)) {
+ int_operand = right;
+ } else if ((right->GetType() == Primitive::kPrimNot)
+ && (left->GetType() == Primitive::kPrimInt)) {
+ int_operand = left;
} else {
continue;
}
// If we got here, we are comparing against a reference and the int constant
// should be replaced with a null constant.
- if (null_instr->IsIntConstant()) {
- DCHECK_EQ(0, null_instr->AsIntConstant()->GetValue());
- equality_instr->ReplaceInput(GetGraph()->GetNullConstant(), null_instr == right ? 1 : 0);
- }
+ // Both type propagation and redundant phi elimination ensure `int_operand`
+ // can only be the 0 constant.
+ DCHECK(int_operand->IsIntConstant());
+ DCHECK_EQ(0, int_operand->AsIntConstant()->GetValue());
+ equality_instr->ReplaceInput(GetGraph()->GetNullConstant(), int_operand == right ? 1 : 0);
}
}
}
@@ -255,21 +257,18 @@ void SsaBuilder::BuildSsa() {
PrimitiveTypePropagation type_propagation(GetGraph());
type_propagation.Run();
- // 5) Fix the type for null constants which are part of an equality comparison.
- FixNullConstantType();
-
- // 6) When creating equivalent phis we copy the inputs of the original phi which
- // may be improperly typed. This will be fixed during the type propagation but
+ // 5) When creating equivalent phis we copy the inputs of the original phi which
+ // may be improperly typed. This was fixed during the type propagation in 4) but
// as a result we may end up with two equivalent phis with the same type for
// the same dex register. This pass cleans them up.
EquivalentPhisCleanup();
- // 7) Mark dead phis again. Step 4) may have introduced new phis.
- // Step 6) might enable the death of new phis.
+ // 6) Mark dead phis again. Step 4) may have introduced new phis.
+ // Step 5) might enable the death of new phis.
SsaDeadPhiElimination dead_phis(GetGraph());
dead_phis.MarkDeadPhis();
- // 8) Now that the graph is correctly typed, we can get rid of redundant phis.
+ // 7) Now that the graph is correctly typed, we can get rid of redundant phis.
// Note that we cannot do this phase before type propagation, otherwise
// we could get rid of phi equivalents, whose presence is a requirement for the
// type propagation phase. Note that this is to satisfy statement (a) of the
@@ -277,6 +276,13 @@ void SsaBuilder::BuildSsa() {
SsaRedundantPhiElimination redundant_phi(GetGraph());
redundant_phi.Run();
+ // 8) Fix the type for null constants which are part of an equality comparison.
+ // We need to do this after redundant phi elimination, to ensure the only cases
+ // that we can see are reference comparison against 0. The redundant phi
+ // elimination ensures we do not see a phi taking two 0 constants in a HEqual
+ // or HNotEqual.
+ FixNullConstantType();
+
// 9) Make sure environments use the right phi "equivalent": a phi marked dead
// can have a phi equivalent that is not dead. We must therefore update
// all environment uses of the dead phi to use its equivalent. Note that there
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 9cebb4ed55..17778e9610 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1384,8 +1384,9 @@ DEFINE_FUNCTION art_quick_proxy_invoke_handler
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
- addl LITERAL(76), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-76)
+ addl LITERAL(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE), %esp
+ CFI_ADJUST_CFA_OFFSET(-(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE))
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_proxy_invoke_handler
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index bd199dbb82..62eebd46e6 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1323,9 +1323,8 @@ DEFINE_FUNCTION art_quick_proxy_invoke_handler
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current().
movq %rsp, %rcx // Pass SP.
call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
movq %rax, %xmm0 // Copy return value in case of float returns.
- addq LITERAL(168 + 4*8), %rsp // Pop arguments.
- CFI_ADJUST_CFA_OFFSET(-168 - 4*8)
RETURN_OR_DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_proxy_invoke_handler
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index fbaf0ae217..16c099d31a 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -424,7 +424,9 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
// exception was thrown to force the activations to be removed from the
// stack. Continue execution in the interpreter.
self->ClearException();
- ShadowFrame* shadow_frame = self->GetAndClearDeoptimizationShadowFrame(result);
+ ShadowFrame* shadow_frame =
+ self->PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
+ result->SetJ(self->PopDeoptimizationReturnValue().GetJ());
self->SetTopOfStack(nullptr);
self->SetTopOfShadowStack(shadow_frame);
interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, result);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index ae952e6aef..d2805cdbd6 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -4894,6 +4894,9 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass
if (interface_name_comparator.HasSameNameAndSignature(
vtable_method_for_name_comparison)) {
if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
+ // Must do EndAssertNoThreadSuspension before throw since the throw can cause
+ // allocations.
+ self->EndAssertNoThreadSuspension(old_cause);
ThrowIllegalAccessError(klass.Get(),
"Method '%s' implementing interface method '%s' is not public",
PrettyMethod(vtable_method).c_str(), PrettyMethod(interface_method).c_str());
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 3eefeef84a..9860fb057e 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -28,6 +28,7 @@ namespace art {
extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
+ self->PushAndClearDeoptimizationReturnValue();
self->SetException(Thread::GetDeoptimizationException());
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index bc15cc79c9..2ea5cb0c41 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -647,7 +647,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
self->SetException(Thread::GetDeoptimizationException());
- self->SetDeoptimizationReturnValue(result);
+ self->SetDeoptimizationReturnValue(result, shorty[0] == 'L');
}
// No need to restore the args since the method has already been run by the interpreter.
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 963dd0265f..0a5ebfa81b 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -72,6 +72,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tls32_, throwing_OutOfMemoryError, no_thread_suspension, 4);
EXPECT_OFFSET_DIFFP(Thread, tls32_, no_thread_suspension, thread_exit_check_count, 4);
EXPECT_OFFSET_DIFFP(Thread, tls32_, thread_exit_check_count, handling_signal_, 4);
+ EXPECT_OFFSET_DIFFP(Thread, tls32_, handling_signal_,
+ deoptimization_return_value_is_reference, 4);
// TODO: Better connection. Take alignment into account.
EXPECT_OFFSET_DIFF_GT3(Thread, tls32_.thread_exit_check_count, tls64_.trace_clock_base, 4,
@@ -103,11 +105,11 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, long_jump_context, instrumentation_stack, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, instrumentation_stack, debug_invoke_req, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, debug_invoke_req, single_step_control, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, single_step_control, deoptimization_shadow_frame,
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, single_step_control, stacked_shadow_frame_record,
sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, deoptimization_shadow_frame,
- shadow_frame_under_construction, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, shadow_frame_under_construction, name, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stacked_shadow_frame_record,
+ deoptimization_return_value_stack, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, deoptimization_return_value_stack, name, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, name, pthread_self, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, pthread_self, last_no_thread_suspension_cause,
sizeof(void*));
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 4a352ddf9a..762f061ce2 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -72,12 +72,10 @@ namespace art {
// Static fault manager object accessed by signal handler.
FaultManager fault_manager;
-extern "C" {
-void art_sigsegv_fault() {
+extern "C" __attribute__((visibility("default"))) void art_sigsegv_fault() {
// Set a breakpoint here to be informed when a SIGSEGV is unhandled by ART.
VLOG(signals) << "Caught unknown SIGSEGV in ART fault handler - chaining to next handler.";
}
-}
// Signal handler called on SIGSEGV.
static void art_fault_handler(int sig, siginfo_t* info, void* context) {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4ced23d488..d37ddcb88b 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1019,7 +1019,7 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
PrettyMethod(method).c_str(),
return_value.GetJ()) << *self;
}
- self->SetDeoptimizationReturnValue(return_value);
+ self->SetDeoptimizationReturnValue(return_value, return_shorty == 'L');
return GetTwoWordSuccessValue(*return_pc,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
} else {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 1ed1a649b8..a245890ad0 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -517,7 +517,8 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
// Slow path.
// We might need to do class loading, which incurs a thread state change to kNative. So
// register the shadow frame as under construction and allow suspension again.
- self->SetShadowFrameUnderConstruction(new_shadow_frame);
+ ScopedStackedShadowFramePusher pusher(
+ self, new_shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
self->EndAssertNoThreadSuspension(old_cause);
// We need to do runtime check on reference assignment. We need to load the shorty
@@ -590,8 +591,6 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
break;
}
}
- // We're done with the construction.
- self->ClearShadowFrameUnderConstruction();
} else {
// Fast path: no extra checks.
if (is_range) {
@@ -705,30 +704,31 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
return false;
}
uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- Class* arrayClass = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(arrayClass == nullptr)) {
+ Class* array_class = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(array_class == nullptr)) {
DCHECK(self->IsExceptionPending());
return false;
}
- CHECK(arrayClass->IsArrayClass());
- Class* componentClass = arrayClass->GetComponentType();
- if (UNLIKELY(componentClass->IsPrimitive() && !componentClass->IsPrimitiveInt())) {
- if (componentClass->IsPrimitiveLong() || componentClass->IsPrimitiveDouble()) {
+ CHECK(array_class->IsArrayClass());
+ Class* component_class = array_class->GetComponentType();
+ const bool is_primitive_int_component = component_class->IsPrimitiveInt();
+ if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
+ if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
ThrowRuntimeException("Bad filled array request for type %s",
- PrettyDescriptor(componentClass).c_str());
+ PrettyDescriptor(component_class).c_str());
} else {
self->ThrowNewExceptionF("Ljava/lang/InternalError;",
"Found type %s; filled-new-array not implemented for anything but 'int'",
- PrettyDescriptor(componentClass).c_str());
+ PrettyDescriptor(component_class).c_str());
}
return false;
}
- Object* newArray = Array::Alloc<true>(self, arrayClass, length,
- arrayClass->GetComponentSizeShift(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(newArray == nullptr)) {
- DCHECK(self->IsExceptionPending());
+ Object* new_array = Array::Alloc<true>(self, array_class, length,
+ array_class->GetComponentSizeShift(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ if (UNLIKELY(new_array == nullptr)) {
+ self->AssertPendingOOMException();
return false;
}
uint32_t arg[5]; // only used in filled-new-array.
@@ -738,17 +738,18 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
} else {
inst->GetVarArgs(arg);
}
- const bool is_primitive_int_component = componentClass->IsPrimitiveInt();
for (int32_t i = 0; i < length; ++i) {
size_t src_reg = is_range ? vregC + i : arg[i];
if (is_primitive_int_component) {
- newArray->AsIntArray()->SetWithoutChecks<transaction_active>(i, shadow_frame.GetVReg(src_reg));
+ new_array->AsIntArray()->SetWithoutChecks<transaction_active>(
+ i, shadow_frame.GetVReg(src_reg));
} else {
- newArray->AsObjectArray<Object>()->SetWithoutChecks<transaction_active>(i, shadow_frame.GetVRegReference(src_reg));
+ new_array->AsObjectArray<Object>()->SetWithoutChecks<transaction_active>(
+ i, shadow_frame.GetVRegReference(src_reg));
}
}
- result->SetL(newArray);
+ result->SetL(new_array);
return true;
}
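
DoCall above replaces the manual SetShadowFrameUnderConstruction / ClearShadowFrameUnderConstruction pair with an RAII pusher, so the frame is unregistered on every exit path, including exceptions. A self-contained sketch of that shape, with hypothetical stub types standing in for ART's Thread and ShadowFrame (the real class is declared in runtime/thread.h):

#include <cstdio>

struct ShadowFrame {};  // Stub for illustration.
enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame
};

struct Thread {  // Stub: the real Thread keeps a linked record list.
  void PushStackedShadowFrame(ShadowFrame*, StackedShadowFrameType) {
    std::puts("push");
  }
  void PopStackedShadowFrame(StackedShadowFrameType) { std::puts("pop"); }
};

class ScopedStackedShadowFramePusher {
 public:
  ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf,
                                 StackedShadowFrameType type)
      : self_(self), type_(type) {
    self_->PushStackedShadowFrame(sf, type);
  }
  // Runs on normal return or exception unwind, so no exit path leaks a frame.
  ~ScopedStackedShadowFramePusher() { self_->PopStackedShadowFrame(type_); }

 private:
  Thread* const self_;
  const StackedShadowFrameType type_;
};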
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 6fafcd1611..0124d90372 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -295,7 +295,10 @@ static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame&
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
uint16_t size = switch_data[1];
- DCHECK_GT(size, 0);
+ if (size == 0) {
+ // Empty packed switch, move forward by 3 (size of PACKED_SWITCH).
+ return 3;
+ }
const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
DCHECK(IsAligned<4>(keys));
int32_t first_key = keys[0];
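
A minimal sketch of the packed-switch decode that the size == 0 guard above protects, assuming the payload layout documented in builder.cc (signature, entry count, 32-bit first key, then 32-bit relative branch targets, all stored as uint16_t code units); memcpy stands in for ART's aligned reinterpret_cast:

#include <cstdint>
#include <cstring>

int32_t PackedSwitchTarget(const uint16_t* switch_data, int32_t test_val) {
  uint16_t size = switch_data[1];  // Entry count; zero is now legal.
  if (size == 0) {
    return 3;  // Empty switch: skip the 3-code-unit PACKED_SWITCH itself.
  }
  int32_t first_key;
  std::memcpy(&first_key, &switch_data[2], sizeof(first_key));
  int32_t index = test_val - first_key;
  if (index < 0 || index >= size) {
    return 3;  // No matching case: fall through.
  }
  int32_t target;  // Targets are int32 offsets starting at switch_data[4].
  std::memcpy(&target, &switch_data[4 + index * 2], sizeof(target));
  return target;
}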
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index b39567b297..6a6d1986dc 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -61,6 +61,8 @@ union PACKED(4) JValue {
uint8_t GetZ() const { return z; }
void SetZ(uint8_t new_z) { z = new_z; }
+ mirror::Object** GetGCRoot() { return &l; }
+
private:
uint8_t z;
int8_t b;
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 67dcc9c6af..a41aed6f29 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -282,11 +282,11 @@ static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool pub
static jobjectArray Class_getDeclaredConstructorsInternal(
JNIEnv* env, jobject javaThis, jboolean publicOnly) {
ScopedFastNativeObjectAccess soa(env);
- auto* klass = DecodeClass(soa, javaThis);
- StackHandleScope<1> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t constructor_count = 0;
// Two pass approach for speed.
- for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
+ for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
constructor_count += MethodMatchesConstructor(&m, publicOnly != JNI_FALSE) ? 1u : 0u;
}
auto h_constructors = hs.NewHandle(mirror::ObjectArray<mirror::Constructor>::Alloc(
@@ -296,7 +296,7 @@ static jobjectArray Class_getDeclaredConstructorsInternal(
return nullptr;
}
constructor_count = 0;
- for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
+ for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
if (MethodMatchesConstructor(&m, publicOnly != JNI_FALSE)) {
auto* constructor = mirror::Constructor::CreateFromArtMethod(soa.Self(), &m);
if (UNLIKELY(constructor == nullptr)) {
@@ -319,16 +319,16 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
// were synthesized by the runtime.
constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
ScopedFastNativeObjectAccess soa(env);
- StackHandleScope<4> hs(soa.Self());
+ StackHandleScope<3> hs(soa.Self());
auto h_method_name = hs.NewHandle(soa.Decode<mirror::String*>(name));
if (UNLIKELY(h_method_name.Get() == nullptr)) {
ThrowNullPointerException("name == null");
return nullptr;
}
auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
- auto* klass = DecodeClass(soa, javaThis);
+ Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
ArtMethod* result = nullptr;
- for (auto& m : klass->GetVirtualMethods(sizeof(void*))) {
+ for (auto& m : h_klass->GetVirtualMethods(sizeof(void*))) {
auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
// May cause thread suspension.
mirror::String* np_name = np_method->GetNameAsString(soa.Self());
@@ -347,7 +347,7 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
}
}
if (result == nullptr) {
- for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
+ for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
auto modifiers = m.GetAccessFlags();
if ((modifiers & kAccConstructor) != 0) {
continue;
@@ -381,7 +381,7 @@ static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaT
jboolean publicOnly) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<2> hs(soa.Self());
- auto klass = hs.NewHandle(DecodeClass(soa, javaThis));
+ Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t num_methods = 0;
for (auto& m : klass->GetVirtualMethods(sizeof(void*))) {
auto modifiers = m.GetAccessFlags();
@@ -432,7 +432,7 @@ static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaT
static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<4> hs(soa.Self());
- auto klass = hs.NewHandle(DecodeClass(soa, javaThis));
+ Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
if (UNLIKELY(klass->GetPrimitiveType() != 0 || klass->IsInterface() || klass->IsArrayClass() ||
klass->IsAbstract())) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
diff --git a/runtime/oat.h b/runtime/oat.h
index 604e16171d..000ae8ed5d 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '6', '3', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '6', '4', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8c9782aefe..02baad758f 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -163,8 +163,8 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
self_(self),
exception_handler_(exception_handler),
- prev_shadow_frame_(nullptr) {
- CHECK(!self_->HasDeoptimizationShadowFrame());
+ prev_shadow_frame_(nullptr),
+ stacked_shadow_frame_pushed_(false) {
}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -174,6 +174,13 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ if (!stacked_shadow_frame_pushed_) {
+ // In case there is no deoptimized shadow frame for this upcall, we still
+ // need to push a nullptr to the stack since there is always a matching pop after
+ // the long jump.
+ self_->PushStackedShadowFrame(nullptr, StackedShadowFrameType::kDeoptimizationShadowFrame);
+ stacked_shadow_frame_pushed_ = true;
+ }
return false; // End stack walk.
} else if (method->IsRuntimeMethod()) {
// Ignore callee save method.
@@ -204,111 +211,116 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
bool verifier_success = verifier.Verify();
CHECK(verifier_success) << PrettyMethod(m);
ShadowFrame* new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, m, dex_pc);
- self_->SetShadowFrameUnderConstruction(new_frame);
- const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
-
- // Markers for dead values, used when the verifier knows a Dex register is undefined,
- // or when the compiler knows the register has not been initialized, or is not used
- // anymore in the method.
- static constexpr uint32_t kDeadValue = 0xEBADDE09;
- static constexpr uint64_t kLongDeadValue = 0xEBADDE09EBADDE09;
- for (uint16_t reg = 0; reg < num_regs; ++reg) {
- VRegKind kind = GetVRegKind(reg, kinds);
- switch (kind) {
- case kUndefined:
- new_frame->SetVReg(reg, kDeadValue);
- break;
- case kConstant:
- new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
- break;
- case kReferenceVReg: {
- uint32_t value = 0;
- // Check IsReferenceVReg in case the compiled GC map doesn't agree with the verifier.
- // We don't want to copy a stale reference into the shadow frame as a reference.
- // b/20736048
- if (GetVReg(m, reg, kind, &value) && IsReferenceVReg(m, reg)) {
- new_frame->SetVRegReference(reg, reinterpret_cast<mirror::Object*>(value));
- } else {
+ {
+ ScopedStackedShadowFramePusher pusher(self_, new_frame,
+ StackedShadowFrameType::kShadowFrameUnderConstruction);
+ const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
+
+ // Markers for dead values, used when the verifier knows a Dex register is undefined,
+ // or when the compiler knows the register has not been initialized, or is not used
+ // anymore in the method.
+ static constexpr uint32_t kDeadValue = 0xEBADDE09;
+ static constexpr uint64_t kLongDeadValue = 0xEBADDE09EBADDE09;
+ for (uint16_t reg = 0; reg < num_regs; ++reg) {
+ VRegKind kind = GetVRegKind(reg, kinds);
+ switch (kind) {
+ case kUndefined:
new_frame->SetVReg(reg, kDeadValue);
- }
- break;
- }
- case kLongLoVReg:
- if (GetVRegKind(reg + 1, kinds) == kLongHiVReg) {
- // Treat it as a "long" register pair.
- uint64_t value = 0;
- if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &value)) {
- new_frame->SetVRegLong(reg, value);
- } else {
- new_frame->SetVRegLong(reg, kLongDeadValue);
- }
- } else {
+ break;
+ case kConstant:
+ new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
+ break;
+ case kReferenceVReg: {
uint32_t value = 0;
- if (GetVReg(m, reg, kind, &value)) {
- new_frame->SetVReg(reg, value);
+ // Check IsReferenceVReg in case the compiled GC map doesn't agree with the verifier.
+ // We don't want to copy a stale reference into the shadow frame as a reference.
+ // b/20736048
+ if (GetVReg(m, reg, kind, &value) && IsReferenceVReg(m, reg)) {
+ new_frame->SetVRegReference(reg, reinterpret_cast<mirror::Object*>(value));
} else {
new_frame->SetVReg(reg, kDeadValue);
}
+ break;
}
- break;
- case kLongHiVReg:
- if (GetVRegKind(reg - 1, kinds) == kLongLoVReg) {
- // Nothing to do: we treated it as a "long" register pair.
- } else {
- uint32_t value = 0;
- if (GetVReg(m, reg, kind, &value)) {
- new_frame->SetVReg(reg, value);
+ case kLongLoVReg:
+ if (GetVRegKind(reg + 1, kinds) == kLongHiVReg) {
+ // Treat it as a "long" register pair.
+ uint64_t value = 0;
+ if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &value)) {
+ new_frame->SetVRegLong(reg, value);
+ } else {
+ new_frame->SetVRegLong(reg, kLongDeadValue);
+ }
} else {
- new_frame->SetVReg(reg, kDeadValue);
+ uint32_t value = 0;
+ if (GetVReg(m, reg, kind, &value)) {
+ new_frame->SetVReg(reg, value);
+ } else {
+ new_frame->SetVReg(reg, kDeadValue);
+ }
}
- }
- break;
- case kDoubleLoVReg:
- if (GetVRegKind(reg + 1, kinds) == kDoubleHiVReg) {
- uint64_t value = 0;
- if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &value)) {
- // Treat it as a "double" register pair.
- new_frame->SetVRegLong(reg, value);
+ break;
+ case kLongHiVReg:
+ if (GetVRegKind(reg - 1, kinds) == kLongLoVReg) {
+ // Nothing to do: we treated it as a "long" register pair.
} else {
- new_frame->SetVRegLong(reg, kLongDeadValue);
+ uint32_t value = 0;
+ if (GetVReg(m, reg, kind, &value)) {
+ new_frame->SetVReg(reg, value);
+ } else {
+ new_frame->SetVReg(reg, kDeadValue);
+ }
}
- } else {
- uint32_t value = 0;
- if (GetVReg(m, reg, kind, &value)) {
- new_frame->SetVReg(reg, value);
+ break;
+ case kDoubleLoVReg:
+ if (GetVRegKind(reg + 1, kinds) == kDoubleHiVReg) {
+ uint64_t value = 0;
+ if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &value)) {
+ // Treat it as a "double" register pair.
+ new_frame->SetVRegLong(reg, value);
+ } else {
+ new_frame->SetVRegLong(reg, kLongDeadValue);
+ }
} else {
- new_frame->SetVReg(reg, kDeadValue);
+ uint32_t value = 0;
+ if (GetVReg(m, reg, kind, &value)) {
+ new_frame->SetVReg(reg, value);
+ } else {
+ new_frame->SetVReg(reg, kDeadValue);
+ }
}
- }
- break;
- case kDoubleHiVReg:
- if (GetVRegKind(reg - 1, kinds) == kDoubleLoVReg) {
- // Nothing to do: we treated it as a "double" register pair.
- } else {
+ break;
+ case kDoubleHiVReg:
+ if (GetVRegKind(reg - 1, kinds) == kDoubleLoVReg) {
+ // Nothing to do: we treated it as a "double" register pair.
+ } else {
+ uint32_t value = 0;
+ if (GetVReg(m, reg, kind, &value)) {
+ new_frame->SetVReg(reg, value);
+ } else {
+ new_frame->SetVReg(reg, kDeadValue);
+ }
+ }
+ break;
+ default:
uint32_t value = 0;
if (GetVReg(m, reg, kind, &value)) {
new_frame->SetVReg(reg, value);
} else {
new_frame->SetVReg(reg, kDeadValue);
}
- }
- break;
- default:
- uint32_t value = 0;
- if (GetVReg(m, reg, kind, &value)) {
- new_frame->SetVReg(reg, value);
- } else {
- new_frame->SetVReg(reg, kDeadValue);
- }
- break;
+ break;
+ }
}
}
if (prev_shadow_frame_ != nullptr) {
prev_shadow_frame_->SetLink(new_frame);
} else {
- self_->SetDeoptimizationShadowFrame(new_frame);
+    // Will be popped after the long jump that follows DeoptimizeStack(),
+    // right before interpreter::EnterInterpreterFromDeoptimize().
+ stacked_shadow_frame_pushed_ = true;
+ self_->PushStackedShadowFrame(new_frame, StackedShadowFrameType::kDeoptimizationShadowFrame);
}
- self_->ClearShadowFrameUnderConstruction();
prev_shadow_frame_ = new_frame;
return true;
}
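
The behavioral core of the hunk above is the new kReferenceVReg guard. As a hedged aid to reading it, here is a minimal self-contained sketch of just that check; Object, ShadowFrame, kDeadValue's exact value, and the two lookup stubs are hypothetical stand-ins for the real ART types and StackVisitor accessors, and only the guard logic itself is taken from the patch:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the ART types used in the hunk above.
struct Object {};
struct ShadowFrame {
  void SetVReg(uint16_t reg, uint32_t value) {
    std::printf("v%u = %u\n", static_cast<unsigned>(reg), value);
  }
  void SetVRegReference(uint16_t reg, Object* ref) {
    std::printf("v%u = ref %p\n", static_cast<unsigned>(reg), static_cast<void*>(ref));
  }
};
static constexpr uint32_t kDeadValue = 0xEBADDE09;  // Assumed dead-vreg marker.
bool GetVReg(uint16_t, uint32_t* value) { *value = 0; return true; }  // Stub: quick GC map lookup.
bool IsReferenceVReg(uint16_t) { return false; }                      // Stub: verifier's view.

// Copy one vreg into the shadow frame as a reference only when the GC map
// *and* the verifier agree it holds a reference (b/20736048); otherwise the
// potentially stale slot is written as a dead value, never as a root.
void RestoreReferenceVReg(ShadowFrame* new_frame, uint16_t reg) {
  uint32_t value = 0;
  if (GetVReg(reg, &value) && IsReferenceVReg(reg)) {
    new_frame->SetVRegReference(
        reg, reinterpret_cast<Object*>(static_cast<uintptr_t>(value)));
  } else {
    new_frame->SetVReg(reg, kDeadValue);
  }
}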
@@ -316,6 +328,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
Thread* const self_;
QuickExceptionHandler* const exception_handler_;
ShadowFrame* prev_shadow_frame_;
+ bool stacked_shadow_frame_pushed_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizeStackVisitor);
};
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 29635a4308..f314f61245 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -147,29 +147,82 @@ void Thread::ResetQuickAllocEntryPointsForThread() {
ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
}
-void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
- tlsPtr_.deoptimization_shadow_frame = sf;
-}
+class DeoptimizationReturnValueRecord {
+ public:
+ DeoptimizationReturnValueRecord(const JValue& ret_val,
+ bool is_reference,
+ DeoptimizationReturnValueRecord* link)
+ : ret_val_(ret_val), is_reference_(is_reference), link_(link) {}
+
+ JValue GetReturnValue() const { return ret_val_; }
+ bool IsReference() const { return is_reference_; }
+ DeoptimizationReturnValueRecord* GetLink() const { return link_; }
+ mirror::Object** GetGCRoot() {
+ DCHECK(is_reference_);
+ return ret_val_.GetGCRoot();
+ }
+
+ private:
+ JValue ret_val_;
+ const bool is_reference_;
+ DeoptimizationReturnValueRecord* const link_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizationReturnValueRecord);
+};
+
+class StackedShadowFrameRecord {
+ public:
+ StackedShadowFrameRecord(ShadowFrame* shadow_frame,
+ StackedShadowFrameType type,
+ StackedShadowFrameRecord* link)
+ : shadow_frame_(shadow_frame),
+ type_(type),
+ link_(link) {}
-void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
- tls64_.deoptimization_return_value.SetJ(ret_val.GetJ());
+ ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
+ StackedShadowFrameType GetType() const { return type_; }
+ StackedShadowFrameRecord* GetLink() const { return link_; }
+
+ private:
+ ShadowFrame* const shadow_frame_;
+ const StackedShadowFrameType type_;
+ StackedShadowFrameRecord* const link_;
+
+ DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
+};
+
+void Thread::PushAndClearDeoptimizationReturnValue() {
+ DeoptimizationReturnValueRecord* record = new DeoptimizationReturnValueRecord(
+ tls64_.deoptimization_return_value,
+ tls32_.deoptimization_return_value_is_reference,
+ tlsPtr_.deoptimization_return_value_stack);
+ tlsPtr_.deoptimization_return_value_stack = record;
+ ClearDeoptimizationReturnValue();
}
-ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
- ShadowFrame* sf = tlsPtr_.deoptimization_shadow_frame;
- tlsPtr_.deoptimization_shadow_frame = nullptr;
- ret_val->SetJ(tls64_.deoptimization_return_value.GetJ());
- return sf;
+JValue Thread::PopDeoptimizationReturnValue() {
+ DeoptimizationReturnValueRecord* record = tlsPtr_.deoptimization_return_value_stack;
+ DCHECK(record != nullptr);
+ tlsPtr_.deoptimization_return_value_stack = record->GetLink();
+ JValue ret_val(record->GetReturnValue());
+ delete record;
+ return ret_val;
}
-void Thread::SetShadowFrameUnderConstruction(ShadowFrame* sf) {
- sf->SetLink(tlsPtr_.shadow_frame_under_construction);
- tlsPtr_.shadow_frame_under_construction = sf;
+void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
+ StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
+ sf, type, tlsPtr_.stacked_shadow_frame_record);
+ tlsPtr_.stacked_shadow_frame_record = record;
}
-void Thread::ClearShadowFrameUnderConstruction() {
- CHECK_NE(static_cast<ShadowFrame*>(nullptr), tlsPtr_.shadow_frame_under_construction);
- tlsPtr_.shadow_frame_under_construction = tlsPtr_.shadow_frame_under_construction->GetLink();
+ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type) {
+ StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
+ DCHECK(record != nullptr);
+ DCHECK_EQ(record->GetType(), type);
+ tlsPtr_.stacked_shadow_frame_record = record->GetLink();
+ ShadowFrame* shadow_frame = record->GetShadowFrame();
+ delete record;
+ return shadow_frame;
}
void Thread::InitTid() {
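
Both new record stacks are strictly LIFO. A hedged sketch of how the new Thread APIs are meant to pair up around a deoptimization (a hypothetical call site; the real ones live in the exception handler and the interpreter entry path):

void DeoptimizeWithReturnValue(Thread* self, ShadowFrame* frame,
                               const JValue& ret_val, bool is_reference) {
  // Stash the in-flight return value: a nested deoptimization triggered by
  // the verifier could otherwise clobber the single tls64_ slot.
  self->SetDeoptimizationReturnValue(ret_val, is_reference);
  self->PushAndClearDeoptimizationReturnValue();
  self->PushStackedShadowFrame(frame,
                               StackedShadowFrameType::kDeoptimizationShadowFrame);
  // ... long jump into the interpreter; on the other side:
  ShadowFrame* sf =
      self->PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
  JValue value = self->PopDeoptimizationReturnValue();  // Pairs with the push above.
  // interpreter::EnterInterpreterFromDeoptimize(self, sf, &value);  (sketch only)
}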
@@ -2385,21 +2438,27 @@ void Thread::VisitRoots(RootVisitor* visitor) {
if (tlsPtr_.debug_invoke_req != nullptr) {
tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
}
- if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
+ if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
RootCallbackVisitor visitor_to_callback(visitor, thread_id);
ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
- for (ShadowFrame* shadow_frame = tlsPtr_.deoptimization_shadow_frame; shadow_frame != nullptr;
- shadow_frame = shadow_frame->GetLink()) {
- mapper.VisitShadowFrame(shadow_frame);
+ for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
+ record != nullptr;
+ record = record->GetLink()) {
+ for (ShadowFrame* shadow_frame = record->GetShadowFrame();
+ shadow_frame != nullptr;
+ shadow_frame = shadow_frame->GetLink()) {
+ mapper.VisitShadowFrame(shadow_frame);
+ }
}
}
- if (tlsPtr_.shadow_frame_under_construction != nullptr) {
- RootCallbackVisitor visitor_to_callback(visitor, thread_id);
- ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
- for (ShadowFrame* shadow_frame = tlsPtr_.shadow_frame_under_construction;
- shadow_frame != nullptr;
- shadow_frame = shadow_frame->GetLink()) {
- mapper.VisitShadowFrame(shadow_frame);
+ if (tlsPtr_.deoptimization_return_value_stack != nullptr) {
+ for (DeoptimizationReturnValueRecord* record = tlsPtr_.deoptimization_return_value_stack;
+ record != nullptr;
+ record = record->GetLink()) {
+ if (record->IsReference()) {
+ visitor->VisitRootIfNonNull(record->GetGCRoot(),
+ RootInfo(kRootThreadObject, thread_id));
+ }
}
}
for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 27f029e54c..0e71c08b07 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -74,6 +74,7 @@ class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
+class DeoptimizationReturnValueRecord;
class DexFile;
class JavaVMExt;
struct JNIEnvExt;
@@ -82,6 +83,7 @@ class Runtime;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class SingleStepControl;
+class StackedShadowFrameRecord;
class Thread;
class ThreadList;
@@ -99,6 +101,11 @@ enum ThreadFlag {
kCheckpointRequest = 2 // Request that the thread do some checkpoint work and then continue.
};
+enum class StackedShadowFrameType {
+ kShadowFrameUnderConstruction,
+ kDeoptimizationShadowFrame
+};
+
static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;
// Thread's stack layout for implicit stack overflow checks:
@@ -789,21 +796,25 @@ class Thread {
return reinterpret_cast<mirror::Throwable*>(-1);
}
- void SetDeoptimizationShadowFrame(ShadowFrame* sf);
- void SetDeoptimizationReturnValue(const JValue& ret_val);
-
- ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);
-
- bool HasDeoptimizationShadowFrame() const {
- return tlsPtr_.deoptimization_shadow_frame != nullptr;
+  // Currently, deoptimization invokes the verifier, which can trigger class
+  // loading and execute Java code, so nested deoptimizations may happen.
+  // We therefore need to save the ongoing deoptimization shadow frames and
+  // return values on stacks.
+ void SetDeoptimizationReturnValue(const JValue& ret_val, bool is_reference) {
+ tls64_.deoptimization_return_value.SetJ(ret_val.GetJ());
+ tls32_.deoptimization_return_value_is_reference = is_reference;
}
-
- void SetShadowFrameUnderConstruction(ShadowFrame* sf);
- void ClearShadowFrameUnderConstruction();
-
- bool HasShadowFrameUnderConstruction() const {
- return tlsPtr_.shadow_frame_under_construction != nullptr;
+ bool IsDeoptimizationReturnValueReference() {
+ return tls32_.deoptimization_return_value_is_reference;
+ }
+ void ClearDeoptimizationReturnValue() {
+ tls64_.deoptimization_return_value.SetJ(0);
+ tls32_.deoptimization_return_value_is_reference = false;
}
+ void PushAndClearDeoptimizationReturnValue();
+ JValue PopDeoptimizationReturnValue();
+ void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
+ ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type);
std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
return tlsPtr_.instrumentation_stack;
@@ -1047,7 +1058,8 @@ class Thread {
explicit tls_32bit_sized_values(bool is_daemon) :
suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
- thread_exit_check_count(0), handling_signal_(false), suspended_at_suspend_check(false),
+ thread_exit_check_count(0), handling_signal_(false),
+ deoptimization_return_value_is_reference(false), suspended_at_suspend_check(false),
ready_for_debug_invoke(false), debug_method_entry_(false) {
}
@@ -1088,6 +1100,10 @@ class Thread {
// True if signal is being handled by this thread.
bool32_t handling_signal_;
+  // True if the return value passed to the interpreter after deoptimization
+  // is a reference. Used for GC purposes.
+ bool32_t deoptimization_return_value_is_reference;
+
// True if the thread is suspended in FullSuspendCheck(). This is
// used to distinguish runnable threads that are suspended due to
// a normal suspend check from other threads.
@@ -1123,8 +1139,9 @@ class Thread {
stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
- deoptimization_shadow_frame(nullptr), shadow_frame_under_construction(nullptr), name(nullptr),
- pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
+ stacked_shadow_frame_record(nullptr), deoptimization_return_value_stack(nullptr),
+ name(nullptr), pthread_self(0),
+ last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr) {
@@ -1200,11 +1217,13 @@ class Thread {
// JDWP single-stepping support.
SingleStepControl* single_step_control;
- // Shadow frame stack that is used temporarily during the deoptimization of a method.
- ShadowFrame* deoptimization_shadow_frame;
+  // For GC purposes, a shadow frame record stack that keeps track of:
+ // 1) shadow frames under construction.
+ // 2) deoptimization shadow frames.
+ StackedShadowFrameRecord* stacked_shadow_frame_record;
- // Shadow frame stack that is currently under construction but not yet on the stack
- ShadowFrame* shadow_frame_under_construction;
+ // Deoptimization return value record stack.
+ DeoptimizationReturnValueRecord* deoptimization_return_value_stack;
// A cached copy of the java.lang.Thread's name.
std::string* name;
@@ -1292,7 +1311,25 @@ class ScopedAssertNoThreadSuspension {
const char* const old_cause_;
};
+class ScopedStackedShadowFramePusher {
+ public:
+ ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
+ : self_(self), type_(type) {
+ self_->PushStackedShadowFrame(sf, type);
+ }
+ ~ScopedStackedShadowFramePusher() {
+ self_->PopStackedShadowFrame(type_);
+ }
+
+ private:
+ Thread* const self_;
+ const StackedShadowFrameType type_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
+};
+
std::ostream& operator<<(std::ostream& os, const Thread& thread);
+std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);
} // namespace art
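
For the under-construction case, a short hedged usage sketch of the new RAII helper; self and sf are assumed to be in scope, e.g. inside the interpreter's frame-building code:

{
  // Balanced push/pop even on early-return or exception paths: the destructor
  // runs PopStackedShadowFrame(kShadowFrameUnderConstruction) automatically,
  // so the GC never sees a dangling record.
  ScopedStackedShadowFramePusher pusher(
      self, sf, StackedShadowFrameType::kShadowFrameUnderConstruction);
  // ... fill in sf's vregs; a GC here still visits sf via the record stack.
}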
diff --git a/test/044-proxy/src/ReturnsAndArgPassing.java b/test/044-proxy/src/ReturnsAndArgPassing.java
index a1734100bc..225cc5b232 100644
--- a/test/044-proxy/src/ReturnsAndArgPassing.java
+++ b/test/044-proxy/src/ReturnsAndArgPassing.java
@@ -57,6 +57,8 @@ public class ReturnsAndArgPassing {
check(proxy instanceof Proxy);
check(method.getDeclaringClass() == MyInterface.class);
String name = method.getName();
+ // Check for moving GC bugs in proxy stubs.
+ Runtime.getRuntime().gc();
if (name.endsWith("Foo")) {
check(args == null);
fooInvocations++;
diff --git a/test/492-checker-inline-invoke-interface/expected.txt b/test/492-checker-inline-invoke-interface/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/492-checker-inline-invoke-interface/expected.txt
diff --git a/test/492-checker-inline-invoke-interface/info.txt b/test/492-checker-inline-invoke-interface/info.txt
new file mode 100644
index 0000000000..4a0a5ff1d3
--- /dev/null
+++ b/test/492-checker-inline-invoke-interface/info.txt
@@ -0,0 +1 @@
+Checker test to ensure we can inline interface calls.
diff --git a/test/492-checker-inline-invoke-interface/src/Main.java b/test/492-checker-inline-invoke-interface/src/Main.java
new file mode 100644
index 0000000000..9063af259d
--- /dev/null
+++ b/test/492-checker-inline-invoke-interface/src/Main.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+interface Itf {
+ public void $inline$foo();
+}
+
+public class Main implements Itf {
+ public void $inline$foo() {
+ }
+
+ public static void main(String[] args) {
+ Itf itf = new Main();
+ itf.$inline$foo();
+ }
+}
diff --git a/test/498-type-propagation/expected.txt b/test/498-type-propagation/expected.txt
new file mode 100644
index 0000000000..ccaf6f8f0f
--- /dev/null
+++ b/test/498-type-propagation/expected.txt
@@ -0,0 +1 @@
+Enter
diff --git a/test/498-type-propagation/info.txt b/test/498-type-propagation/info.txt
new file mode 100644
index 0000000000..b895e91f9d
--- /dev/null
+++ b/test/498-type-propagation/info.txt
@@ -0,0 +1,2 @@
+Regression test for the SSA builder of the optimizing
+compiler. See the comment in the smali file.
diff --git a/test/498-type-propagation/smali/TypePropagation.smali b/test/498-type-propagation/smali/TypePropagation.smali
new file mode 100644
index 0000000000..088ca89985
--- /dev/null
+++ b/test/498-type-propagation/smali/TypePropagation.smali
@@ -0,0 +1,30 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTypePropagation;
+
+.super Ljava/lang/Object;
+
+.method public static method([I)V
+ .registers 2
+ const/4 v0, 0
+ # When building the SSA graph, we will create a phi for v0, which will be of type
+ # integer. Only when we get rid of that phi in the redundant phi elimination will
+ # we realize it's just null.
+ :start
+ if-eq v1, v0, :end
+ if-eq v1, v0, :start
+ :end
+ return-void
+.end method
diff --git a/test/498-type-propagation/src/Main.java b/test/498-type-propagation/src/Main.java
new file mode 100644
index 0000000000..7a14172bf5
--- /dev/null
+++ b/test/498-type-propagation/src/Main.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ // Workaround for b/18051191.
+ System.out.println("Enter");
+ Class<?> c = Class.forName("TypePropagation");
+ Method m = c.getMethod("method", int[].class);
+ int[] array = new int[7];
+ Object[] arguments = { array };
+ m.invoke(null, arguments);
+ }
+}
diff --git a/test/501-regression-packed-switch/expected.txt b/test/501-regression-packed-switch/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/501-regression-packed-switch/expected.txt
diff --git a/test/501-regression-packed-switch/info.txt b/test/501-regression-packed-switch/info.txt
new file mode 100644
index 0000000000..fbd93fa815
--- /dev/null
+++ b/test/501-regression-packed-switch/info.txt
@@ -0,0 +1,2 @@
+Regression test for the interpreter and the optimizing compiler's builder,
+which used to trip when compiled code contained a packed switch with no targets.
diff --git a/test/501-regression-packed-switch/smali/Test.smali b/test/501-regression-packed-switch/smali/Test.smali
new file mode 100644
index 0000000000..8756ed5f23
--- /dev/null
+++ b/test/501-regression-packed-switch/smali/Test.smali
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+.method public static EmptyPackedSwitch(I)I
+ .registers 1
+ packed-switch v0, :pswitch_data_6a
+ const/4 v0, 0x5
+ return v0
+
+ :pswitch_data_6a
+ .packed-switch 0x0
+ .end packed-switch
+.end method
diff --git a/test/501-regression-packed-switch/src/Main.java b/test/501-regression-packed-switch/src/Main.java
new file mode 100644
index 0000000000..b80bc62c50
--- /dev/null
+++ b/test/501-regression-packed-switch/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Type;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String args[]) throws Exception {
+ Class<?> c = Class.forName("Test");
+ Method m = c.getMethod("EmptyPackedSwitch", new Class[] { int.class });
+ Integer result = (Integer) m.invoke(null, new Integer(42));
+ if (result != 5) {
+ throw new Error("Expected 5, got " + result);
+ }
+ }
+}
diff --git a/test/504-regression-baseline-entry/expected.txt b/test/504-regression-baseline-entry/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/504-regression-baseline-entry/expected.txt
diff --git a/test/504-regression-baseline-entry/info.txt b/test/504-regression-baseline-entry/info.txt
new file mode 100644
index 0000000000..26cc9ce75b
--- /dev/null
+++ b/test/504-regression-baseline-entry/info.txt
@@ -0,0 +1,2 @@
+Regression test for the baseline compiler, which required the entry block to
+fall through to the next block.
\ No newline at end of file
diff --git a/test/504-regression-baseline-entry/smali/Test.smali b/test/504-regression-baseline-entry/smali/Test.smali
new file mode 100644
index 0000000000..06412e7618
--- /dev/null
+++ b/test/504-regression-baseline-entry/smali/Test.smali
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+.method public static SingleGotoStart()I
+ .registers 1
+ goto :second
+
+ :first
+ return v0
+
+ :second
+ const/4 v0, 0x5
+ goto :first
+.end method
diff --git a/test/504-regression-baseline-entry/src/Main.java b/test/504-regression-baseline-entry/src/Main.java
new file mode 100644
index 0000000000..2c9df28342
--- /dev/null
+++ b/test/504-regression-baseline-entry/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Type;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String args[]) throws Exception {
+ Class<?> c = Class.forName("Test");
+ Method m = c.getMethod("SingleGotoStart", (Class[]) null);
+ Integer result = (Integer) m.invoke(null);
+ if (result != 5) {
+ throw new Error("Expected 5, got " + result);
+ }
+ }
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index a6b216bf3a..284c7ecb2c 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -16,4 +16,7 @@ MoveExc
MoveExceptionOnEntry
EmptySparseSwitch
b/20224106
+b/21873167
+b/21614284
+b/21902684
Done!
diff --git a/test/800-smali/smali/b_21614284.smali b/test/800-smali/smali/b_21614284.smali
new file mode 100644
index 0000000000..3cb1bd0ce2
--- /dev/null
+++ b/test/800-smali/smali/b_21614284.smali
@@ -0,0 +1,22 @@
+.class public LB21614284;
+.super Ljava/lang/Object;
+
+.field private a:I
+
+.method public constructor <init>()V
+ .registers 2
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ const v0, 42
+ iput v0, p0, LB21614284;->a:I
+ return-void
+.end method
+
+.method public static test(LB21614284;)I
+ .registers 2
+ # Empty if, testing p0.
+ if-nez p0, :label
+ :label
+ # p0 still needs a null check.
+ iget v0, p0, LB21614284;->a:I
+ return v0
+.end method
diff --git a/test/800-smali/smali/b_21873167.smali b/test/800-smali/smali/b_21873167.smali
new file mode 100644
index 0000000000..c0c09cbbf2
--- /dev/null
+++ b/test/800-smali/smali/b_21873167.smali
@@ -0,0 +1,18 @@
+.class public LB21873167;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public test()V
+ .registers 1
+ :start
+ monitor-enter p0
+ monitor-exit p0
+ :end
+ return-void
+ .catchall {:start .. :end} :end
+.end method
diff --git a/test/800-smali/smali/b_21902684.smali b/test/800-smali/smali/b_21902684.smali
new file mode 100644
index 0000000000..2d906b61bd
--- /dev/null
+++ b/test/800-smali/smali/b_21902684.smali
@@ -0,0 +1,17 @@
+.class public LB21902684;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public test()V
+ .registers 1
+ goto :end
+ new-instance v0, Ljava/lang/String;
+ invoke-direct {v0}, Ljava/lang/String;-><init>()V
+ :end
+ return-void
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 3e88364089..8d66783324 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -81,6 +81,10 @@ public class Main {
null));
testCases.add(new TestCase("b/20224106", "B20224106", "run", null, new VerifyError(),
0));
+ testCases.add(new TestCase("b/21873167", "B21873167", "test", null, null, null));
+ testCases.add(new TestCase("b/21614284", "B21614284", "test", new Object[] { null },
+ new NullPointerException(), null));
+ testCases.add(new TestCase("b/21902684", "B21902684", "test", null, null, null));
}
public void runTests() {
diff --git a/tools/generate-operator-out.py b/tools/generate-operator-out.py
index 2b57222049..c74508d9cd 100755
--- a/tools/generate-operator-out.py
+++ b/tools/generate-operator-out.py
@@ -154,10 +154,12 @@ def ProcessFile(filename):
sys.stderr.write('%s\n' % (rest))
Confused(filename, line_number, raw_line)
- if len(enclosing_classes) > 0:
- if is_enum_class:
- enum_value = enum_name + '::' + enum_value
- else:
+    # If the enum is scoped, we must prefix the enum value with the enum name
+    # (which is already prefixed by the enclosing classes).
+ if is_enum_class:
+ enum_value = enum_name + '::' + enum_value
+ else:
+ if len(enclosing_classes) > 0:
enum_value = '::'.join(enclosing_classes) + '::' + enum_value
_ENUMS[enum_name].append((enum_value, enum_text))
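
For a scoped enum such as StackedShadowFrameType above, the corrected script should now qualify each case label with the enum name rather than the enclosing classes. An illustrative shape of the generated operator<< (assumed, not taken from the tool's actual output):

#include <ostream>

std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& rhs) {
  switch (rhs) {
    case StackedShadowFrameType::kShadowFrameUnderConstruction:
      os << "kShadowFrameUnderConstruction";
      break;
    case StackedShadowFrameType::kDeoptimizationShadowFrame:
      os << "kDeoptimizationShadowFrame";
      break;
    default:
      os << "StackedShadowFrameType[" << static_cast<int>(rhs) << "]";
      break;
  }
  return os;
}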