-rw-r--r--  compiler/Android.mk  4
-rw-r--r--  compiler/dex/bb_optimizations.cc  90
-rw-r--r--  compiler/dex/bb_optimizations.h  68
-rw-r--r--  compiler/dex/frontend.cc  6
-rw-r--r--  compiler/dex/mir_dataflow.cc  2
-rw-r--r--  compiler/dex/mir_graph.cc  41
-rw-r--r--  compiler/dex/mir_graph.h  16
-rw-r--r--  compiler/dex/mir_optimization_test.cc  2
-rw-r--r--  compiler/dex/pass_driver_me.cc  208
-rw-r--r--  compiler/dex/pass_driver_me.h  147
-rw-r--r--  compiler/dex/pass_driver_me_opts.cc  88
-rw-r--r--  compiler/dex/pass_driver_me_opts.h  44
-rw-r--r--  compiler/dex/pass_driver_me_post_opt.cc  75
-rw-r--r--  compiler/dex/pass_driver_me_post_opt.h  39
-rw-r--r--  compiler/dex/pass_me.h  5
-rw-r--r--  compiler/dex/post_opt_passes.cc  108
-rw-r--r--  compiler/dex/post_opt_passes.h  284
-rw-r--r--  compiler/dex/quick/gen_invoke.cc  8
-rw-r--r--  compiler/dex/quick/ralloc_util.cc  11
-rw-r--r--  compiler/dex/ssa_transformation.cc  4
-rw-r--r--  compiler/dex/vreg_analysis.cc  3
-rw-r--r--  compiler/jni/jni_compiler_test.cc  121
-rw-r--r--  compiler/jni/quick/arm/calling_convention_arm.cc  5
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.cc  7
-rw-r--r--  compiler/jni/quick/calling_convention.h  3
-rw-r--r--  compiler/jni/quick/mips/calling_convention_mips.cc  5
-rw-r--r--  compiler/jni/quick/x86/calling_convention_x86.cc  5
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.cc  7
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc  11
-rw-r--r--  compiler/utils/x86/assembler_x86.cc  9
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc  13
-rw-r--r--  dex2oat/dex2oat.cc  12
-rw-r--r--  disassembler/disassembler_x86.cc  9
-rw-r--r--  runtime/Android.mk  2
-rw-r--r--  runtime/arch/arch_test.cc  17
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.h  2
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S  36
-rw-r--r--  runtime/arch/stub_test.cc  10
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.h  2
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S  28
-rw-r--r--  runtime/catch_block_stack_visitor.cc  67
-rw-r--r--  runtime/catch_block_stack_visitor.h  58
-rw-r--r--  runtime/class_linker-inl.h  14
-rw-r--r--  runtime/class_linker.h  2
-rw-r--r--  runtime/class_linker_test.cc  3
-rw-r--r--  runtime/deoptimize_stack_visitor.cc  88
-rw-r--r--  runtime/deoptimize_stack_visitor.h  55
-rw-r--r--  runtime/entrypoints/quick/callee_save_frame.h  4
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc  22
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc  2
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc  15
-rw-r--r--  runtime/entrypoints/quick/quick_field_entrypoints.cc  32
-rw-r--r--  runtime/entrypoints/quick/quick_fillarray_entrypoints.cc  2
-rw-r--r--  runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc  7
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc  4
-rw-r--r--  runtime/entrypoints/quick/quick_lock_entrypoints.cc  6
-rw-r--r--  runtime/entrypoints/quick/quick_thread_entrypoints.cc  2
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc  20
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  151
-rw-r--r--  runtime/exception_test.cc  4
-rw-r--r--  runtime/fault_handler.cc  3
-rw-r--r--  runtime/gc/heap.cc  2
-rw-r--r--  runtime/gc/heap.h  2
-rw-r--r--  runtime/handle_scope.h  14
-rw-r--r--  runtime/interpreter/interpreter.cc  2
-rw-r--r--  runtime/interpreter/interpreter_common.h  11
-rw-r--r--  runtime/interpreter/interpreter_goto_table_impl.cc  34
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc  34
-rw-r--r--  runtime/jni_internal.cc  2
-rw-r--r--  runtime/mem_map.cc  11
-rw-r--r--  runtime/mirror/array.cc  6
-rw-r--r--  runtime/mirror/art_method-inl.h  12
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc  5
-rw-r--r--  runtime/native/java_lang_reflect_Array.cc  4
-rw-r--r--  runtime/native/scoped_fast_native_object_access.h  2
-rw-r--r--  runtime/quick_exception_handler.cc  186
-rw-r--r--  runtime/quick_exception_handler.h  24
-rw-r--r--  runtime/stack.cc  60
-rw-r--r--  runtime/stack.h  35
-rw-r--r--  runtime/thread.cc  23
-rw-r--r--  runtime/thread.h  3
-rw-r--r--  runtime/verifier/reg_type.cc  2
82 files changed, 1525 insertions, 1067 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index f297213e74..3bed01dc5a 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -60,7 +60,9 @@ LIBART_COMPILER_SRC_FILES := \
dex/mir_method_info.cc \
dex/mir_optimization.cc \
dex/bb_optimizations.cc \
- dex/pass_driver_me.cc \
+ dex/post_opt_passes.cc \
+ dex/pass_driver_me_opts.cc \
+ dex/pass_driver_me_post_opt.cc \
dex/frontend.cc \
dex/mir_graph.cc \
dex/mir_analysis.cc \
diff --git a/compiler/dex/bb_optimizations.cc b/compiler/dex/bb_optimizations.cc
index 8b5eba0f67..06e259a65f 100644
--- a/compiler/dex/bb_optimizations.cc
+++ b/compiler/dex/bb_optimizations.cc
@@ -26,83 +26,11 @@ namespace art {
bool CodeLayout::Worker(const PassDataHolder* data) const {
DCHECK(data != nullptr);
const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
- CompilationUnit* cUnit = pass_me_data_holder->c_unit;
- DCHECK(cUnit != nullptr);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
BasicBlock* bb = pass_me_data_holder->bb;
DCHECK(bb != nullptr);
- cUnit->mir_graph->LayoutBlocks(bb);
- // No need of repeating, so just return false.
- return false;
-}
-
-/*
- * SSATransformation pass implementation start.
- */
-void SSATransformation::Start(const PassDataHolder* data) const {
- DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->SSATransformationStart();
-}
-
-bool SSATransformation::Worker(const PassDataHolder* data) const {
- DCHECK(data != nullptr);
- const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
- CompilationUnit* cUnit = pass_me_data_holder->c_unit;
- DCHECK(cUnit != nullptr);
- BasicBlock* bb = pass_me_data_holder->bb;
- DCHECK(bb != nullptr);
- cUnit->mir_graph->InsertPhiNodeOperands(bb);
- // No need of repeating, so just return false.
- return false;
-}
-
-void SSATransformation::End(const PassDataHolder* data) const {
- DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->SSATransformationEnd();
-}
-
-/*
- * ConstantPropagation pass implementation start
- */
-bool ConstantPropagation::Worker(const PassDataHolder* data) const {
- DCHECK(data != nullptr);
- const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
- CompilationUnit* cUnit = pass_me_data_holder->c_unit;
- DCHECK(cUnit != nullptr);
- BasicBlock* bb = pass_me_data_holder->bb;
- DCHECK(bb != nullptr);
- cUnit->mir_graph->DoConstantPropagation(bb);
- // No need of repeating, so just return false.
- return false;
-}
-
-/*
- * MethodUseCount pass implementation start.
- */
-bool MethodUseCount::Gate(const PassDataHolder* data) const {
- DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- // First initialize the data.
- cUnit->mir_graph->InitializeMethodUses();
-
- // Now check if the pass is to be ignored.
- bool res = ((cUnit->disable_opt & (1 << kPromoteRegs)) == 0);
-
- return res;
-}
-
-bool MethodUseCount::Worker(const PassDataHolder* data) const {
- DCHECK(data != nullptr);
- const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
- CompilationUnit* cUnit = pass_me_data_holder->c_unit;
- DCHECK(cUnit != nullptr);
- BasicBlock* bb = pass_me_data_holder->bb;
- DCHECK(bb != nullptr);
- cUnit->mir_graph->CountUses(bb);
+ c_unit->mir_graph->LayoutBlocks(bb);
// No need of repeating, so just return false.
return false;
}
@@ -113,11 +41,11 @@ bool MethodUseCount::Worker(const PassDataHolder* data) const {
bool BBCombine::Worker(const PassDataHolder* data) const {
DCHECK(data != nullptr);
const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
- CompilationUnit* cUnit = pass_me_data_holder->c_unit;
- DCHECK(cUnit != nullptr);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
BasicBlock* bb = pass_me_data_holder->bb;
DCHECK(bb != nullptr);
- cUnit->mir_graph->CombineBlocks(bb);
+ c_unit->mir_graph->CombineBlocks(bb);
// No need of repeating, so just return false.
return false;
@@ -128,15 +56,15 @@ bool BBCombine::Worker(const PassDataHolder* data) const {
*/
void BBOptimizations::Start(const PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
/*
* This pass has a different ordering depending on the suppress exception,
* so do the pass here for now:
* - Later, the Start should just change the ordering and we can move the extended
* creation into the pass driver's main job with a new iterator
*/
- cUnit->mir_graph->BasicBlockOptimization();
+ c_unit->mir_graph->BasicBlockOptimization();
}
} // namespace art
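
A note on the Worker contract that recurs throughout these passes: returning false tells the driver that this block does not request another iteration, while the Repeating* traversals rerun the whole walk until no Worker reports a change. The following self-contained sketch illustrates that contract with simplified stand-in types (Pass, BasicBlock, and ApplyPass here are illustrative toys, not the real ART classes):

#include <iostream>
#include <vector>

struct BasicBlock { int id; };

// Stripped-down stand-ins for the ART pass hooks.
struct Pass {
  virtual ~Pass() {}
  virtual bool Gate() const { return true; }   // Should the pass run at all?
  virtual void Start() const {}                // One-time setup before the walk.
  virtual bool Worker(BasicBlock*) const { return false; }  // Per-block work.
  virtual void End() const {}                  // One-time teardown after the walk.
};

struct PrintBlocks : Pass {
  bool Worker(BasicBlock* bb) const override {
    std::cout << "visiting block " << bb->id << '\n';
    return false;  // As in CodeLayout/BBCombine: no fixed point needed.
  }
};

// A repeating walk: rerun while any Worker reports a change, as the
// Repeating*DfsIterator variants do in the real driver.
void ApplyPass(const Pass& pass, std::vector<BasicBlock>& blocks) {
  if (!pass.Gate()) {
    return;
  }
  pass.Start();
  bool changed;
  do {
    changed = false;
    for (BasicBlock& bb : blocks) {
      changed |= pass.Worker(&bb);
    }
  } while (changed);
  pass.End();
}

int main() {
  std::vector<BasicBlock> blocks = {{0}, {1}, {2}};
  ApplyPass(PrintBlocks(), blocks);
  return 0;
}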
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 3a529f2096..00947902e7 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -119,7 +119,7 @@ class CallInlining : public PassME {
*/
class CodeLayout : public PassME {
public:
- CodeLayout() : PassME("CodeLayout", "2_post_layout_cfg") {
+ CodeLayout() : PassME("CodeLayout", kAllNodes, kOptimizationBasicBlockChange, "2_post_layout_cfg") {
}
void Start(const PassDataHolder* data) const {
@@ -133,72 +133,6 @@ class CodeLayout : public PassME {
};
/**
- * @class SSATransformation
- * @brief Perform an SSA representation pass on the CompilationUnit.
- */
-class SSATransformation : public PassME {
- public:
- SSATransformation() : PassME("SSATransformation", kPreOrderDFSTraversal, "3_post_ssa_cfg") {
- }
-
- bool Worker(const PassDataHolder* data) const;
-
- void Start(const PassDataHolder* data) const;
-
- void End(const PassDataHolder* data) const;
-};
-
-/**
- * @class ConstantPropagation
- * @brief Perform a constant propagation pass.
- */
-class ConstantPropagation : public PassME {
- public:
- ConstantPropagation() : PassME("ConstantPropagation") {
- }
-
- bool Worker(const PassDataHolder* data) const;
-
- void Start(const PassDataHolder* data) const {
- DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->InitializeConstantPropagation();
- }
-};
-
-/**
- * @class InitRegLocations
- * @brief Initialize Register Locations.
- */
-class InitRegLocations : public PassME {
- public:
- InitRegLocations() : PassME("InitRegLocation", kNoNodes) {
- }
-
- void Start(const PassDataHolder* data) const {
- DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->InitRegLocations();
- }
-};
-
-/**
- * @class MethodUseCount
- * @brief Count the register uses of the method
- */
-class MethodUseCount : public PassME {
- public:
- MethodUseCount() : PassME("UseCount") {
- }
-
- bool Worker(const PassDataHolder* data) const;
-
- bool Gate(const PassDataHolder* data) const;
-};
-
-/**
* @class NullCheckEliminationAndTypeInference
* @brief Null check elimination and type inference.
*/
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index c204ba55bd..32b554c72c 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -21,7 +21,7 @@
#include "dataflow_iterator-inl.h"
#include "leb128.h"
#include "mirror/object.h"
-#include "pass_driver_me.h"
+#include "pass_driver_me_opts.h"
#include "runtime.h"
#include "base/logging.h"
#include "base/timing_logger.h"
@@ -751,7 +751,7 @@ static bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
}
for (int idx = 0; idx < cu.mir_graph->GetNumBlocks(); idx++) {
- BasicBlock *bb = cu.mir_graph->GetBasicBlock(idx);
+ BasicBlock* bb = cu.mir_graph->GetBasicBlock(idx);
if (bb == NULL) continue;
if (bb->block_type == kDead) continue;
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
@@ -927,7 +927,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
}
/* Create the pass driver and launch it */
- PassDriverME pass_driver(&cu);
+ PassDriverMEOpts pass_driver(&cu);
pass_driver.Launch();
if (cu.enable_debug & (1 << kDebugDumpCheckStats)) {
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 47b233b463..5ff62743ce 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1282,7 +1282,7 @@ bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
while (true) {
- BasicBlock *pred_bb = GetBasicBlock(iter.Next());
+ BasicBlock* pred_bb = GetBasicBlock(iter.Next());
if (!pred_bb) break;
bool found = false;
if (pred_bb->taken == bb->id) {
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 0fffa01350..99dd50a53c 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -26,6 +26,7 @@
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "leb128.h"
+#include "pass_driver_me_post_opt.h"
namespace art {
@@ -353,7 +354,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
/* Always terminate the current block for conditional branches */
if (flags & Instruction::kContinue) {
- BasicBlock *fallthrough_block = FindBlock(cur_offset + width,
+ BasicBlock* fallthrough_block = FindBlock(cur_offset + width,
/*
* If the method is processed
* in sequential order from the
@@ -541,7 +542,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
* Note also that the dex_pc_to_block_map_ entry for the potentially
* throwing instruction will refer to the original basic block.
*/
- BasicBlock *new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+ BasicBlock* new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
block_list_.Insert(new_block);
new_block->start_offset = insn->offset;
cur_block->fall_through = new_block->id;
@@ -724,7 +725,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
}
current_offset_ += width;
- BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
+ BasicBlock* next_block = FindBlock(current_offset_, /* split */ false, /* create */
false, /* immed_pred_block_p */ NULL);
if (next_block) {
/*
@@ -1418,25 +1419,6 @@ void MIRGraph::SSATransformationStart() {
temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapRegisterV);
- /* Compute the DFS order */
- ComputeDFSOrders();
-
- /* Compute the dominator info */
- ComputeDominators();
-
- /* Allocate data structures in preparation for SSA conversion */
- CompilerInitializeSSAConversion();
-
- /* Find out the "Dalvik reg def x block" relation */
- ComputeDefBlockMatrix();
-
- /* Insert phi nodes to dominance frontiers for all variables */
- InsertPhiNodes();
-
- /* Rename register names by local defs and phi nodes */
- ClearAllVisitedFlags();
- DoDFSPreOrderSSARename(GetEntryBlock());
-
// Update the maximum number of reachable blocks.
max_num_reachable_blocks_ = num_reachable_blocks_;
}
@@ -1454,7 +1436,7 @@ void MIRGraph::SSATransformationEnd() {
}
void MIRGraph::ComputeTopologicalSortOrder() {
- std::queue<BasicBlock *> q;
+ std::queue<BasicBlock*> q;
std::map<int, int> visited_cnt_values;
// Clear the nodes.
@@ -1510,7 +1492,7 @@ void MIRGraph::ComputeTopologicalSortOrder() {
while (q.size() > 0) {
// Get top.
- BasicBlock *bb = q.front();
+ BasicBlock* bb = q.front();
q.pop();
DCHECK_EQ(bb->hidden, false);
@@ -1528,7 +1510,7 @@ void MIRGraph::ComputeTopologicalSortOrder() {
// Reduce visitedCnt for all the successors and add into the queue ones with visitedCnt equals to zero.
ChildBlockIterator succIter(bb, this);
- BasicBlock *successor = succIter.Next();
+ BasicBlock* successor = succIter.Next();
while (successor != nullptr) {
// one more predecessor was visited.
visited_cnt_values[successor->id]--;
@@ -1914,4 +1896,13 @@ BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
return res;
}
+void MIRGraph::CalculateBasicBlockInformation() {
+ PassDriverMEPostOpt driver(cu_);
+ driver.Launch();
+}
+
+void MIRGraph::InitializeBasicBlockData() {
+ num_blocks_ = block_list_.Size();
+}
+
} // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 3655125182..b04c16ea78 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -924,7 +924,7 @@ class MIRGraph {
void VerifyDataflow();
void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
void EliminateNullChecksAndInferTypesStart();
- bool EliminateNullChecksAndInferTypes(BasicBlock *bb);
+ bool EliminateNullChecksAndInferTypes(BasicBlock* bb);
void EliminateNullChecksAndInferTypesEnd();
bool EliminateClassInitChecksGate();
bool EliminateClassInitChecks(BasicBlock* bb);
@@ -1030,6 +1030,14 @@ class MIRGraph {
void AllocateSSAUseData(MIR *mir, int num_uses);
void AllocateSSADefData(MIR *mir, int num_defs);
+ void CalculateBasicBlockInformation();
+ void InitializeBasicBlockData();
+ void ComputeDFSOrders();
+ void ComputeDefBlockMatrix();
+ void ComputeDominators();
+ void CompilerInitializeSSAConversion();
+ void InsertPhiNodes();
+ void DoDFSPreOrderSSARename(BasicBlock* block);
/*
* IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
@@ -1046,7 +1054,6 @@ class MIRGraph {
void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
- void ComputeDFSOrders();
protected:
int FindCommonParent(int block1, int block2);
@@ -1055,7 +1062,6 @@ class MIRGraph {
void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
ArenaBitVector* live_in_v, int dalvik_reg_id);
void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
- void CompilerInitializeSSAConversion();
bool DoSSAConversion(BasicBlock* bb);
bool InvokeUsesMethodStar(MIR* mir);
int ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction);
@@ -1082,11 +1088,7 @@ class MIRGraph {
BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
void MarkPreOrder(BasicBlock* bb);
void RecordDFSOrders(BasicBlock* bb);
- void ComputeDefBlockMatrix();
void ComputeDomPostOrderTraversal(BasicBlock* bb);
- void ComputeDominators();
- void InsertPhiNodes();
- void DoDFSPreOrderSSARename(BasicBlock* block);
void SetConstant(int32_t ssa_reg, int value);
void SetConstantWide(int ssa_reg, int64_t value);
int GetSSAUseCount(int s_reg);
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 86092b6e3d..69c394f168 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -193,7 +193,7 @@ class ClassInitCheckEliminationTest : public testing::Test {
ASSERT_TRUE(gate_result);
RepeatingPreOrderDfsIterator iterator(cu_.mir_graph.get());
bool change = false;
- for (BasicBlock *bb = iterator.Next(change); bb != 0; bb = iterator.Next(change)) {
+ for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
change = cu_.mir_graph->EliminateClassInitChecks(bb);
}
cu_.mir_graph->EliminateClassInitChecksEnd();
diff --git a/compiler/dex/pass_driver_me.cc b/compiler/dex/pass_driver_me.cc
deleted file mode 100644
index e6d90e0b03..0000000000
--- a/compiler/dex/pass_driver_me.cc
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/macros.h"
-#include "bb_optimizations.h"
-#include "compiler_internals.h"
-#include "dataflow_iterator.h"
-#include "dataflow_iterator-inl.h"
-#include "pass_driver_me.h"
-
-namespace art {
-
-namespace { // anonymous namespace
-
-void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass, DataflowIterator* iterator) {
- // Paranoid: Check the iterator before walking the BasicBlocks.
- DCHECK(iterator != nullptr);
- bool change = false;
- for (BasicBlock *bb = iterator->Next(change); bb != 0; bb = iterator->Next(change)) {
- data->bb = bb;
- change = pass->Worker(data);
- }
-}
-
-template <typename Iterator>
-inline void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass) {
- DCHECK(data != nullptr);
- CompilationUnit* c_unit = data->c_unit;
- DCHECK(c_unit != nullptr);
- Iterator iterator(c_unit->mir_graph.get());
- DoWalkBasicBlocks(data, pass, &iterator);
-}
-} // anonymous namespace
-
-/*
- * Create the pass list. These passes are immutable and are shared across the threads.
- *
- * Advantage is that there will be no race conditions here.
- * Disadvantage is the passes can't change their internal states depending on CompilationUnit:
- * - This is not yet an issue: no current pass would require it.
- */
-// The initial list of passes to be used by the PassDriveME.
-template<>
-const Pass* const PassDriver<PassDriverME>::g_passes[] = {
- GetPassInstance<CacheFieldLoweringInfo>(),
- GetPassInstance<CacheMethodLoweringInfo>(),
- GetPassInstance<CallInlining>(),
- GetPassInstance<CodeLayout>(),
- GetPassInstance<SSATransformation>(),
- GetPassInstance<ConstantPropagation>(),
- GetPassInstance<InitRegLocations>(),
- GetPassInstance<MethodUseCount>(),
- GetPassInstance<NullCheckEliminationAndTypeInference>(),
- GetPassInstance<ClassInitCheckElimination>(),
- GetPassInstance<BBCombine>(),
- GetPassInstance<BBOptimizations>(),
-};
-
-// The number of the passes in the initial list of Passes (g_passes).
-template<>
-uint16_t const PassDriver<PassDriverME>::g_passes_size = arraysize(PassDriver<PassDriverME>::g_passes);
-
-// The default pass list is used by the PassDriverME instance of PassDriver to initialize pass_list_.
-template<>
-std::vector<const Pass*> PassDriver<PassDriverME>::g_default_pass_list(PassDriver<PassDriverME>::g_passes, PassDriver<PassDriverME>::g_passes + PassDriver<PassDriverME>::g_passes_size);
-
-// By default, do not have a dump pass list.
-template<>
-std::string PassDriver<PassDriverME>::dump_pass_list_ = std::string();
-
-// By default, do not have a print pass list.
-template<>
-std::string PassDriver<PassDriverME>::print_pass_list_ = std::string();
-
-// By default, we do not print the pass' information.
-template<>
-bool PassDriver<PassDriverME>::default_print_passes_ = false;
-
-
-PassDriverME::PassDriverME(CompilationUnit* cu)
- : PassDriver(), pass_me_data_holder_(), dump_cfg_folder_("/sdcard/") {
- pass_me_data_holder_.bb = nullptr;
- pass_me_data_holder_.c_unit = cu;
-}
-
-PassDriverME::~PassDriverME() {
-}
-
-void PassDriverME::DispatchPass(const Pass* pass) {
- VLOG(compiler) << "Dispatching " << pass->GetName();
- const PassME* me_pass = down_cast<const PassME*>(pass);
-
- DataFlowAnalysisMode mode = me_pass->GetTraversal();
-
- switch (mode) {
- case kPreOrderDFSTraversal:
- DoWalkBasicBlocks<PreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
- break;
- case kRepeatingPreOrderDFSTraversal:
- DoWalkBasicBlocks<RepeatingPreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
- break;
- case kRepeatingPostOrderDFSTraversal:
- DoWalkBasicBlocks<RepeatingPostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
- break;
- case kReversePostOrderDFSTraversal:
- DoWalkBasicBlocks<ReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
- break;
- case kRepeatingReversePostOrderDFSTraversal:
- DoWalkBasicBlocks<RepeatingReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
- break;
- case kPostOrderDOMTraversal:
- DoWalkBasicBlocks<PostOrderDOMIterator>(&pass_me_data_holder_, me_pass);
- break;
- case kAllNodes:
- DoWalkBasicBlocks<AllNodesIterator>(&pass_me_data_holder_, me_pass);
- break;
- case kNoNodes:
- break;
- default:
- LOG(FATAL) << "Iterator mode not handled in dispatcher: " << mode;
- break;
- }
-}
-
-bool PassDriverME::RunPass(const Pass* pass, bool time_split) {
- // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name
- DCHECK(pass != nullptr);
- DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
- CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
- DCHECK(c_unit != nullptr);
-
- // Do we perform a time split
- if (time_split) {
- c_unit->NewTimingSplit(pass->GetName());
- }
-
- // Check the pass gate first.
- bool should_apply_pass = pass->Gate(&pass_me_data_holder_);
-
- if (should_apply_pass) {
- bool old_print_pass = c_unit->print_pass;
-
- c_unit->print_pass = default_print_passes_;
-
- const char* print_pass_list = print_pass_list_.c_str();
-
- if (print_pass_list != nullptr && strstr(print_pass_list, pass->GetName()) != nullptr) {
- c_unit->print_pass = true;
- }
-
- // Applying the pass: first start, doWork, and end calls.
- ApplyPass(&pass_me_data_holder_, pass);
-
- // Do we want to log it?
- bool should_dump = ((c_unit->enable_debug & (1 << kDebugDumpCFG)) != 0);
-
- const char* dump_pass_list = dump_pass_list_.c_str();
-
- if (dump_pass_list != nullptr) {
- bool found = strstr(dump_pass_list, pass->GetName());
- should_dump = (should_dump || found);
- }
-
- if (should_dump) {
- // Do we want to log it?
- if ((c_unit->enable_debug& (1 << kDebugDumpCFG)) != 0) {
- // Do we have a pass folder?
- const PassME* me_pass = (down_cast<const PassME*>(pass));
- const char* passFolder = me_pass->GetDumpCFGFolder();
- DCHECK(passFolder != nullptr);
-
- if (passFolder[0] != 0) {
- // Create directory prefix.
- std::string prefix = GetDumpCFGFolder();
- prefix += passFolder;
- prefix += "/";
-
- c_unit->mir_graph->DumpCFG(prefix.c_str(), false);
- }
- }
- }
-
- c_unit->print_pass = old_print_pass;
- }
-
- // If the pass gate passed, we can declare success.
- return should_apply_pass;
-}
-
-const char* PassDriverME::GetDumpCFGFolder() const {
- return dump_cfg_folder_;
-}
-
-
-} // namespace art
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 0142934be2..7d76fb83d4 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -18,28 +18,155 @@
#define ART_COMPILER_DEX_PASS_DRIVER_ME_H_
#include "bb_optimizations.h"
+#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
#include "pass_driver.h"
#include "pass_me.h"
namespace art {
-class PassDriverME: public PassDriver<PassDriverME> {
+template <typename PassDriverType>
+class PassDriverME: public PassDriver<PassDriverType> {
public:
- explicit PassDriverME(CompilationUnit* cu);
- ~PassDriverME();
- /**
- * @brief Dispatch a patch: walk the BasicBlocks depending on the traversal mode
- */
- void DispatchPass(const Pass* pass);
- bool RunPass(const Pass* pass, bool time_split = false);
- const char* GetDumpCFGFolder() const;
+ explicit PassDriverME(CompilationUnit* cu)
+ : pass_me_data_holder_(), dump_cfg_folder_("/sdcard/") {
+ pass_me_data_holder_.bb = nullptr;
+ pass_me_data_holder_.c_unit = cu;
+ }
+
+ ~PassDriverME() {
+ }
+
+ void DispatchPass(const Pass* pass) {
+ VLOG(compiler) << "Dispatching " << pass->GetName();
+ const PassME* me_pass = down_cast<const PassME*>(pass);
+
+ DataFlowAnalysisMode mode = me_pass->GetTraversal();
+
+ switch (mode) {
+ case kPreOrderDFSTraversal:
+ DoWalkBasicBlocks<PreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kRepeatingPreOrderDFSTraversal:
+ DoWalkBasicBlocks<RepeatingPreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kRepeatingPostOrderDFSTraversal:
+ DoWalkBasicBlocks<RepeatingPostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kReversePostOrderDFSTraversal:
+ DoWalkBasicBlocks<ReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kRepeatingReversePostOrderDFSTraversal:
+ DoWalkBasicBlocks<RepeatingReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kPostOrderDOMTraversal:
+ DoWalkBasicBlocks<PostOrderDOMIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kAllNodes:
+ DoWalkBasicBlocks<AllNodesIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kNoNodes:
+ break;
+ default:
+ LOG(FATAL) << "Iterator mode not handled in dispatcher: " << mode;
+ break;
+ }
+ }
+
+ bool RunPass(const Pass* pass, bool time_split) {
+ // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name
+ DCHECK(pass != nullptr);
+ DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
+ CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
+ DCHECK(c_unit != nullptr);
+
+ // Do we perform a time split
+ if (time_split) {
+ c_unit->NewTimingSplit(pass->GetName());
+ }
+
+ // Check the pass gate first.
+ bool should_apply_pass = pass->Gate(&pass_me_data_holder_);
+ if (should_apply_pass) {
+ bool old_print_pass = c_unit->print_pass;
+
+ c_unit->print_pass = PassDriver<PassDriverType>::default_print_passes_;
+
+ const char* print_pass_list = PassDriver<PassDriverType>::print_pass_list_.c_str();
+
+ if (print_pass_list != nullptr && strstr(print_pass_list, pass->GetName()) != nullptr) {
+ c_unit->print_pass = true;
+ }
+
+ // Applying the pass: first start, doWork, and end calls.
+ this->ApplyPass(&pass_me_data_holder_, pass);
+
+ bool should_dump = ((c_unit->enable_debug & (1 << kDebugDumpCFG)) != 0);
+
+ const char* dump_pass_list = PassDriver<PassDriverType>::dump_pass_list_.c_str();
+
+ if (dump_pass_list != nullptr) {
+ bool found = strstr(dump_pass_list, pass->GetName());
+ should_dump = (should_dump || found);
+ }
+
+ if (should_dump) {
+ // Do we want to log it?
+ if ((c_unit->enable_debug & (1 << kDebugDumpCFG)) != 0) {
+ // Do we have a pass folder?
+ const PassME* me_pass = (down_cast<const PassME*>(pass));
+ const char* passFolder = me_pass->GetDumpCFGFolder();
+ DCHECK(passFolder != nullptr);
+
+ if (passFolder[0] != 0) {
+ // Create directory prefix.
+ std::string prefix = GetDumpCFGFolder();
+ prefix += passFolder;
+ prefix += "/";
+
+ c_unit->mir_graph->DumpCFG(prefix.c_str(), false);
+ }
+ }
+ }
+
+ c_unit->print_pass = old_print_pass;
+ }
+
+ // If the pass gate passed, we can declare success.
+ return should_apply_pass;
+ }
+
+ const char* GetDumpCFGFolder() const {
+ return dump_cfg_folder_;
+ }
+
protected:
/** @brief The data holder that contains data needed for the PassDriverME. */
PassMEDataHolder pass_me_data_holder_;
/** @brief Dump CFG base folder: where is the base folder for dumping CFGs. */
const char* dump_cfg_folder_;
-};
+ static void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass,
+ DataflowIterator* iterator) {
+ // Paranoid: Check the iterator before walking the BasicBlocks.
+ DCHECK(iterator != nullptr);
+ bool change = false;
+ for (BasicBlock* bb = iterator->Next(change); bb != nullptr; bb = iterator->Next(change)) {
+ data->bb = bb;
+ change = pass->Worker(data);
+ }
+ }
+
+ template <typename Iterator>
+ inline static void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass) {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = data->c_unit;
+ DCHECK(c_unit != nullptr);
+ Iterator iterator(c_unit->mir_graph.get());
+ DoWalkBasicBlocks(data, pass, &iterator);
+ }
+};
} // namespace art
#endif // ART_COMPILER_DEX_PASS_DRIVER_ME_H_
+
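
The key change above is that PassDriverME is now a class template over the concrete driver type (the CRTP). Since PassDriver<T>'s static members are per-instantiation, PassDriverMEOpts and PassDriverMEPostOpt each get their own g_passes, dump_pass_list_, print_pass_list_, and default_print_passes_ while sharing one implementation; it is also why the body must write this->ApplyPass(...) to reach the dependent base member. A minimal, self-contained illustration of the per-instantiation statics (names simplified, not the real classes):

#include <iostream>
#include <string>

// Each instantiation of the template owns an independent copy of its statics.
template <typename DriverType>
class PassDriver {
 public:
  static std::string print_pass_list_;
};

class OptsDriver : public PassDriver<OptsDriver> {};
class PostOptDriver : public PassDriver<PostOptDriver> {};

// Explicit specializations, mirroring the template<> definitions in
// pass_driver_me_opts.cc and pass_driver_me_post_opt.cc below.
template <> std::string PassDriver<OptsDriver>::print_pass_list_ = "opts";
template <> std::string PassDriver<PostOptDriver>::print_pass_list_ = "post-opt";

int main() {
  // Two independent statics despite a single class template definition.
  std::cout << OptsDriver::print_pass_list_ << '\n';     // prints: opts
  std::cout << PostOptDriver::print_pass_list_ << '\n';  // prints: post-opt
  return 0;
}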
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
new file mode 100644
index 0000000000..52a2273c40
--- /dev/null
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/macros.h"
+#include "bb_optimizations.h"
+#include "compiler_internals.h"
+#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
+#include "pass_driver_me_opts.h"
+
+namespace art {
+
+/*
+ * Create the pass list. These passes are immutable and are shared across the threads.
+ *
+ * Advantage is that there will be no race conditions here.
+ * Disadvantage is the passes can't change their internal states depending on CompilationUnit:
+ * - This is not yet an issue: no current pass would require it.
+ */
+// The initial list of passes to be used by the PassDriverMEOpts.
+template<>
+const Pass* const PassDriver<PassDriverMEOpts>::g_passes[] = {
+ GetPassInstance<CacheFieldLoweringInfo>(),
+ GetPassInstance<CacheMethodLoweringInfo>(),
+ GetPassInstance<CallInlining>(),
+ GetPassInstance<CodeLayout>(),
+ GetPassInstance<NullCheckEliminationAndTypeInference>(),
+ GetPassInstance<ClassInitCheckElimination>(),
+ GetPassInstance<BBCombine>(),
+ GetPassInstance<BBOptimizations>(),
+};
+
+// The number of the passes in the initial list of Passes (g_passes).
+template<>
+uint16_t const PassDriver<PassDriverMEOpts>::g_passes_size =
+ arraysize(PassDriver<PassDriverMEOpts>::g_passes);
+
+// The default pass list is used by the PassDriverME instance of PassDriver
+// to initialize pass_list_.
+template<>
+std::vector<const Pass*> PassDriver<PassDriverMEOpts>::g_default_pass_list(
+ PassDriver<PassDriverMEOpts>::g_passes,
+ PassDriver<PassDriverMEOpts>::g_passes +
+ PassDriver<PassDriverMEOpts>::g_passes_size);
+
+// By default, do not have a dump pass list.
+template<>
+std::string PassDriver<PassDriverMEOpts>::dump_pass_list_ = std::string();
+
+// By default, do not have a print pass list.
+template<>
+std::string PassDriver<PassDriverMEOpts>::print_pass_list_ = std::string();
+
+// By default, we do not print the pass' information.
+template<>
+bool PassDriver<PassDriverMEOpts>::default_print_passes_ = false;
+
+void PassDriverMEOpts::ApplyPass(PassDataHolder* data, const Pass* pass) {
+ // First call the base class' version.
+ PassDriver::ApplyPass(data, pass);
+
+ const PassME* pass_me = down_cast<const PassME*> (pass);
+ DCHECK(pass_me != nullptr);
+
+ PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
+
+ // Now we care about flags.
+ if ((pass_me->GetFlag(kOptimizationBasicBlockChange) == true) ||
+ (pass_me->GetFlag(kOptimizationDefUsesChange) == true)) {
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ c_unit->mir_graph.get()->CalculateBasicBlockInformation();
+ }
+}
+
+} // namespace art
diff --git a/compiler/dex/pass_driver_me_opts.h b/compiler/dex/pass_driver_me_opts.h
new file mode 100644
index 0000000000..0a5b5aec99
--- /dev/null
+++ b/compiler/dex/pass_driver_me_opts.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_OPTS_H_
+#define ART_COMPILER_DEX_PASS_DRIVER_ME_OPTS_H_
+
+#include "pass_driver_me.h"
+
+namespace art {
+
+// Forward Declarations.
+struct CompilationUnit;
+class Pass;
+class PassDataHolder;
+
+class PassDriverMEOpts : public PassDriverME<PassDriverMEOpts> {
+ public:
+ explicit PassDriverMEOpts(CompilationUnit* cu) : PassDriverME<PassDriverMEOpts>(cu) {
+ }
+
+ ~PassDriverMEOpts() {
+ }
+
+ /**
+ * @brief Apply a pass: perform start/work/end functions.
+ */
+ virtual void ApplyPass(PassDataHolder* data, const Pass* pass);
+};
+
+} // namespace art
+#endif // ART_COMPILER_DEX_PASS_DRIVER_ME_OPTS_H_
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
new file mode 100644
index 0000000000..cb63f4184f
--- /dev/null
+++ b/compiler/dex/pass_driver_me_post_opt.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/macros.h"
+#include "post_opt_passes.h"
+#include "compiler_internals.h"
+#include "pass_driver_me_post_opt.h"
+
+namespace art {
+
+/*
+ * Create the pass list. These passes are immutable and are shared across the threads.
+ *
+ * Advantage is that there will be no race conditions here.
+ * Disadvantage is the passes can't change their internal states depending on CompilationUnit:
+ * - This is not yet an issue: no current pass would require it.
+ */
+// The initial list of passes to be used by the PassDriverMEPostOpt.
+template<>
+const Pass* const PassDriver<PassDriverMEPostOpt>::g_passes[] = {
+ GetPassInstance<InitializeData>(),
+ GetPassInstance<ClearPhiInstructions>(),
+ GetPassInstance<CalculatePredecessors>(),
+ GetPassInstance<DFSOrders>(),
+ GetPassInstance<BuildDomination>(),
+ GetPassInstance<DefBlockMatrix>(),
+ GetPassInstance<CreatePhiNodes>(),
+ GetPassInstance<ClearVisitedFlag>(),
+ GetPassInstance<SSAConversion>(),
+ GetPassInstance<PhiNodeOperands>(),
+ GetPassInstance<ConstantPropagation>(),
+ GetPassInstance<PerformInitRegLocations>(),
+ GetPassInstance<MethodUseCount>(),
+ GetPassInstance<FreeData>(),
+};
+
+// The number of the passes in the initial list of Passes (g_passes).
+template<>
+uint16_t const PassDriver<PassDriverMEPostOpt>::g_passes_size =
+ arraysize(PassDriver<PassDriverMEPostOpt>::g_passes);
+
+// The default pass list is used by the PassDriverME instance of PassDriver
+// to initialize pass_list_.
+template<>
+std::vector<const Pass*> PassDriver<PassDriverMEPostOpt>::g_default_pass_list(
+ PassDriver<PassDriverMEPostOpt>::g_passes,
+ PassDriver<PassDriverMEPostOpt>::g_passes +
+ PassDriver<PassDriverMEPostOpt>::g_passes_size);
+
+// By default, do not have a dump pass list.
+template<>
+std::string PassDriver<PassDriverMEPostOpt>::dump_pass_list_ = std::string();
+
+// By default, do not have a print pass list.
+template<>
+std::string PassDriver<PassDriverMEPostOpt>::print_pass_list_ = std::string();
+
+// By default, we do not print the pass' information.
+template<>
+bool PassDriver<PassDriverMEPostOpt>::default_print_passes_ = false;
+
+} // namespace art
diff --git a/compiler/dex/pass_driver_me_post_opt.h b/compiler/dex/pass_driver_me_post_opt.h
new file mode 100644
index 0000000000..574a6ba04d
--- /dev/null
+++ b/compiler/dex/pass_driver_me_post_opt.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_POST_OPT_H_
+#define ART_COMPILER_DEX_PASS_DRIVER_ME_POST_OPT_H_
+
+#include "pass_driver_me.h"
+
+namespace art {
+
+// Forward Declarations.
+struct CompilationUnit;
+class Pass;
+class PassDataHolder;
+
+class PassDriverMEPostOpt : public PassDriverME<PassDriverMEPostOpt> {
+ public:
+ explicit PassDriverMEPostOpt(CompilationUnit* cu) : PassDriverME<PassDriverMEPostOpt>(cu) {
+ }
+
+ ~PassDriverMEPostOpt() {
+ }
+};
+
+} // namespace art
+#endif // ART_COMPILER_DEX_PASS_DRIVER_ME_POST_OPT_H_
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
index 069fb45dc4..9efd5aeb40 100644
--- a/compiler/dex/pass_me.h
+++ b/compiler/dex/pass_me.h
@@ -32,6 +32,9 @@ class Pass;
* @details Each enum should be a power of 2 to be correctly used.
*/
enum OptimizationFlag {
+ kOptimizationBasicBlockChange = 1, /**< @brief Has there been a change to a BasicBlock? */
+ kOptimizationDefUsesChange = 2, /**< @brief Has there been a change to a def-use? */
+ kLoopStructureChange = 4, /**< @brief Has there been a loop structural change? */
};
// Data holder class.
@@ -93,7 +96,7 @@ class PassME: public Pass {
/** @brief Type of traversal: determines the order to execute the pass on the BasicBlocks. */
const DataFlowAnalysisMode traversal_type_;
- /** @brief Flags for additional directives: used to determine if a particular clean-up is necessary post pass. */
+ /** @brief Flags for additional directives: used to determine if a particular post-optimization pass is necessary. */
const unsigned int flags_;
/** @brief CFG Dump Folder: what sub-folder to use for dumping the CFGs post pass. */
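Because every OptimizationFlag is a power of two, a pass can OR several of them into its flags_ word at construction, and the driver can test each bit independently; this is exactly what the new PassDriverMEOpts::ApplyPass above does with GetFlag before deciding to rerun the post-opt pipeline. A compilable toy version of that bitmask logic (the PassME here is a simplified stand-in, not the real class):

#include <iostream>

// Power-of-two values, as in pass_me.h, so flags combine as a bitmask.
enum OptimizationFlag {
  kOptimizationBasicBlockChange = 1,
  kOptimizationDefUsesChange = 2,
  kLoopStructureChange = 4,
};

struct PassME {
  unsigned int flags_;
  explicit PassME(unsigned int flags) : flags_(flags) {}
  bool GetFlag(OptimizationFlag flag) const { return (flags_ & flag) != 0; }
};

int main() {
  // A pass may set several flags at construction time.
  PassME pass(kOptimizationBasicBlockChange | kOptimizationDefUsesChange);

  // The driver then tests them individually, as ApplyPass does above.
  if (pass.GetFlag(kOptimizationBasicBlockChange) ||
      pass.GetFlag(kOptimizationDefUsesChange)) {
    std::cout << "CFG may be stale: rerun the post-opt passes\n";
  }
  return 0;
}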
diff --git a/compiler/dex/post_opt_passes.cc b/compiler/dex/post_opt_passes.cc
new file mode 100644
index 0000000000..58700a4bd3
--- /dev/null
+++ b/compiler/dex/post_opt_passes.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "post_opt_passes.h"
+#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
+
+namespace art {
+
+/*
+ * MethodUseCount pass implementation start.
+ */
+bool MethodUseCount::Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ // First initialize the data.
+ c_unit->mir_graph->InitializeMethodUses();
+
+ // Now check if the pass is to be ignored.
+ bool res = ((c_unit->disable_opt & (1 << kPromoteRegs)) == 0);
+
+ return res;
+}
+
+bool MethodUseCount::Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
+ c_unit->mir_graph->CountUses(bb);
+ // No need of repeating, so just return false.
+ return false;
+}
+
+
+bool ClearPhiInstructions::Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
+ MIR* mir = bb->first_mir_insn;
+
+ while (mir != nullptr) {
+ MIR* next = mir->next;
+
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+
+ if (opcode == static_cast<Instruction::Code> (kMirOpPhi)) {
+ bb->RemoveMIR(mir);
+ }
+
+ mir = next;
+ }
+
+ // We do not care about reporting whether any MIR changed.
+ return false;
+}
+
+void CalculatePredecessors::Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ // Get the MIRGraph first to factor out repeated accessor calls below.
+ MIRGraph* mir_graph = c_unit->mir_graph.get();
+
+ // First clear all predecessors.
+ AllNodesIterator first(mir_graph);
+ for (BasicBlock* bb = first.Next(); bb != nullptr; bb = first.Next()) {
+ bb->predecessors->Reset();
+ }
+
+ // Now calculate all predecessors.
+ AllNodesIterator second(mir_graph);
+ for (BasicBlock* bb = second.Next(); bb != nullptr; bb = second.Next()) {
+ // We only care about non hidden blocks.
+ if (bb->hidden == true) {
+ continue;
+ }
+
+ // Create iterator for visiting children.
+ ChildBlockIterator child_iter(bb, mir_graph);
+
+ // Now iterate through the children to set the predecessor bits.
+ for (BasicBlock* child = child_iter.Next(); child != nullptr; child = child_iter.Next()) {
+ child->predecessors->Insert(bb->id);
+ }
+ }
+}
+
+} // namespace art
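
One detail in ClearPhiInstructions::Worker worth calling out: it saves mir->next before calling RemoveMIR, the standard idiom for deleting from an intrusive singly linked list while walking it. A self-contained sketch of the same idiom (MIR and kMirOpPhi below are simplified stand-ins, with a placeholder opcode value rather than ART's real one):

#include <iostream>

struct MIR { int opcode; MIR* next; };
const int kMirOpPhi = 0x100;  // Placeholder value for the phi pseudo-opcode.

// Remove every phi pseudo-instruction from a singly linked MIR list.
void ClearPhis(MIR** head) {
  MIR** link = head;  // Slot that points at the current node.
  for (MIR* mir = *head; mir != nullptr;) {
    MIR* next = mir->next;  // Capture before unlinking, as Worker does above.
    if (mir->opcode == kMirOpPhi) {
      *link = next;  // Unlink; 'link' still points at the predecessor's slot.
      delete mir;
    } else {
      link = &mir->next;
    }
    mir = next;
  }
}

int main() {
  MIR* c = new MIR{1, nullptr};
  MIR* b = new MIR{kMirOpPhi, c};
  MIR* head = new MIR{0, b};
  ClearPhis(&head);
  for (MIR* m = head; m != nullptr; m = m->next) {
    std::cout << m->opcode << ' ';
  }
  std::cout << '\n';  // Prints: 0 1
  // The two surviving nodes are leaked for brevity; a real list owns its nodes.
  return 0;
}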
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
new file mode 100644
index 0000000000..f2035052c9
--- /dev/null
+++ b/compiler/dex/post_opt_passes.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_POST_OPT_PASSES_H_
+#define ART_COMPILER_DEX_POST_OPT_PASSES_H_
+
+#include "compiler_internals.h"
+#include "pass_me.h"
+
+namespace art {
+
+/**
+ * @class InitializeData
+ * @brief There is some data that needs to be initialized before performing
+ * the post optimization passes.
+ */
+class InitializeData : public PassME {
+ public:
+ InitializeData() : PassME("InitializeData") {
+ }
+
+ void Start(const PassDataHolder* data) const {
+ // New blocks may have been inserted so the first thing we do is ensure that
+ // the c_unit's number of blocks matches the actual count of basic blocks.
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph.get()->InitializeBasicBlockData();
+ c_unit->mir_graph.get()->SSATransformationStart();
+ }
+};
+
+/**
+ * @class MethodUseCount
+ * @brief Count the register uses of the method
+ */
+class MethodUseCount : public PassME {
+ public:
+ MethodUseCount() : PassME("UseCount") {
+ }
+
+ bool Worker(const PassDataHolder* data) const;
+
+ bool Gate(const PassDataHolder* data) const;
+};
+
+/**
+ * @class ClearPhiInstructions
+ * @brief Clear the PHI nodes from the CFG.
+ */
+class ClearPhiInstructions : public PassME {
+ public:
+ ClearPhiInstructions() : PassME("ClearPhiInstructions") {
+ }
+
+ bool Worker(const PassDataHolder* data) const;
+};
+
+/**
+ * @class CalculatePredecessors
+ * @brief Calculate the predecessor BitVector of each Basicblock.
+ */
+class CalculatePredecessors : public PassME {
+ public:
+ CalculatePredecessors() : PassME("CalculatePredecessors") {
+ }
+
+ void Start(const PassDataHolder* data) const;
+};
+
+/**
+ * @class DFSOrders
+ * @brief Compute the DFS order of the MIR graph
+ */
+class DFSOrders : public PassME {
+ public:
+ DFSOrders() : PassME("DFSOrders") {
+ }
+
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph.get()->ComputeDFSOrders();
+ }
+};
+
+/**
+ * @class BuildDomination
+ * @brief Build the domination information of the MIR Graph
+ */
+class BuildDomination : public PassME {
+ public:
+ BuildDomination() : PassME("BuildDomination") {
+ }
+
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph.get()->ComputeDominators();
+ c_unit->mir_graph.get()->CompilerInitializeSSAConversion();
+ }
+
+ void End(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ // Verify the dataflow information after the pass.
+ if (c_unit->enable_debug & (1 << kDebugVerifyDataflow)) {
+ c_unit->mir_graph->VerifyDataflow();
+ }
+ }
+};
+
+/**
+ * @class DefBlockMatrix
+ * @brief Calculate the matrix of definition per basic block
+ */
+class DefBlockMatrix : public PassME {
+ public:
+ DefBlockMatrix() : PassME("DefBlockMatrix") {
+ }
+
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph.get()->ComputeDefBlockMatrix();
+ }
+};
+
+/**
+ * @class CreatePhiNodes
+ * @brief Pass to create the phi nodes after SSA calculation
+ */
+class CreatePhiNodes : public PassME {
+ public:
+ CreatePhiNodes() : PassME("CreatePhiNodes") {
+ }
+
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph.get()->InsertPhiNodes();
+ }
+};
+
+/**
+ * @class ClearVisitedFlag
+ * @brief Pass to clear the visited flag for all basic blocks.
+ */
+
+class ClearVisitedFlag : public PassME {
+ public:
+ ClearVisitedFlag() : PassME("ClearVisitedFlag") {
+ }
+
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph.get()->ClearAllVisitedFlags();
+ }
+};
+
+/**
+ * @class SSAConversion
+ * @brief Pass for SSA conversion of MIRs
+ */
+class SSAConversion : public PassME {
+ public:
+ SSAConversion() : PassME("SSAConversion") {
+ }
+
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ MIRGraph* mir_graph = c_unit->mir_graph.get();
+ mir_graph->DoDFSPreOrderSSARename(mir_graph->GetEntryBlock());
+ }
+};
+
+/**
+ * @class PhiNodeOperands
+ * @brief Pass to insert the Phi node operands to basic blocks
+ */
+class PhiNodeOperands : public PassME {
+ public:
+ PhiNodeOperands() : PassME("PhiNodeOperands", kPreOrderDFSTraversal) {
+ }
+
+ bool Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ BasicBlock* bb = down_cast<const PassMEDataHolder*>(data)->bb;
+ DCHECK(bb != nullptr);
+ c_unit->mir_graph->InsertPhiNodeOperands(bb);
+ // No need of repeating, so just return false.
+ return false;
+ }
+};
+
+/**
+ * @class PerformInitRegLocations
+ * @brief Initialize Register Locations.
+ */
+class PerformInitRegLocations : public PassME {
+ public:
+ PerformInitRegLocations() : PassME("PerformInitRegLocation") {
+ }
+
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->InitRegLocations();
+ }
+};
+
+/**
+ * @class ConstantPropagation
+ * @brief Perform a constant propagation pass.
+ */
+class ConstantPropagation : public PassME {
+ public:
+ ConstantPropagation() : PassME("ConstantPropagation") {
+ }
+
+ bool Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ BasicBlock* bb = down_cast<const PassMEDataHolder*>(data)->bb;
+ DCHECK(bb != nullptr);
+ c_unit->mir_graph->DoConstantPropagation(bb);
+ // No need of repeating, so just return false.
+ return false;
+ }
+
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->InitializeConstantPropagation();
+ }
+};
+
+/**
+ * @class FreeData
+ * @brief There is some data that needs to be freed after performing the post optimization passes.
+ */
+class FreeData : public PassME {
+ public:
+ FreeData() : PassME("FreeData") {
+ }
+
+ void End(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph.get()->SSATransformationEnd();
+ }
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_POST_OPT_PASSES_H_
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index a6d56bdf3b..eef3294b7c 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -443,14 +443,10 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
rl_src.reg = TargetReg(kArg0);
rl_src.home = false;
MarkLive(rl_src);
- if (rl_method.wide) {
- StoreValueWide(rl_method, rl_src);
- } else {
- StoreValue(rl_method, rl_src);
- }
+ StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
+ StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0));
}
if (cu_->num_ins == 0) {
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 59ae16ed36..058b89c499 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -407,7 +407,16 @@ RegStorage Mir2Lir::AllocTempWide() {
}
RegStorage Mir2Lir::AllocTempWord() {
- return (Is64BitInstructionSet(cu_->instruction_set)) ? AllocTempWide() : AllocTemp();
+ // FIXME: temporary workaround. For bring-up purposes, x86_64 needs the ability
+ // to allocate wide values as a pair of core registers. However, we can't hold
+ // a reference in a register pair. This workaround will be removed when the
+ // reference handling code is reworked, or x86_64 backend starts using wide core
+ // registers - whichever happens first.
+ if (cu_->instruction_set == kX86_64) {
+ return AllocTemp();
+ } else {
+ return (Is64BitInstructionSet(cu_->instruction_set)) ? AllocTempWide() : AllocTemp();
+ }
}
RegStorage Mir2Lir::AllocTempSingle() {
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 0c5a4ca1d1..bd6bc22531 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -244,9 +244,9 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
/* Calculate DF_up */
for (uint32_t dominated_idx : bb->i_dominated->Indexes()) {
- BasicBlock *dominated_bb = GetBasicBlock(dominated_idx);
+ BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
for (uint32_t df_up_block_idx : dominated_bb->dom_frontier->Indexes()) {
- BasicBlock *df_up_block = GetBasicBlock(df_up_block_idx);
+ BasicBlock* df_up_block = GetBasicBlock(df_up_block_idx);
CheckForDominanceFrontier(bb, df_up_block);
}
}
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 95b3d86d5f..a4c62ade40 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -425,6 +425,9 @@ void MIRGraph::InitRegLocations() {
loc[ct->s_reg_low].defined = true;
}
+ /* Treat Method* as a normal reference */
+ loc[GetMethodSReg()].ref = true;
+
reg_location_ = loc;
int num_regs = cu_->num_dalvik_registers;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 9927fe1aa3..8f4eddbea3 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -60,7 +60,7 @@ class JniCompilerTest : public CommonCompilerTest {
} else {
method = c->FindVirtualMethod(method_name, method_sig);
}
- ASSERT_TRUE(method != NULL) << method_name << " " << method_sig;
+ ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
if (method->GetEntryPointFromQuickCompiledCode() == nullptr) {
ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() == nullptr);
CompileMethod(method);
@@ -88,16 +88,16 @@ class JniCompilerTest : public CommonCompilerTest {
// JNI operations after runtime start.
env_ = Thread::Current()->GetJniEnv();
jklass_ = env_->FindClass("MyClassNatives");
- ASSERT_TRUE(jklass_ != NULL) << method_name << " " << method_sig;
+ ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig;
if (direct) {
jmethod_ = env_->GetStaticMethodID(jklass_, method_name, method_sig);
} else {
jmethod_ = env_->GetMethodID(jklass_, method_name, method_sig);
}
- ASSERT_TRUE(jmethod_ != NULL) << method_name << " " << method_sig;
+ ASSERT_TRUE(jmethod_ != nullptr) << method_name << " " << method_sig;
- if (native_fnptr != NULL) {
+ if (native_fnptr != nullptr) {
JNINativeMethod methods[] = { { method_name, method_sig, native_fnptr } };
ASSERT_EQ(JNI_OK, env_->RegisterNatives(jklass_, methods, 1))
<< method_name << " " << method_sig;
@@ -107,7 +107,7 @@ class JniCompilerTest : public CommonCompilerTest {
jmethodID constructor = env_->GetMethodID(jklass_, "<init>", "()V");
jobj_ = env_->NewObject(jklass_, constructor);
- ASSERT_TRUE(jobj_ != NULL) << method_name << " " << method_sig;
+ ASSERT_TRUE(jobj_ != nullptr) << method_name << " " << method_sig;
}
public:
@@ -125,13 +125,14 @@ jclass JniCompilerTest::jklass_;
jobject JniCompilerTest::jobj_;
jobject JniCompilerTest::class_loader_;
+
int gJava_MyClassNatives_foo_calls = 0;
void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) {
// 1 = thisObj
EXPECT_EQ(kNative, Thread::Current()->GetState());
Locks::mutator_lock_->AssertNotHeld(Thread::Current());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
+ EXPECT_TRUE(thisObj != nullptr);
EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
gJava_MyClassNatives_foo_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -151,8 +152,8 @@ TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) {
TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) {
TEST_DISABLED_FOR_PORTABLE();
- SetUpForTest(false, "bar", "(I)I",
- NULL /* calling through stub will link with &Java_MyClassNatives_bar */);
+ SetUpForTest(false, "bar", "(I)I", nullptr);
+ // calling through stub will link with &Java_MyClassNatives_bar
ScopedObjectAccess soa(Thread::Current());
std::string reason;
@@ -168,8 +169,8 @@ TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) {
TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) {
TEST_DISABLED_FOR_PORTABLE();
- SetUpForTest(true, "sbar", "(I)I",
- NULL /* calling through stub will link with &Java_MyClassNatives_sbar */);
+ SetUpForTest(true, "sbar", "(I)I", nullptr);
+ // calling through stub will link with &Java_MyClassNatives_sbar
ScopedObjectAccess soa(Thread::Current());
std::string reason;
@@ -188,7 +189,7 @@ jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) {
// 1 = thisObj
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
+ EXPECT_TRUE(thisObj != nullptr);
EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
gJava_MyClassNatives_fooI_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -215,7 +216,7 @@ jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) {
// 1 = thisObj
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
+ EXPECT_TRUE(thisObj != nullptr);
EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
gJava_MyClassNatives_fooII_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -243,7 +244,7 @@ jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y)
// 1 = thisObj
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
+ EXPECT_TRUE(thisObj != nullptr);
EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
gJava_MyClassNatives_fooJJ_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -272,7 +273,7 @@ jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdoub
// 1 = thisObj
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
+ EXPECT_TRUE(thisObj != nullptr);
EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
gJava_MyClassNatives_fooDD_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -302,7 +303,7 @@ jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong
// 1 = thisObj
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
+ EXPECT_TRUE(thisObj != nullptr);
EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
gJava_MyClassNatives_fooJJ_synchronized_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -329,7 +330,7 @@ jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject
// 3 = this + y + z
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
+ EXPECT_TRUE(thisObj != nullptr);
EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
gJava_MyClassNatives_fooIOO_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -353,28 +354,28 @@ TEST_F(JniCompilerTest, CompileAndRunIntObjectObjectMethod) {
reinterpret_cast<void*>(&Java_MyClassNatives_fooIOO));
EXPECT_EQ(0, gJava_MyClassNatives_fooIOO_calls);
- jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, NULL, NULL);
+ jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, nullptr);
EXPECT_TRUE(env_->IsSameObject(jobj_, result));
EXPECT_EQ(1, gJava_MyClassNatives_fooIOO_calls);
- result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, NULL, jklass_);
+ result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, jklass_);
EXPECT_TRUE(env_->IsSameObject(jobj_, result));
EXPECT_EQ(2, gJava_MyClassNatives_fooIOO_calls);
- result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, NULL, jklass_);
- EXPECT_TRUE(env_->IsSameObject(NULL, result));
+ result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, nullptr, jklass_);
+ EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(3, gJava_MyClassNatives_fooIOO_calls);
- result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, NULL, jklass_);
+ result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, nullptr, jklass_);
EXPECT_TRUE(env_->IsSameObject(jklass_, result));
EXPECT_EQ(4, gJava_MyClassNatives_fooIOO_calls);
- result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, jklass_, NULL);
+ result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, jklass_, nullptr);
EXPECT_TRUE(env_->IsSameObject(jobj_, result));
EXPECT_EQ(5, gJava_MyClassNatives_fooIOO_calls);
- result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, jklass_, NULL);
+ result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, jklass_, nullptr);
EXPECT_TRUE(env_->IsSameObject(jklass_, result));
EXPECT_EQ(6, gJava_MyClassNatives_fooIOO_calls);
- result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, NULL);
- EXPECT_TRUE(env_->IsSameObject(NULL, result));
+ result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, nullptr);
+ EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(7, gJava_MyClassNatives_fooIOO_calls);
}
@@ -383,7 +384,7 @@ jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) {
// 1 = klass
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(klass != NULL);
+ EXPECT_TRUE(klass != nullptr);
EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
gJava_MyClassNatives_fooSII_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -407,7 +408,7 @@ jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble
// 1 = klass
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(klass != NULL);
+ EXPECT_TRUE(klass != nullptr);
EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
gJava_MyClassNatives_fooSDD_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -437,7 +438,7 @@ jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y
// 3 = klass + y + z
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(klass != NULL);
+ EXPECT_TRUE(klass != nullptr);
EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
gJava_MyClassNatives_fooSIOO_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -462,28 +463,28 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntObjectObjectMethod) {
reinterpret_cast<void*>(&Java_MyClassNatives_fooSIOO));
EXPECT_EQ(0, gJava_MyClassNatives_fooSIOO_calls);
- jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, NULL);
+ jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr);
EXPECT_TRUE(env_->IsSameObject(jklass_, result));
EXPECT_EQ(1, gJava_MyClassNatives_fooSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, jobj_);
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_);
EXPECT_TRUE(env_->IsSameObject(jklass_, result));
EXPECT_EQ(2, gJava_MyClassNatives_fooSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, NULL, jobj_);
- EXPECT_TRUE(env_->IsSameObject(NULL, result));
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_);
+ EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(3, gJava_MyClassNatives_fooSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, NULL, jobj_);
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_);
EXPECT_TRUE(env_->IsSameObject(jobj_, result));
EXPECT_EQ(4, gJava_MyClassNatives_fooSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, NULL);
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr);
EXPECT_TRUE(env_->IsSameObject(jklass_, result));
EXPECT_EQ(5, gJava_MyClassNatives_fooSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, NULL);
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr);
EXPECT_TRUE(env_->IsSameObject(jobj_, result));
EXPECT_EQ(6, gJava_MyClassNatives_fooSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, NULL);
- EXPECT_TRUE(env_->IsSameObject(NULL, result));
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr);
+ EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(7, gJava_MyClassNatives_fooSIOO_calls);
}
@@ -492,7 +493,7 @@ jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject
// 3 = klass + y + z
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(klass != NULL);
+ EXPECT_TRUE(klass != nullptr);
EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
gJava_MyClassNatives_fooSSIOO_calls++;
ScopedObjectAccess soa(Thread::Current());
@@ -516,28 +517,28 @@ TEST_F(JniCompilerTest, CompileAndRunStaticSynchronizedIntObjectObjectMethod) {
reinterpret_cast<void*>(&Java_MyClassNatives_fooSSIOO));
EXPECT_EQ(0, gJava_MyClassNatives_fooSSIOO_calls);
- jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, NULL);
+ jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr);
EXPECT_TRUE(env_->IsSameObject(jklass_, result));
EXPECT_EQ(1, gJava_MyClassNatives_fooSSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, NULL, jobj_);
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_);
EXPECT_TRUE(env_->IsSameObject(jklass_, result));
EXPECT_EQ(2, gJava_MyClassNatives_fooSSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, NULL, jobj_);
- EXPECT_TRUE(env_->IsSameObject(NULL, result));
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_);
+ EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(3, gJava_MyClassNatives_fooSSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, NULL, jobj_);
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_);
EXPECT_TRUE(env_->IsSameObject(jobj_, result));
EXPECT_EQ(4, gJava_MyClassNatives_fooSSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, NULL);
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr);
EXPECT_TRUE(env_->IsSameObject(jklass_, result));
EXPECT_EQ(5, gJava_MyClassNatives_fooSSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, NULL);
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr);
EXPECT_TRUE(env_->IsSameObject(jobj_, result));
EXPECT_EQ(6, gJava_MyClassNatives_fooSSIOO_calls);
- result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, NULL);
- EXPECT_TRUE(env_->IsSameObject(NULL, result));
+ result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr);
+ EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(7, gJava_MyClassNatives_fooSSIOO_calls);
}
@@ -591,7 +592,7 @@ TEST_F(JniCompilerTest, ExceptionHandling) {
jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
if (i <= 0) {
- // We want to check raw Object*/Array* below
+ // We want to check raw Object* / Array* below
ScopedObjectAccess soa(env);
// Build stack trace
@@ -599,7 +600,7 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
mirror::ObjectArray<mirror::StackTraceElement>* trace_array =
soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
- EXPECT_TRUE(trace_array != NULL);
+ EXPECT_TRUE(trace_array != nullptr);
EXPECT_EQ(11, trace_array->GetLength());
// Check stack trace entries have expected values
@@ -615,9 +616,9 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
return 0;
} else {
jclass jklass = env->FindClass("MyClassNatives");
- EXPECT_TRUE(jklass != NULL);
+ EXPECT_TRUE(jklass != nullptr);
jmethodID jmethod = env->GetMethodID(jklass, "fooI", "(I)I");
- EXPECT_TRUE(jmethod != NULL);
+ EXPECT_TRUE(jmethod != nullptr);
// Recurse with i - 1
jint result = env->CallNonvirtualIntMethod(thisObj, jklass, jmethod, i - 1);
@@ -721,7 +722,7 @@ TEST_F(JniCompilerTest, GetText) {
TEST_F(JniCompilerTest, GetSinkPropertiesNative) {
TEST_DISABLED_FOR_PORTABLE();
- SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;", NULL);
+ SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;", nullptr);
// This space intentionally left blank. Just testing compilation succeeds.
}
@@ -804,7 +805,7 @@ TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Static) {
jfloat Java_MyClassNatives_checkFloats(JNIEnv* env, jobject thisObj, jfloat f1, jfloat f2) {
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
+ EXPECT_TRUE(thisObj != nullptr);
EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
ScopedObjectAccess soa(Thread::Current());
EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
@@ -826,12 +827,12 @@ TEST_F(JniCompilerTest, CompileAndRunFloatFloatMethod) {
}
void Java_MyClassNatives_checkParameterAlign(JNIEnv* env, jobject thisObj, jint i1, jlong l1) {
- /*EXPECT_EQ(kNative, Thread::Current()->GetState());
- EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
- EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
- ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ(1U, Thread::Current()->NumStackReferences());*/
+// EXPECT_EQ(kNative, Thread::Current()->GetState());
+// EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
+// EXPECT_TRUE(thisObj != nullptr);
+// EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+// ScopedObjectAccess soa(Thread::Current());
+// EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
EXPECT_EQ(i1, 1234);
EXPECT_EQ(l1, INT64_C(0x12345678ABCDEF0));
}
@@ -879,7 +880,7 @@ void Java_MyClassNatives_maxParamNumber(JNIEnv* env, jobject thisObj,
jobject o248, jobject o249, jobject o250, jobject o251, jobject o252, jobject o253) {
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
- EXPECT_TRUE(thisObj != NULL);
+ EXPECT_TRUE(thisObj != nullptr);
EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
ScopedObjectAccess soa(Thread::Current());
EXPECT_GE(255U, Thread::Current()->NumStackReferences());
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 649a80ff68..f0c0ed72bf 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -143,9 +143,10 @@ ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
size_t ArmJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
- size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
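
The FrameSize() rewrite above is the pattern repeated for every architecture in this patch: the method slot at the bottom of the managed frame shrinks from a pointer-size word to a 4-byte StackReference, so one of the former three pointer-size words becomes an explicit sizeof(StackReference<mirror::ArtMethod>) term. A minimal self-contained sketch of the arithmetic, with placeholder constants standing in for the real ART definitions (values assumed for a 32-bit ARM target):

    #include <cstddef>
    #include <cstdint>

    // Placeholder stand-ins for the ART definitions; the real values live in
    // the per-architecture headers.
    constexpr size_t kFramePointerSize = 4;  // assumed: 32-bit target
    constexpr size_t kStackAlignment = 16;   // assumed
    template <typename T> struct StackReference { uint32_t ref; };  // 4 bytes
    struct ArtMethod {};

    constexpr size_t RoundUp(size_t x, size_t n) {
      return (x + n - 1) & ~(n - 1);
    }

    // Mirrors ArmJniCallingConvention::FrameSize() after this change: the
    // method slot is counted at reference width, while LR and the local
    // reference segment state stay at pointer width (the "2 +" term).
    size_t FrameSize(size_t num_callee_saves, size_t handle_scope_size,
                     size_t return_value_size) {
      size_t frame_data_size = sizeof(StackReference<ArtMethod>) +
                               (2 + num_callee_saves) * kFramePointerSize;
      return RoundUp(frame_data_size + handle_scope_size + return_value_size,
                     kStackAlignment);
    }

On 32-bit targets the result is numerically unchanged, since both terms are 4 bytes; spelling it out matters for the 64-bit targets below, where the method slot no longer occupies a full 8-byte word.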
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index ffd27ee37d..0a00d7d8ac 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -95,7 +95,7 @@ FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
CHECK(IsCurrentParamOnStack());
FrameOffset result =
FrameOffset(displacement_.Int32Value() + // displacement
- kFramePointerSize + // Method*
+ sizeof(StackReference<mirror::ArtMethod>) + // Method ref
(itr_slots_ * sizeof(uint32_t))); // offset into in args
return result;
}
@@ -196,9 +196,10 @@ ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
size_t Arm64JniCallingConvention::FrameSize() {
// Method*, callee save area size, local reference segment state
- size_t frame_data_size = ((1 + CalleeSaveRegisters().size()) * kFramePointerSize) + sizeof(uint32_t);
+ size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 2a6e7d96cd..efc0b42db4 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -319,7 +319,8 @@ class JniCallingConvention : public CallingConvention {
// Position of handle scope and interior fields
FrameOffset HandleScopeOffset() const {
- return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_); // above Method*
+ return FrameOffset(this->displacement_.Int32Value() + sizeof(StackReference<mirror::ArtMethod>));
+ // above Method reference
}
FrameOffset HandleScopeLinkOffset() const {
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 0402fe6eb8..f7a7be7304 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -147,9 +147,10 @@ ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
size_t MipsJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
- size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 97b4cdf8ac..9bf7d0f071 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -124,9 +124,10 @@ uint32_t X86JniCallingConvention::CoreSpillMask() const {
size_t X86JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 4871c879a8..5febed24fe 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -96,7 +96,7 @@ ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
return FrameOffset(displacement_.Int32Value() + // displacement
- kFramePointerSize + // Method*
+ sizeof(StackReference<mirror::ArtMethod>) + // Method ref
(itr_slots_ * sizeof(uint32_t))); // offset into in args
}
@@ -139,9 +139,10 @@ uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
size_t X86_64JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
- size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 27188b2331..009b227209 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -530,7 +530,7 @@ void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scrat
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
// Call *(*(SP + base) + offset)
- LoadFromOffset(scratch.AsCoreRegister(), SP, base.Int32Value());
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP, base.Int32Value());
LoadFromOffset(scratch.AsCoreRegister(), scratch.AsCoreRegister(), offs.Int32Value());
___ Blr(reg_x(scratch.AsCoreRegister()));
}
@@ -656,16 +656,17 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
// trashed by native code.
___ Mov(reg_x(ETR), reg_x(TR));
- // Increate frame to required size - must be at least space to push Method*.
+ // Increase frame to required size - must be at least space to push StackReference<Method>.
CHECK_GT(frame_size, kCalleeSavedRegsSize * kFramePointerSize);
size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize);
IncreaseFrameSize(adjust);
- // Write Method*.
- StoreToOffset(X0, SP, 0);
+ // Write StackReference<Method>.
+ DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
+ StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0);
// Write out entry spills
- int32_t offset = frame_size + kFramePointerSize;
+ int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
for (size_t i = 0; i < entry_spills.size(); ++i) {
Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
if (reg.IsNoRegister()) {
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 0791c63f90..56c6536fe5 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1411,10 +1411,12 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
// return address then method on stack
addl(ESP, Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) +
- kFramePointerSize /*method*/ + kFramePointerSize /*return address*/));
+ sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
+ kFramePointerSize /*return address*/));
pushl(method_reg.AsX86().AsCpuRegister());
for (size_t i = 0; i < entry_spills.size(); ++i) {
- movl(Address(ESP, frame_size + kFramePointerSize + (i * kFramePointerSize)),
+ movl(Address(ESP, frame_size + sizeof(StackReference<mirror::ArtMethod>) +
+ (i * kFramePointerSize)),
entry_spills.at(i).AsX86().AsCpuRegister());
}
}
@@ -1422,7 +1424,8 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
void X86Assembler::RemoveFrame(size_t frame_size,
const std::vector<ManagedRegister>& spill_regs) {
CHECK_ALIGNED(frame_size, kStackAlignment);
- addl(ESP, Immediate(frame_size - (spill_regs.size() * kFramePointerSize) - kFramePointerSize));
+ addl(ESP, Immediate(frame_size - (spill_regs.size() * kFramePointerSize) -
+ sizeof(StackReference<mirror::ArtMethod>)));
for (size_t i = 0; i < spill_regs.size(); ++i) {
popl(spill_regs.at(i).AsX86().AsCpuRegister());
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 0ede8755e3..a14551c3b7 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -59,7 +59,6 @@ void X86_64Assembler::call(Label* label) {
EmitLabel(label, kSize);
}
-
void X86_64Assembler::pushq(CpuRegister reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg);
@@ -1652,8 +1651,12 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
}
// return address then method on stack
addq(CpuRegister(RSP), Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) +
- kFramePointerSize /*method*/ + kFramePointerSize /*return address*/));
- pushq(method_reg.AsX86_64().AsCpuRegister());
+ sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
+ kFramePointerSize /*return address*/));
+
+ DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
+ subq(CpuRegister(RSP), Immediate(4));
+ movl(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
for (size_t i = 0; i < entry_spills.size(); ++i) {
ManagedRegisterSpill spill = entry_spills.at(i);
@@ -1732,7 +1735,7 @@ void X86_64Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size)
void X86_64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
X86_64ManagedRegister src = msrc.AsX86_64();
CHECK(src.IsCpuRegister());
- movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}
void X86_64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
@@ -2070,7 +2073,7 @@ void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister
void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- movq(scratch, Address(CpuRegister(RSP), base));
+ movl(scratch, Address(CpuRegister(RSP), base));
call(Address(scratch, offset));
}
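
The movq-to-movl narrowing in StoreRef() and Call() follows from the same invariant: a stack reference occupies 32 bits even on a 64-bit target, so it is written and read with 32-bit moves and zero-extended back to a full pointer when dereferenced. A hedged C++ sketch of the read side (compressed-slot layout assumed, not taken from the source):

    #include <cstdint>

    struct Object {};

    // The C++ analogue of the movl that replaced movq above: load 32 bits
    // from the stack slot, then zero-extend to recover the pointer.
    inline Object* ReadStackReference(const uint32_t* slot) {
      return reinterpret_cast<Object*>(static_cast<uintptr_t>(*slot));
    }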
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 4d3d6646cf..c6b1aa5b90 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -33,7 +33,7 @@
#include "compiler.h"
#include "compiler_callbacks.h"
#include "dex_file-inl.h"
-#include "dex/pass_driver_me.h"
+#include "dex/pass_driver_me_opts.h"
#include "dex/verification_results.h"
#include "driver/compiler_callbacks_impl.h"
#include "driver/compiler_driver.h"
@@ -919,18 +919,18 @@ static int dex2oat(int argc, char** argv) {
} else if (option == "--no-profile-file") {
// No profile
} else if (option == "--print-pass-names") {
- PassDriverME::PrintPassNames();
+ PassDriverMEOpts::PrintPassNames();
} else if (option.starts_with("--disable-passes=")) {
std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
- PassDriverME::CreateDefaultPassList(disable_passes);
+ PassDriverMEOpts::CreateDefaultPassList(disable_passes);
} else if (option.starts_with("--print-passes=")) {
std::string print_passes = option.substr(strlen("--print-passes=")).data();
- PassDriverME::SetPrintPassList(print_passes);
+ PassDriverMEOpts::SetPrintPassList(print_passes);
} else if (option == "--print-all-passes") {
- PassDriverME::SetPrintAllPasses();
+ PassDriverMEOpts::SetPrintAllPasses();
} else if (option.starts_with("--dump-cfg-passes=")) {
std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data();
- PassDriverME::SetDumpPassList(dump_passes);
+ PassDriverMEOpts::SetDumpPassList(dump_passes);
} else {
Usage("Unknown argument %s", option.data());
}
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index cba4ebf4e8..614eca1710 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -1040,8 +1040,13 @@ DISASSEMBLER_ENTRY(cmp,
instr++;
} else {
CHECK_EQ(immediate_bytes, 4u);
- args << StringPrintf("%d", *reinterpret_cast<const int32_t*>(instr));
- instr += 4;
+ if (prefix[2] == 0x66) { // Operand size override from 32-bit to 16-bit.
+ args << StringPrintf("%d", *reinterpret_cast<const int16_t*>(instr));
+ instr += 2;
+ } else {
+ args << StringPrintf("%d", *reinterpret_cast<const int32_t*>(instr));
+ instr += 4;
+ }
}
} else if (branch_bytes > 0) {
DCHECK(!has_modrm);
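
This disassembler fix is independent of the StackReference work: when a 0x66 operand-size-override prefix is present, an instruction whose default immediate width is 32 bits carries only a 16-bit immediate, so the old code both printed a garbage value and advanced the cursor two bytes too far, desynchronizing everything after it. A small sketch of the width rule being applied:

    #include <cstdint>
    #include <cstring>

    // With a 0x66 prefix the default 32-bit immediate shrinks to 16 bits,
    // and the instruction cursor must advance by the matching amount.
    int32_t ReadImmediate(const uint8_t** instr, bool has_0x66_prefix) {
      if (has_0x66_prefix) {
        int16_t imm16;
        std::memcpy(&imm16, *instr, sizeof(imm16));
        *instr += sizeof(imm16);
        return imm16;
      }
      int32_t imm32;
      std::memcpy(&imm32, *instr, sizeof(imm32));
      *instr += sizeof(imm32);
      return imm32;
    }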
diff --git a/runtime/Android.mk b/runtime/Android.mk
index a0648b0ff4..17f0493835 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -35,11 +35,9 @@ LIBART_COMMON_SRC_FILES := \
base/unix_file/random_access_file_utils.cc \
base/unix_file/string_file.cc \
check_jni.cc \
- catch_block_stack_visitor.cc \
class_linker.cc \
common_throws.cc \
debugger.cc \
- deoptimize_stack_visitor.cc \
dex_file.cc \
dex_file_verifier.cc \
dex_instruction.cc \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 45ff21f4a8..5220dc3ca9 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -337,30 +337,22 @@ TEST_F(ArchTest, X86_64) {
// The following tests are all for the running architecture. So we get away
// with just including it and not undefining it every time.
-
#if defined(__arm__)
#include "arch/arm/asm_support_arm.h"
-#undef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
#elif defined(__aarch64__)
#include "arch/arm64/asm_support_arm64.h"
-#undef ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
#elif defined(__mips__)
#include "arch/mips/asm_support_mips.h"
-#undef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
#elif defined(__i386__)
#include "arch/x86/asm_support_x86.h"
-#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
#elif defined(__x86_64__)
#include "arch/x86_64/asm_support_x86_64.h"
-#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
#else
// This happens for the host test.
#ifdef __LP64__
#include "arch/x86_64/asm_support_x86_64.h"
-#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
#else
#include "arch/x86/asm_support_x86.h"
-#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
#endif
#endif
@@ -436,4 +428,13 @@ TEST_F(ArchTest, HeapReferenceSize) {
#endif
}
+TEST_F(ArchTest, StackReferenceSize) {
+#if defined(STACK_REFERENCE_SIZE)
+ EXPECT_EQ(sizeof(StackReference<mirror::Object>),
+ static_cast<size_t>(STACK_REFERENCE_SIZE));
+#else
+ LOG(INFO) << "No expected StackReference Size #define found.";
+#endif
+}
+
} // namespace art
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index e55885f77a..422e20cf44 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -43,5 +43,7 @@
// Expected size of a heap reference
#define HEAP_REFERENCE_SIZE 4
+// Expected size of a stack reference
+#define STACK_REFERENCE_SIZE 4
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 97caa1fd61..28bf856b37 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -559,8 +559,9 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
.macro INVOKE_STUB_CREATE_FRAME
-SAVE_SIZE=5*8 // x4, x5, SP, LR & FP saved.
-SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
+SAVE_SIZE=6*8 // x4, x5, x19(wSUSPEND), SP, LR & FP saved.
+SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
+
mov x9, sp // Save stack pointer.
.cfi_register sp,x9
@@ -574,8 +575,9 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
.cfi_def_cfa_register x10 // before this.
.cfi_adjust_cfa_offset SAVE_SIZE
- str x9, [x10, #32] // Save old stack pointer.
+ stp x9, x19, [x10, #32] // Save old stack pointer and x19(wSUSPEND)
.cfi_rel_offset sp, 32
+ .cfi_rel_offset x19, 40
stp x4, x5, [x10, #16] // Save result and shorty addresses.
.cfi_rel_offset x4, 16
@@ -597,7 +599,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
// W2 - args length
// X9 - destination address.
// W10 - temporary
- add x9, sp, #8 // Destination address is bottom of stack + NULL.
+ add x9, sp, #4 // Destination address is bottom of stack + NULL.
// Use \@ to differentiate between macro invocations.
.LcopyParams\@:
@@ -611,9 +613,12 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
.LendCopyParams\@:
- // Store NULL into Method* at bottom of frame.
- str xzr, [sp]
+ // Store NULL into StackReference<Method>* at bottom of frame.
+ str wzr, [sp]
+#if (STACK_REFERENCE_SIZE != 4)
+#error "STACK_REFERENCE_SIZE(ARM64) size not as expected."
+#endif
.endm
.macro INVOKE_STUB_CALL_AND_RETURN
@@ -651,7 +656,8 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
str x0, [x4]
.Lexit_art_quick_invoke_stub\@:
- ldr x2, [x29, #32] // Restore stack pointer.
+ ldp x2, x19, [x29, #32] // Restore stack pointer and x19.
+ .cfi_restore x19
mov sp, x2
.cfi_restore sp
@@ -687,7 +693,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
* | uint32_t out[n-1] |
* | : : | Outs
* | uint32_t out[0] |
- * | ArtMethod* NULL | <- SP
+ * | StackRef<ArtMethod> | <- SP value=null
* +----------------------+
*
* Outgoing registers:
@@ -1289,7 +1295,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x2, xSELF // pass Thread::Current
mov x3, sp // pass SP
bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP)
@@ -1303,7 +1309,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr w2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x3, xSELF // pass Thread::Current
mov x4, sp // pass SP
bl \entrypoint
@@ -1317,7 +1323,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- ldr x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr w3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x4, xSELF // pass Thread::Current
mov x5, sp // pass SP
bl \entrypoint
@@ -1356,7 +1362,7 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE
ENTRY art_quick_set64_static
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x3, x1 // Store value
- ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
+ ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x2, x3 // Put value param
mov x3, xSELF // pass Thread::Current
mov x4, sp // pass SP
@@ -1420,7 +1426,7 @@ END art_quick_proxy_invoke_handler
* dex method index.
*/
ENTRY art_quick_imt_conflict_trampoline
- ldr x0, [sp, #0] // load caller Method*
+ ldr w0, [sp, #0] // load caller Method*
ldr w0, [x0, #METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods
add x0, x0, #OBJECT_ARRAY_DATA_OFFSET // get starting address of data
ldr w0, [x0, x12, lsl 2] // load the target method
@@ -1434,7 +1440,7 @@ ENTRY art_quick_resolution_trampoline
bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP)
cbz x0, 1f
mov x9, x0 // Remember returned code pointer in x9.
- ldr x0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
+ ldr w0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
br x9
1:
@@ -1484,7 +1490,7 @@ END art_quick_resolution_trampoline
* | D2 | float arg 3
* | D1 | float arg 2
* | D0 | float arg 1
- * | RDI/Method* | <- X0
+ * | Method* | <- X0
* #-------------------#
* | local ref cookie | // 4B
* | handle scope size | // 4B
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index fac988310a..44edd4b076 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -225,7 +225,8 @@ class StubTest : public CommonRuntimeTest {
"cmp x1, x2\n\t"
"b.ne 1f\n\t"
- "mov %[fpr_result], #0\n\t"
+ "mov x2, #0\n\t"
+ "str x2, %[fpr_result]\n\t"
// Finish up.
"2:\n\t"
@@ -247,15 +248,16 @@ class StubTest : public CommonRuntimeTest {
// Failed fpr verification.
"1:\n\t"
- "mov %[fpr_result], #1\n\t"
+ "mov x2, #1\n\t"
+ "str x2, %[fpr_result]\n\t"
"b 2b\n\t" // Goto finish-up
// End
"3:\n\t"
- : [result] "=r" (result), [fpr_result] "=r" (fpr_result)
+ : [result] "=r" (result)
// Use the result from r0
: [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer)
+ [referrer] "r"(referrer), [fpr_result] "m" (fpr_result)
: "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17"); // clobber.
#elif defined(__x86_64__)
// Note: Uses the native convention
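
The stub_test change fixes a fragile inline-asm pattern rather than anything reference-related: fpr_result used to be an "=r" output assigned on two different control paths inside the asm, which leaves the register allocator free to pick a register that overlaps a live input. Routing the value through a fixed scratch register and storing it to a memory operand removes that freedom; the patch lists the operand as "m" among the inputs, which amounts to the same store-through-memory shape. A minimal sketch of the idea (GCC extended asm, AArch64, reduced to one path):

    #include <cstdint>

    // Write a flag computed inside the asm through a memory operand instead
    // of an "=r" output assigned in multiple branches.
    uint64_t FprCheckSketch() {
      uint64_t fpr_result;
      asm volatile(
          "mov x2, #0\n\t"             // compute the flag in a named scratch reg
          "str x2, %[fpr_result]\n\t"  // store it through the memory operand
          : [fpr_result] "=m"(fpr_result)
          :
          : "x2");
      return fpr_result;
    }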
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 29633fbb7e..bff8501cf2 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -41,5 +41,7 @@
// Expected size of a heap reference
#define HEAP_REFERENCE_SIZE 4
+// Expected size of a stack reference
+#define STACK_REFERENCE_SIZE 4
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 971688da80..48c33d5b3d 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -384,16 +384,24 @@ DEFINE_FUNCTION art_quick_invoke_stub
PUSH r9 // Save r9/shorty*.
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
+
movl %edx, %r10d
- addl LITERAL(64), %edx // Reserve space for return addr, method*, rbp, r8 and r9 in frame.
+ addl LITERAL(60), %edx // Reserve space for return addr, StackReference<method>, rbp,
+ // r8 and r9 in frame.
andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
subl LITERAL(32), %edx // Remove space for return address, rbp, r8 and r9.
subq %rdx, %rsp // Reserve stack space for argument array.
- movq LITERAL(0), (%rsp) // Store NULL for method*
+
+#if (STACK_REFERENCE_SIZE != 4)
+#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
+#endif
+ movl LITERAL(0), (%rsp) // Store NULL for method*
+
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // RAX := method to be called
movq %rsi, %r11 // R11 := arg_array
- leaq 8(%rsp), %rdi // Rdi is pointing just above the method* in the stack arguments.
+ leaq 4(%rsp), %rdi // Rdi is pointing just above the StackReference<method> in the
+ // stack arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character
@@ -455,16 +463,24 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
PUSH r9 // Save r9/shorty*.
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
+
movl %edx, %r10d
- addl LITERAL(64), %edx // Reserve space for return addr, method*, rbp, r8 and r9 in frame.
+ addl LITERAL(60), %edx // Reserve space for return addr, StackReference<method>, rbp,
+ // r8 and r9 in frame.
andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
subl LITERAL(32), %edx // Remove space for return address, rbp, r8 and r9.
subq %rdx, %rsp // Reserve stack space for argument array.
- movq LITERAL(0), (%rsp) // Store NULL for method*
+
+#if (STACK_REFERENCE_SIZE != 4)
+#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
+#endif
+ movl LITERAL(0), (%rsp) // Store NULL for method*
+
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // RAX := method to be called
movq %rsi, %r11 // R11 := arg_array
- leaq 8(%rsp), %rdi // Rdi is pointing just above the method* in the stack arguments.
+ leaq 4(%rsp), %rdi // Rdi is pointing just above the StackReference<method> in the
+ // stack arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character
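
The constant in both invoke stubs drops from 64 to 60 for the same reason as everywhere else in this patch: of the slots reserved above the argument array, the four 8-byte slots that stay (return address, rbp, r8, r9) account for 32 bytes, the method slot shrank from 8 bytes to 4, and the remainder is alignment headroom. A sketch of the addl/andl/subl arithmetic with the pieces named (constants taken from the comments above):

    #include <cstddef>

    constexpr size_t AlignDown16(size_t x) { return x & ~static_cast<size_t>(15); }

    // Mirrors the addl LITERAL(60) / andl 0xFFFFFFF0 / subl LITERAL(32)
    // sequence: how far rsp drops to hold the argument array plus the 4-byte
    // StackReference<method> slot, keeping rsp 16-byte aligned. The 32 is
    // subtracted back out because those slots were already pushed.
    constexpr size_t ReservedBelowRbp(size_t args_size) {
      return AlignDown16(args_size + 60) - 32;
    }

    static_assert(ReservedBelowRbp(0) >= 4,
                  "always room for the null method reference");

The matching leaq offset changes from 8 to 4 so the copied arguments land immediately above the 4-byte reference slot.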
diff --git a/runtime/catch_block_stack_visitor.cc b/runtime/catch_block_stack_visitor.cc
deleted file mode 100644
index b8202765d3..0000000000
--- a/runtime/catch_block_stack_visitor.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "catch_block_stack_visitor.h"
-
-#include "dex_instruction.h"
-#include "mirror/art_method-inl.h"
-#include "quick_exception_handler.h"
-#include "handle_scope-inl.h"
-#include "verifier/method_verifier.h"
-
-namespace art {
-
-bool CatchBlockStackVisitor::VisitFrame() {
- exception_handler_->SetHandlerFrameId(GetFrameId());
- mirror::ArtMethod* method = GetMethod();
- if (method == nullptr) {
- // This is the upcall, we remember the frame and last pc so that we may long jump to them.
- exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
- exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
- return false; // End stack walk.
- } else {
- if (method->IsRuntimeMethod()) {
- // Ignore callee save method.
- DCHECK(method->IsCalleeSaveMethod());
- return true;
- } else {
- return HandleTryItems(method);
- }
- }
-}
-
-bool CatchBlockStackVisitor::HandleTryItems(mirror::ArtMethod* method) {
- uint32_t dex_pc = DexFile::kDexNoIndex;
- if (!method->IsNative()) {
- dex_pc = GetDexPc();
- }
- if (dex_pc != DexFile::kDexNoIndex) {
- bool clear_exception = false;
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> to_find(hs.NewHandle((*exception_)->GetClass()));
- uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception);
- exception_handler_->SetClearException(clear_exception);
- if (found_dex_pc != DexFile::kDexNoIndex) {
- exception_handler_->SetHandlerDexPc(found_dex_pc);
- exception_handler_->SetHandlerQuickFramePc(method->ToNativePc(found_dex_pc));
- exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
- return false; // End stack walk.
- }
- }
- return true; // Continue stack walk.
-}
-
-} // namespace art
diff --git a/runtime/catch_block_stack_visitor.h b/runtime/catch_block_stack_visitor.h
deleted file mode 100644
index f45cf037cf..0000000000
--- a/runtime/catch_block_stack_visitor.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_CATCH_BLOCK_STACK_VISITOR_H_
-#define ART_RUNTIME_CATCH_BLOCK_STACK_VISITOR_H_
-
-#include "mirror/object-inl.h"
-#include "stack.h"
-#include "handle_scope-inl.h"
-
-namespace art {
-
-namespace mirror {
-class Throwable;
-} // namespace mirror
-class Context;
-class QuickExceptionHandler;
-class Thread;
-class ThrowLocation;
-
-// Finds catch handler or prepares deoptimization.
-class CatchBlockStackVisitor FINAL : public StackVisitor {
- public:
- CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
- QuickExceptionHandler* exception_handler)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, context), self_(self), exception_(exception),
- exception_handler_(exception_handler) {
- }
-
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- private:
- bool HandleTryItems(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- Thread* const self_;
- // The type of the exception catch block to find.
- Handle<mirror::Throwable>* exception_;
- QuickExceptionHandler* const exception_handler_;
-
- DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
-};
-
-} // namespace art
-#endif // ART_RUNTIME_CATCH_BLOCK_STACK_VISITOR_H_
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 84afb2d3f6..9d8888c167 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -37,19 +37,19 @@ inline mirror::Class* ClassLinker::FindSystemClass(Thread* self, const char* des
return FindClass(self, descriptor, NullHandle<mirror::ClassLoader>());
}
-inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class* element_class) {
+inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class** element_class) {
for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
// Read the cached array class once to avoid races with other threads setting it.
mirror::Class* array_class = find_array_class_cache_[i];
- if (array_class != nullptr && array_class->GetComponentType() == element_class) {
+ if (array_class != nullptr && array_class->GetComponentType() == *element_class) {
return array_class;
}
}
- DCHECK(!element_class->IsPrimitiveVoid());
- std::string descriptor("[");
- descriptor += element_class->GetDescriptor();
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(element_class->GetClassLoader()));
+ DCHECK(!(*element_class)->IsPrimitiveVoid());
+ std::string descriptor = "[" + (*element_class)->GetDescriptor();
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle((*element_class)->GetClassLoader()));
+ HandleWrapper<mirror::Class> h_element_class(hs.NewHandleWrapper(element_class));
mirror::Class* array_class = FindClass(self, descriptor.c_str(), class_loader);
// Benign races in storing array class and incrementing index.
size_t victim_index = find_array_class_cache_next_victim_;
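
FindArrayClass now takes mirror::Class** because the FindClass call inside it can allocate and therefore trigger a moving garbage collection: a plain mirror::Class* parameter would be a stale copy afterwards, while a double pointer registered through NewHandleWrapper gives the collector a slot it can rewrite in place. A self-contained toy illustrating why the caller's variable must be passed by address (the collector here is simulated; real HandleWrapper semantics are assumed, not shown):

    // Toy stand-in for a moving collector: an allocation may relocate an
    // existing object, and every registered root slot is rewritten to the
    // new address - the job NewHandleWrapper does for *element_class.
    struct Class { int id; };

    struct ToyHeap {
      Class old_spot{1};
      Class new_spot{0};
      Class array_class{2};

      Class* AllocateArrayClass(Class** root) {
        new_spot = **root;  // "move" the element class during allocation
        *root = &new_spot;  // fix up the registered root slot
        return &array_class;
      }
    };

    // Usage: Class* element = &heap.old_spot;
    //        Class* array = FindArrayClassToy(&heap, &element);
    //        element now points at heap.new_spot, not the stale old_spot.
    Class* FindArrayClassToy(ToyHeap* heap, Class** element_class) {
      // Because element_class is a double pointer, the caller's variable
      // still points at the (possibly relocated) class on return.
      return heap->AllocateArrayClass(element_class);
    }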
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a8271eda9a..ccf0558689 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -84,7 +84,7 @@ class ClassLinker {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds the array class given for the element class.
- mirror::Class* FindArrayClass(Thread* self, mirror::Class* element_class)
+ mirror::Class* FindArrayClass(Thread* self, mirror::Class** element_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if the class linker is initialized.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index c11aeccee4..e397a5ce30 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -147,7 +147,8 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_STREQ(direct_interface0->GetDescriptor().c_str(), "Ljava/lang/Cloneable;");
mirror::Class* direct_interface1 = mirror::Class::GetDirectInterface(self, array, 1);
EXPECT_STREQ(direct_interface1->GetDescriptor().c_str(), "Ljava/io/Serializable;");
- EXPECT_EQ(class_linker_->FindArrayClass(self, array->GetComponentType()), array.Get());
+ mirror::Class* array_ptr = array->GetComponentType();
+ EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
}
void AssertMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/deoptimize_stack_visitor.cc b/runtime/deoptimize_stack_visitor.cc
deleted file mode 100644
index 449ccce64f..0000000000
--- a/runtime/deoptimize_stack_visitor.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "deoptimize_stack_visitor.h"
-
-#include "mirror/art_method-inl.h"
-#include "object_utils.h"
-#include "quick_exception_handler.h"
-#include "handle_scope-inl.h"
-#include "verifier/method_verifier.h"
-
-namespace art {
-
-bool DeoptimizeStackVisitor::VisitFrame() {
- exception_handler_->SetHandlerFrameId(GetFrameId());
- mirror::ArtMethod* method = GetMethod();
- if (method == nullptr) {
- // This is the upcall, we remember the frame and last pc so that we may long jump to them.
- exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
- exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
- return false; // End stack walk.
- } else if (method->IsRuntimeMethod()) {
- // Ignore callee save method.
- DCHECK(method->IsCalleeSaveMethod());
- return true;
- } else {
- return HandleDeoptimization(method);
- }
-}
-
-bool DeoptimizeStackVisitor::HandleDeoptimization(mirror::ArtMethod* m) {
- MethodHelper mh(m);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
- CHECK(code_item != nullptr);
- uint16_t num_regs = code_item->registers_size_;
- uint32_t dex_pc = GetDexPc();
- const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
- uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
- ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
- StackHandleScope<2> hs(self_);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
- verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
- &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
- m->GetAccessFlags(), false, true, true);
- verifier.Verify();
- std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
- for (uint16_t reg = 0; reg < num_regs; ++reg) {
- VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
- switch (kind) {
- case kUndefined:
- new_frame->SetVReg(reg, 0xEBADDE09);
- break;
- case kConstant:
- new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
- break;
- case kReferenceVReg:
- new_frame->SetVRegReference(reg,
- reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
- break;
- default:
- new_frame->SetVReg(reg, GetVReg(m, reg, kind));
- break;
- }
- }
- if (prev_shadow_frame_ != nullptr) {
- prev_shadow_frame_->SetLink(new_frame);
- } else {
- self_->SetDeoptimizationShadowFrame(new_frame);
- }
- prev_shadow_frame_ = new_frame;
- return true;
-}
-
-} // namespace art
diff --git a/runtime/deoptimize_stack_visitor.h b/runtime/deoptimize_stack_visitor.h
deleted file mode 100644
index c41b80364b..0000000000
--- a/runtime/deoptimize_stack_visitor.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_DEOPTIMIZE_STACK_VISITOR_H_
-#define ART_RUNTIME_DEOPTIMIZE_STACK_VISITOR_H_
-
-#include "base/mutex.h"
-#include "stack.h"
-#include "thread.h"
-
-namespace art {
-
-namespace mirror {
-class ArtMethod;
-} // namespace mirror
-class QuickExceptionHandler;
-class Thread;
-
-// Prepares deoptimization.
-class DeoptimizeStackVisitor FINAL : public StackVisitor {
- public:
- DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, context), self_(self), exception_handler_(exception_handler),
- prev_shadow_frame_(nullptr) {
- CHECK(!self_->HasDeoptimizationShadowFrame());
- }
-
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- private:
- bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- Thread* const self_;
- QuickExceptionHandler* const exception_handler_;
- ShadowFrame* prev_shadow_frame_;
-
- DISALLOW_COPY_AND_ASSIGN(DeoptimizeStackVisitor);
-};
-
-} // namespace art
-#endif // ART_RUNTIME_DEOPTIMIZE_STACK_VISITOR_H_
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 3fd4adc0a7..b582abb1a6 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -26,12 +26,12 @@ class ArtMethod;
} // namespace mirror
// Place a special frame at the TOS that will save the callee saves for the given type.
-static inline void FinishCalleeSaveFrameSetup(Thread* self, mirror::ArtMethod** sp,
+static inline void FinishCalleeSaveFrameSetup(Thread* self, StackReference<mirror::ArtMethod>* sp,
Runtime::CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Be aware the store below may well stomp on an incoming argument.
Locks::mutator_lock_->AssertSharedHeld(self);
- *sp = Runtime::Current()->GetCalleeSaveMethod(type);
+ sp->Assign(Runtime::Current()->GetCalleeSaveMethod(type));
self->SetTopOfStack(sp, 0);
self->VerifyStack();
}
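
The hunk above is the pattern repeated throughout this change: the callee-save slot is no longer a raw mirror::ArtMethod** that can be written through directly, so stores go through StackReference::Assign. A rough sketch of what such a compressed stack slot looks like (an assumption for illustration; the real template in ART's stack.h differs in detail):

    #include <cstdint>

    // Sketch of a compressed stack slot (assumption: 32-bit storage). On a
    // 64-bit target a raw T* slot would be 8 bytes, so a fixed 4-byte
    // reference keeps the method slot the same size on every architecture,
    // which is why writes become sp->Assign(m) instead of *sp = m.
    template <typename MirrorType>
    class StackReference {
     public:
      MirrorType* AsMirrorPtr() const {
        return reinterpret_cast<MirrorType*>(static_cast<uintptr_t>(reference_));
      }
      void Assign(MirrorType* other) {
        reference_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(other));
      }
     private:
      uint32_t reference_;  // always 4 bytes, regardless of pointer width
    };
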
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index ccc0f3ded5..330125458e 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -27,32 +27,36 @@ namespace art {
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocArrayFromCode<false, instrumented_bool>(type_idx, method, component_count, self, \
@@ -60,7 +64,7 @@ extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
} \
extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, method, component_count, self, \
@@ -68,7 +72,7 @@ extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
} \
extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocArrayFromCode<true, instrumented_bool>(type_idx, method, component_count, self, \
@@ -76,7 +80,7 @@ extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2(
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
if (!instrumented_bool) { \
@@ -87,7 +91,7 @@ extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- mirror::ArtMethod** sp) \
+ StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
if (!instrumented_bool) { \
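
These entry points are stamped out by token pasting: each (suffix, suffix2) pair becomes a distinct C symbol per allocator and instrumentation mode. A standalone illustration of the same preprocessor mechanism; the placeholder body and the DlMalloc/Instrumented suffixes are assumptions for the example, not the exact ART spellings:

    // Token-pasting pattern: one macro generates a family of C symbols.
    // The body is a placeholder, not the real allocation path.
    #define GENERATE_ALLOC_ENTRYPOINT(suffix, suffix2)              \
      extern "C" void* artAllocObjectFromCode##suffix##suffix2() {  \
        return nullptr; /* placeholder body */                      \
      }

    GENERATE_ALLOC_ENTRYPOINT(DlMalloc, )              // artAllocObjectFromCodeDlMalloc
    GENERATE_ALLOC_ENTRYPOINT(DlMalloc, Instrumented)  // artAllocObjectFromCodeDlMallocInstrumented
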
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 6448045e27..47fb9d66f8 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -28,7 +28,7 @@
namespace art {
-extern "C" void artDeoptimize(Thread* self, mirror::ArtMethod** sp)
+extern "C" void artDeoptimize(Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index ab428a546f..53c9b97c29 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -28,7 +28,7 @@ namespace art {
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
mirror::ArtMethod* referrer,
Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
@@ -39,7 +39,8 @@ extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx,
mirror::ArtMethod* referrer,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -47,10 +48,9 @@ extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx,
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* referrer,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -59,7 +59,8 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
extern "C" mirror::String* artResolveStringFromCode(mirror::ArtMethod* referrer,
int32_t string_idx,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return ResolveStringFromCode(referrer, string_idx);
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index c38a5959dd..844367d2cb 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -27,7 +27,7 @@ namespace art {
extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int32_t));
@@ -44,7 +44,7 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int64_t));
@@ -61,7 +61,8 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -79,7 +80,7 @@ extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int32_t));
@@ -102,7 +103,7 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object*
extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int64_t));
@@ -126,7 +127,7 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object*
extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::ArtMethod* referrer,
Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -149,7 +150,7 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror:
extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
sizeof(int32_t));
@@ -169,7 +170,8 @@ extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
}
extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
- uint64_t new_value, Thread* self, mirror::ArtMethod** sp)
+ uint64_t new_value, Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
sizeof(int64_t));
@@ -190,7 +192,7 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* ref
extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -214,7 +216,7 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_v
extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int32_t));
@@ -240,13 +242,15 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
}
extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
mirror::ArtMethod* callee_save = runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
uint32_t frame_size =
runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly).FrameSizeInBytes();
- mirror::ArtMethod* referrer = sp[frame_size / sizeof(mirror::ArtMethod*)];
+ mirror::ArtMethod* referrer =
+ reinterpret_cast<StackReference<mirror::ArtMethod>*>(
+ reinterpret_cast<uint8_t*>(sp) + frame_size)->AsMirrorPtr();
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int64_t));
if (LIKELY(field != NULL && obj != NULL)) {
@@ -254,7 +258,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
field->Set64<false>(obj, new_value);
return 0; // success
}
- *sp = callee_save;
+ sp->Assign(callee_save);
self->SetTopOfStack(sp, 0);
field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
sizeof(int64_t));
@@ -274,7 +278,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::Object* new_value,
mirror::ArtMethod* referrer, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
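
artSet64InstanceFromCode is unusual among these entry points in that the referrer is not passed as an argument; the new code instead reads the caller's method reference from the slot just above the callee-save frame. The address arithmetic, restated as a standalone sketch with simplified stand-in types:

    #include <cstdint>

    // Simplified stand-ins for the real ART types.
    struct Method {};
    template <typename T>
    struct StackRef {
      T* ptr;
      T* AsMirrorPtr() const { return ptr; }
    };

    // The caller's method reference sits immediately above the callee-save
    // frame, so it is found by offsetting sp by the frame size in bytes.
    Method* GetReferrer(StackRef<Method>* sp, uint32_t frame_size_in_bytes) {
      uint8_t* caller_slot = reinterpret_cast<uint8_t*>(sp) + frame_size_in_bytes;
      return reinterpret_cast<StackRef<Method>*>(caller_slot)->AsMirrorPtr();
    }
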
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index 8dac75039c..4ec28791fb 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -39,7 +39,7 @@ namespace art {
*/
extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array,
const Instruction::ArrayDataPayload* payload,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 11a4b3b6f6..6ef075da6b 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -26,7 +26,7 @@ namespace art {
extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod* method,
mirror::Object* this_object,
Thread* self,
- mirror::ArtMethod** sp,
+ StackReference<mirror::ArtMethod>* sp,
uintptr_t lr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
@@ -40,7 +40,8 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod*
return result;
}
-extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::ArtMethod** sp,
+extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self,
+ StackReference<mirror::ArtMethod>* sp,
uint64_t gpr_result, uint64_t fpr_result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// TODO: use FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly) not the hand inlined below.
@@ -50,7 +51,7 @@ extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::A
Locks::mutator_lock_->AssertSharedHeld(self);
Runtime* runtime = Runtime::Current();
mirror::ArtMethod* callee_save = runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
- *sp = callee_save;
+ sp->Assign(callee_save);
uint32_t return_pc_offset = callee_save->GetReturnPcOffsetInBytes(
runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly).FrameSizeInBytes());
uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) +
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 5d36b4c904..140b0754b4 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -34,7 +34,7 @@ extern uint32_t JniMethodStart(Thread* self) {
DCHECK(env != nullptr);
uint32_t saved_local_ref_cookie = env->local_ref_cookie;
env->local_ref_cookie = env->locals.GetSegmentState();
- mirror::ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
+ mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr();
if (!native_method->IsFastNative()) {
// When not fast JNI we transition out of runnable.
self->TransitionFromRunnableToSuspended(kNative);
@@ -49,7 +49,7 @@ extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) {
// TODO: NO_THREAD_SAFETY_ANALYSIS due to different control paths depending on fast JNI.
static void GoToRunnable(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
- mirror::ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
+ mirror::ArtMethod* native_method = self->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr();
bool is_fast = native_method->IsFastNative();
if (!is_fast) {
self->TransitionFromSuspendedToRunnable();
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 817d053c9b..92c0841dd9 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -20,7 +20,8 @@
namespace art {
-extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self, mirror::ArtMethod** sp)
+extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -42,7 +43,8 @@ extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self, mirror::
}
}
-extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self, mirror::ArtMethod** sp)
+extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 53e725edba..f61c75492b 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -28,7 +28,7 @@ void CheckSuspendFromCode(Thread* thread)
CheckSuspend(thread);
}
-extern "C" void artTestSuspendFromCode(Thread* thread, mirror::ArtMethod** sp)
+extern "C" void artTestSuspendFromCode(Thread* thread, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when suspend count check value is 0 and thread->suspend_count_ != 0
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 31eacac114..e6f294ace7 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -25,7 +25,8 @@
namespace art {
// Deliver an exception that's pending on thread helping set up a callee save frame on the way.
-extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::ArtMethod** sp)
+extern "C" void artDeliverPendingExceptionFromCode(Thread* thread,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
thread->QuickDeliverException();
@@ -33,7 +34,7 @@ extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::ArtMe
// Called by generated call to throw an exception.
extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
/*
* exception may be NULL, in which case this routine should
@@ -55,7 +56,7 @@ extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread
// Called by generated call to throw a NPE exception.
extern "C" void artThrowNullPointerExceptionFromCode(Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -64,8 +65,7 @@ extern "C" void artThrowNullPointerExceptionFromCode(Thread* self,
}
// Called by generated call to throw an arithmetic divide by zero exception.
-extern "C" void artThrowDivZeroFromCode(Thread* self,
- mirror::ArtMethod** sp)
+extern "C" void artThrowDivZeroFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowArithmeticExceptionDivideByZero();
@@ -74,14 +74,14 @@ extern "C" void artThrowDivZeroFromCode(Thread* self,
// Called by generated call to throw an array index out of bounds exception.
extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowArrayIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
}
-extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::ArtMethod** sp)
+extern "C" void artThrowStackOverflowFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowStackOverflowError(self);
@@ -89,7 +89,7 @@ extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::ArtMethod**
}
extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowNoSuchMethodError(method_idx);
@@ -97,7 +97,7 @@ extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self,
}
extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
CHECK(!dest_type->IsAssignableFrom(src_type));
@@ -106,7 +106,7 @@ extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Cla
}
extern "C" void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 554bff4701..1d524cb53b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -195,22 +195,22 @@ class QuickArgumentVisitor {
#endif
public:
- static mirror::ArtMethod* GetCallingMethod(mirror::ArtMethod** sp)
+ static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK((*sp)->IsCalleeSaveMethod());
+ DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
- return *reinterpret_cast<mirror::ArtMethod**>(previous_sp);
+ return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
}
// For the given quick ref and args quick frame, return the caller's PC.
- static uintptr_t GetCallingPc(mirror::ArtMethod** sp)
+ static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK((*sp)->IsCalleeSaveMethod());
+ DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
return *reinterpret_cast<uintptr_t*>(lr);
}
- QuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static,
+ QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
const char* shorty, uint32_t shorty_len)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
@@ -383,12 +383,12 @@ class QuickArgumentVisitor {
if (kQuickSoftFloatAbi) {
CHECK_EQ(kNumQuickFprArgs, 0U);
return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
- + GetBytesPerGprSpillLocation(kRuntimeISA) /* ArtMethod* */;
+ + sizeof(StackReference<mirror::ArtMethod>) /* StackReference<ArtMethod> */;
} else {
// For now, there is no reg-spill area for the targets with
// hard float ABI. So, the offset pointing to the first method's
// parameter ('this' for non-static methods) should be returned.
- return GetBytesPerGprSpillLocation(kRuntimeISA); // Skip Method*.
+ return sizeof(StackReference<mirror::ArtMethod>); // Skip StackReference<ArtMethod>.
}
}
@@ -410,8 +410,9 @@ class QuickArgumentVisitor {
// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildQuickShadowFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
+ BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
+ const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
+ size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -457,7 +458,7 @@ void BuildQuickShadowFrameVisitor::Visit() {
}
extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
- mirror::ArtMethod** sp)
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
// frame.
@@ -510,9 +511,9 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa
// to jobjects.
class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildQuickArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
- std::vector<jvalue>* args) :
+ BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
+ const char* shorty, uint32_t shorty_len,
+ ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -577,7 +578,7 @@ void BuildQuickArgumentVisitor::FixupReferences() {
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
mirror::Object* receiver,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
@@ -585,7 +586,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
const char* old_cause =
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
- DCHECK_EQ(*sp, proxy_method) << PrettyMethod(proxy_method);
+ DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
self->SetTopOfStack(sp, 0);
DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
@@ -629,8 +630,9 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
// so they don't get garbage collected.
class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
public:
- RememberForGcArgumentVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
+ RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
+ const char* shorty, uint32_t shorty_len,
+ ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
@@ -666,7 +668,8 @@ void RememberForGcArgumentVisitor::FixupReferences() {
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
mirror::Object* receiver,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
// Start new JNI local reference state
@@ -821,7 +824,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
// Fixup any locally saved objects may have moved during a GC.
visitor.FixupReferences();
// Place called method in callee-save frame to be placed as first argument to quick method.
- *sp = called;
+ sp->Assign(called);
return code;
}
@@ -1171,14 +1174,14 @@ class ComputeGenericJniFrameSize FINAL {
}
// WARNING: After this, *sp won't be pointing to the method anymore!
- void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
- void* sp, HandleScope** table, uint32_t* handle_scope_entries,
- uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
- void** code_return, size_t* overall_size)
+ void ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
+ uint32_t shorty_len, void* sp, HandleScope** table,
+ uint32_t* handle_scope_entries, uintptr_t** start_stack, uintptr_t** start_gpr,
+ uint32_t** start_fpr, void** code_return, size_t* overall_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ComputeAll(is_static, shorty, shorty_len);
- mirror::ArtMethod* method = **m;
+ mirror::ArtMethod* method = (*m)->AsMirrorPtr();
uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
@@ -1186,20 +1189,30 @@ class ComputeGenericJniFrameSize FINAL {
// We have to squeeze in the HandleScope, and relocate the method pointer.
// "Free" the slot for the method.
- sp8 += kPointerSize;
+ sp8 += kPointerSize; // In the callee-save frame we use a full pointer.
- // Add the HandleScope.
+ // Under the callee saves, put the handle scope and the new method stack reference.
*handle_scope_entries = num_handle_scope_references_;
- size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSize(num_handle_scope_references_);
- sp8 -= handle_scope_size;
- *table = reinterpret_cast<HandleScope*>(sp8);
+
+ size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
+ size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
+
+ sp8 -= scope_and_method;
+ // Align down to kStackAlignment.
+ uintptr_t sp_to_align = reinterpret_cast<uintptr_t>(sp8);
+ sp_to_align = RoundDown(sp_to_align, kStackAlignment);
+ sp8 = reinterpret_cast<uint8_t*>(sp_to_align);
+
+ uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
+ *table = reinterpret_cast<HandleScope*>(sp8_table);
(*table)->SetNumberOfReferences(num_handle_scope_references_);
// Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
- sp8 -= kPointerSize;
uint8_t* method_pointer = sp8;
- *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
- *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);
+ StackReference<mirror::ArtMethod>* new_method_ref =
+ reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
+ new_method_ref->Assign(method);
+ *m = new_method_ref;
// Reference cookie and padding
sp8 -= 8;
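
The rewritten layout reserves the handle scope and the new 4-byte method reference together below the callee saves, then rounds the pointer down to the stack alignment in one step instead of padding each piece separately. The arithmetic, sketched with assumed constants (kStackAlignment = 16, 4-byte StackReference):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kStackAlignment = 16;  // assumed for illustration
    constexpr size_t kStackRefSize = 4;     // assumed sizeof(StackReference<...>)

    // Returns the new method-reference slot; *table_out receives the handle
    // scope address, which sits directly above that slot.
    uint8_t* LayOutScopeAndMethod(uint8_t* sp8, size_t handle_scope_size,
                                  uint8_t** table_out) {
      sp8 -= handle_scope_size + kStackRefSize;  // reserve both regions at once
      uintptr_t aligned = reinterpret_cast<uintptr_t>(sp8) &
                          ~static_cast<uintptr_t>(kStackAlignment - 1);
      sp8 = reinterpret_cast<uint8_t*>(aligned);  // RoundDown(sp8, kStackAlignment)
      *table_out = sp8 + kStackRefSize;           // handle scope above the ref
      return sp8;
    }
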
@@ -1306,8 +1319,8 @@ class ComputeGenericJniFrameSize FINAL {
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
- uint32_t shorty_len, Thread* self) :
+ BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
+ const char* shorty, uint32_t shorty_len, Thread* self) :
QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
ComputeGenericJniFrameSize fsc;
fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &handle_scope_, &handle_scope_expected_refs_,
@@ -1320,7 +1333,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
+ sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
}
}
@@ -1488,9 +1501,9 @@ void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock)
* 1) How many bytes of the alloca can be released, if the value is non-negative.
* 2) An error, if the value is negative.
*/
-extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
+extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* called = *sp;
+ mirror::ArtMethod* called = sp->AsMirrorPtr();
DCHECK(called->IsNative()) << PrettyMethod(called, true);
// run the visitor
@@ -1562,17 +1575,18 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
* Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
* unlocking.
*/
-extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
+extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
+ StackReference<mirror::ArtMethod>* sp,
jvalue result, uint64_t result_f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
- mirror::ArtMethod* called = *sp;
+ mirror::ArtMethod* called = sp->AsMirrorPtr();
uint32_t cookie = *(sp32 - 1);
jobject lock = nullptr;
if (called->IsSynchronized()) {
HandleScope* table = reinterpret_cast<HandleScope*>(
- reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+ reinterpret_cast<uint8_t*>(sp) + sizeof(StackReference<mirror::ArtMethod>));
lock = table->GetHandle(0).ToJObject();
}
@@ -1669,12 +1683,12 @@ static MethodAndCode GetSuccessValue(const void* code, mirror::ArtMethod* method
template<InvokeType type, bool access_check>
static MethodAndCode artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
mirror::ArtMethod* caller_method,
- Thread* self, mirror::ArtMethod** sp);
+ Thread* self, StackReference<mirror::ArtMethod>* sp);
template<InvokeType type, bool access_check>
static MethodAndCode artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
mirror::ArtMethod* caller_method,
- Thread* self, mirror::ArtMethod** sp) {
+ Thread* self, StackReference<mirror::ArtMethod>* sp) {
mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
type);
if (UNLIKELY(method == nullptr)) {
@@ -1714,7 +1728,8 @@ static MethodAndCode artInvokeCommon(uint32_t method_idx, mirror::Object* this_o
MethodAndCode artInvokeCommon<type, access_check>(uint32_t method_idx, \
mirror::Object* this_object, \
mirror::ArtMethod* caller_method, \
- Thread* self, mirror::ArtMethod** sp) \
+ Thread* self, \
+ StackReference<mirror::ArtMethod>* sp) \
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
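
EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL forces an explicit instantiation of artInvokeCommon for each <InvokeType, access_check> pair so the trampolines below can call them. The same C++ mechanism in miniature, with toy types in place of the real ones:

    // A function template plus explicit instantiations: each invocation of
    // the macro emits one concrete specialization into this translation unit.
    template <int kType, bool kAccessCheck>
    int Common(int x) {
      return kAccessCheck ? x + kType : x;
    }

    #define EXPLICIT_COMMON_DECL(type, access_check) \
      template int Common<type, access_check>(int)

    EXPLICIT_COMMON_DECL(0, false);
    EXPLICIT_COMMON_DECL(0, true);
    EXPLICIT_COMMON_DECL(1, true);
    #undef EXPLICIT_COMMON_DECL
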
@@ -1731,48 +1746,43 @@ EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
// See comments in runtime_support_asm.S
extern "C" MethodAndCode artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
}
extern "C" MethodAndCode artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
}
extern "C" MethodAndCode artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
}
extern "C" MethodAndCode artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
}
extern "C" MethodAndCode artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
}
@@ -1780,7 +1790,8 @@ extern "C" MethodAndCode artInvokeVirtualTrampolineWithAccessCheck(uint32_t meth
extern "C" MethodAndCode artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
mirror::Object* this_object,
mirror::ArtMethod* caller_method,
- Thread* self, mirror::ArtMethod** sp)
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method;
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 751cdb6181..99633a380b 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -192,7 +192,9 @@ TEST_F(ExceptionTest, StackTraceElement) {
fake_stack.push_back(0);
// Set up thread to appear as if we called out of method_g_ at pc dex 3
- thread->SetTopOfStack(reinterpret_cast<mirror::ArtMethod**>(&fake_stack[0]), method_g_->ToNativePc(dex_pc)); // return pc
+ thread->SetTopOfStack(
+ reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0]),
+ method_g_->ToNativePc(dex_pc)); // return pc
} else {
// Create/push fake 20-byte shadow frame for method g
fake_stack.push_back(0);
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 15c38c1f4b..6b216c7e89 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -250,7 +250,8 @@ bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
manager_->GetMethodAndReturnPCAndSP(context, &method, &return_pc, &sp);
Thread* self = Thread::Current();
// Inside of generated code, sp[0] is the method, so sp is the frame.
- mirror::ArtMethod** frame = reinterpret_cast<mirror::ArtMethod**>(sp);
+ StackReference<mirror::ArtMethod>* frame =
+ reinterpret_cast<StackReference<mirror::ArtMethod>*>(sp);
self->SetTopOfStack(frame, 0); // Since we don't necessarily have a dex pc, pass in 0.
self->DumpJavaStack(LOG(ERROR));
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index e03b6f71ae..a962f0693f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1161,7 +1161,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
}
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
if (ptr == nullptr) {
- ThrowOutOfMemoryError(self, alloc_size, false);
+ ThrowOutOfMemoryError(self, alloc_size, allocator == kAllocatorTypeLOS);
}
return ptr;
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e11671b7c7..e568b36ae9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -408,7 +408,7 @@ class Heap {
// Implements java.lang.Runtime.freeMemory.
size_t GetFreeMemory() const {
- return GetTotalMemory() - num_bytes_allocated_.LoadSequentiallyConsistent();
+ return GetMaxMemory() - num_bytes_allocated_.LoadSequentiallyConsistent();
}
// get the space that corresponds to an object's address. Current implementation searches all
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index f2e059d6bc..8ff7086ecd 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -51,20 +51,12 @@ class HandleScope {
return header_size + data_size;
}
- // Get the size of the handle scope for the number of entries, with padding added for potential alignment.
- static size_t GetAlignedHandleScopeSize(uint32_t num_references) {
- size_t handle_scope_size = SizeOf(num_references);
- return RoundUp(handle_scope_size, 8);
- }
-
- // Get the size of the handle scope for the number of entries, with padding added for potential alignment.
- static size_t GetAlignedHandleScopeSizeTarget(size_t pointer_size, uint32_t num_references) {
+ // Returns the size of a HandleScope containing num_references handles.
+ static size_t SizeOf(size_t pointer_size, uint32_t num_references) {
// Assume that the layout is packed.
size_t header_size = pointer_size + sizeof(number_of_references_);
- // This assumes there is no layout change between 32 and 64b.
size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
- size_t handle_scope_size = header_size + data_size;
- return RoundUp(handle_scope_size, 8);
+ return header_size + data_size;
}
// Link to previous HandleScope or null.
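
With the aligned-size helpers gone, HandleScope::SizeOf reports only the packed size (header plus one reference slot per entry), and callers such as the ComputeLayout code above do their own alignment. The computation, restated as a standalone sketch; the 4-byte slot size is an assumption matching StackReference<mirror::Object>:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kStackRefSize = 4;  // assumed reference slot size

    // Packed size only: a link pointer plus a 32-bit reference count,
    // followed by the reference slots, with no trailing alignment padding.
    size_t HandleScopeSizeOf(size_t pointer_size, uint32_t num_references) {
      size_t header_size = pointer_size + sizeof(uint32_t);  // link_ + count
      return header_size + kStackRefSize * num_references;
    }
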
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index f77a0f6d35..9cfba8d5d7 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -33,7 +33,7 @@ static void UnstartedRuntimeJni(Thread* self, ArtMethod* method,
DCHECK_GE(length, 0);
mirror::Class* element_class = reinterpret_cast<Object*>(args[0])->AsClass();
Runtime* runtime = Runtime::Current();
- mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(self, element_class);
+ mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(self, &element_class);
DCHECK(array_class != nullptr);
gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
result->SetL(mirror::Array::Alloc<true>(self, array_class, length,
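
FindArrayClass now takes the element class by pointer. A plausible reading (an assumption about the motivation) is that the lookup can allocate and trigger a moving collection, so the callee writes the possibly-relocated class back through the pointer; the pattern, sketched with stand-in types:

    // Stand-in types; MaybeMoveDuringGc models an operation that may
    // relocate the object a pointer refers to.
    struct Class {};

    Class* MaybeMoveDuringGc(Class* c) { return c; }  // no real GC here

    // Taking Class** lets the callee refresh the caller's local after any
    // potentially-moving operation, so both sides keep a valid reference.
    Class* FindArrayClassSketch(Class** element_class) {
      *element_class = MaybeMoveDuringGc(*element_class);
      return *element_class;  // placeholder: the real code returns the array class
    }
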
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index f69fecc6bb..029af8d88c 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -84,6 +84,17 @@ extern JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh,
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
+// Workaround for b/14882674 where clang allocates stack for each ThrowLocation created by calls to
+// ShadowFrame::GetCurrentLocationForThrow(). Moving the call here keeps the interpreter itself
+// from doing such allocations.
+static inline void ThrowNullPointerExceptionFromInterpreter(const ShadowFrame& shadow_frame)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) SOMETIMES_INLINE;
+
+static inline void ThrowNullPointerExceptionFromInterpreter(
+ const ShadowFrame& shadow_frame) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+}
+
static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
ref->MonitorEnter(self);
}
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 9a274f618a..99153c8498 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -462,7 +462,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorEnter(self, obj);
@@ -474,7 +474,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorExit(self, obj);
@@ -516,7 +516,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
if (UNLIKELY(array == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
@@ -957,7 +957,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -975,7 +975,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(AGET_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -993,7 +993,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(AGET_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1011,7 +1011,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(AGET_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1029,7 +1029,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(AGET) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1047,7 +1047,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(AGET_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1065,7 +1065,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(AGET_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1083,7 +1083,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1102,7 +1102,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(APUT_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1121,7 +1121,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(APUT_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1140,7 +1140,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(APUT_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1159,7 +1159,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(APUT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1178,7 +1178,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(APUT_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
@@ -1197,7 +1197,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
HANDLE_INSTRUCTION_START(APUT_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 68759ad65a..5e4f5be51d 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -375,7 +375,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorEnter(self, obj);
@@ -387,7 +387,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorExit(self, obj);
@@ -429,7 +429,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
if (UNLIKELY(array == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
@@ -855,7 +855,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -873,7 +873,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -891,7 +891,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -909,7 +909,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -927,7 +927,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -945,7 +945,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -963,7 +963,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -981,7 +981,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1000,7 +1000,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1019,7 +1019,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1038,7 +1038,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1057,7 +1057,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1076,7 +1076,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1095,7 +1095,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromInterpreter(shadow_frame);
HANDLE_PENDING_EXCEPTION();
break;
}
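
Note: every interpreter null-check above now funnels through ThrowNullPointerExceptionFromInterpreter instead of repeating shadow_frame.GetCurrentLocationForThrow() at each opcode handler. A minimal sketch of that wrapper pattern, using stand-in types rather than the real ART signatures:

    struct ThrowLocation {};  // stand-in; the real one carries method, object and dex pc
    struct ShadowFrame {
      ThrowLocation GetCurrentLocationForThrow() const { return ThrowLocation{}; }
    };

    void ThrowNullPointerExceptionFromDexPC(const ThrowLocation&) {
      // stub: the real helper formats and raises the NPE
    }

    // The single entry point each opcode handler now calls:
    inline void ThrowNullPointerExceptionFromInterpreter(const ShadowFrame& frame) {
      ThrowNullPointerExceptionFromDexPC(frame.GetCurrentLocationForThrow());
    }

The call sites shrink and the location-derivation logic gets a single owner.
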
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index a660183bcf..f1284db015 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2058,7 +2058,7 @@ class JNI {
return nullptr;
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- array_class = class_linker->FindArrayClass(soa.Self(), element_class);
+ array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
if (UNLIKELY(array_class == nullptr)) {
return nullptr;
}
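
Note: FindArrayClass now takes mirror::Class** rather than mirror::Class*. A plausible reading of the motivation: resolving the array class can cross a suspend point where a moving collector relocates the element class, so the callee needs a slot it can re-read and write back. A simplified sketch with stand-in types, not the real ClassLinker API:

    class Class;

    Class* MaybeRelocate(Class* c) { return c; }  // stand-in for a GC move

    Class* FindArrayClass(Class** element_class) {
      // Work here may suspend; refresh the caller's pointer afterwards so
      // both sides agree on the object's current address.
      *element_class = MaybeRelocate(*element_class);
      // ... look up or create the array class for *element_class ...
      return nullptr;  // placeholder result for the sketch
    }

The same signature change recurs below in array.cc, dalvik_system_VMRuntime.cc, java_lang_reflect_Array.cc and reg_type.cc.
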
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 49e0b54758..892e7f4acd 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -206,8 +206,6 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
// MAP_32BIT only available on x86_64.
void* actual = MAP_FAILED;
if (low_4gb && expected == nullptr) {
- flags |= MAP_FIXED;
-
bool first_run = true;
for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
@@ -243,7 +241,14 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
0);
if (actual != MAP_FAILED) {
- break;
+ // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
+ // 4GB. If this is the case, unmap and retry.
+ if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count < 4 * GB) {
+ break;
+ } else {
+ munmap(actual, page_aligned_byte_count);
+ actual = MAP_FAILED;
+ }
}
} else {
// Skip over last page.
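
Note: the low_4gb path stops forcing MAP_FIXED and instead range-checks whatever address the kernel returns, unmapping and retrying when the mapping does not fit below 4GB. The loop in isolation (POSIX mmap/munmap; constants simplified and a 64-bit build assumed):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    void* MapBelow4GB(uintptr_t hint, size_t bytes) {
      constexpr uintptr_t k4GB = UINT64_C(4) * 1024 * 1024 * 1024;
      constexpr uintptr_t kPage = 4096;  // simplified page size
      for (uintptr_t ptr = hint; ptr < k4GB; ptr += kPage) {
        void* actual = mmap(reinterpret_cast<void*>(ptr), bytes,
                            PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (actual == MAP_FAILED) {
          continue;  // try the next candidate address
        }
        if (reinterpret_cast<uintptr_t>(actual) + bytes < k4GB) {
          return actual;  // the whole mapping landed in the low 4GB
        }
        // Without MAP_FIXED the hint is only advisory; the kernel went high.
        munmap(actual, bytes);
      }
      return nullptr;
    }

Unlike MAP_FIXED, this cannot silently clobber an existing mapping at the hint address, at the cost of one extra check per attempt.
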
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 107664381a..f7b573729b 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -93,15 +93,17 @@ Array* Array::CreateMultiArray(Thread* self, Handle<Class> element_class,
// Find/generate the array class.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ mirror::Class* element_class_ptr = element_class.Get();
StackHandleScope<1> hs(self);
Handle<mirror::Class> array_class(
- hs.NewHandle(class_linker->FindArrayClass(self, element_class.Get())));
+ hs.NewHandle(class_linker->FindArrayClass(self, &element_class_ptr)));
if (UNLIKELY(array_class.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
return nullptr;
}
for (int32_t i = 1; i < dimensions->GetLength(); ++i) {
- array_class.Assign(class_linker->FindArrayClass(self, array_class.Get()));
+ mirror::Class* array_class_ptr = array_class.Get();
+ array_class.Assign(class_linker->FindArrayClass(self, &array_class_ptr));
if (UNLIKELY(array_class.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
return nullptr;
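
Note: CreateMultiArray has to bridge between Handle<mirror::Class> and the new Class** parameter: copy the pointer out of the handle, let the callee update the local across any suspend point, then publish the result back. The shape of that bridge with stand-in types:

    class Class;
    Class* FindArrayClass(Class** element) { return *element; }  // stub for the sketch

    template <typename T>
    class Handle {  // stand-in for ART's GC-visible Handle<T>
     public:
      explicit Handle(T* ref) : ref_(ref) {}
      T* Get() const { return ref_; }
      void Assign(T* ref) { ref_ = ref; }
     private:
      T* ref_;
    };

    Class* NextDimension(Handle<Class>* array_class) {
      Class* raw = array_class->Get();        // copy out of the handle
      Class* result = FindArrayClass(&raw);   // callee may rewrite the raw slot
      array_class->Assign(result);            // publish back under the handle
      return result;
    }
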
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 39efa58ab1..5f4619b394 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -296,10 +296,16 @@ inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
// Generic JNI frame.
DCHECK(IsNative());
uint32_t handle_refs = MethodHelper(this).GetNumberOfReferenceArgsWithoutReceiver() + 1;
- size_t scope_size = HandleScope::GetAlignedHandleScopeSize(handle_refs);
+ size_t scope_size = HandleScope::SizeOf(handle_refs);
QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- return QuickMethodFrameInfo(callee_info.FrameSizeInBytes() + scope_size,
- callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+
+ // Callee saves + handle scope + method ref + alignment
+ size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
+ - kPointerSize // callee-save frame stores a whole method pointer
+ + sizeof(StackReference<mirror::ArtMethod>),
+ kStackAlignment);
+
+ return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
}
const void* code_pointer = EntryPointToCodePointer(entry_point);
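
Note: the generic JNI frame size is no longer just callee-saves plus handle scope: the callee-save layout reserves a full Method* slot, the frame actually stores a 4-byte StackReference, and the adjusted sum is re-aligned. Worked arithmetic under assumed 64-bit values (the constants are illustrative, not ART's):

    #include <cstddef>

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    constexpr size_t kPointerSize = 8;    // assumed 64-bit target
    constexpr size_t kStackRefSize = 4;   // assumed sizeof(StackReference<ArtMethod>)
    constexpr size_t kStackAlignment = 16;

    constexpr size_t GenericJniFrameSize(size_t callee_save_bytes, size_t scope_bytes) {
      return RoundUp(callee_save_bytes + scope_bytes - kPointerSize + kStackRefSize,
                     kStackAlignment);
    }

    // e.g. a 176-byte callee-save frame plus a 40-byte handle scope:
    // 176 + 40 - 8 + 4 = 212, rounded up to 224.
    static_assert(GenericJniFrameSize(176, 40) == 224, "illustrative layout math");
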
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index d55b5450c3..e5cc6716a4 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -67,7 +67,8 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle
return nullptr;
}
Runtime* runtime = Runtime::Current();
- mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(), element_class);
+ mirror::Class* array_class =
+ runtime->GetClassLinker()->FindArrayClass(soa.Self(), &element_class);
if (UNLIKELY(array_class == nullptr)) {
return nullptr;
}
@@ -90,7 +91,7 @@ static jobject VMRuntime_newUnpaddedArray(JNIEnv* env, jobject, jclass javaEleme
return nullptr;
}
Runtime* runtime = Runtime::Current();
- mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(), element_class);
+ mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(), &element_class);
if (UNLIKELY(array_class == nullptr)) {
return nullptr;
}
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index db77437db4..eae4584019 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -46,14 +46,14 @@ static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementCla
static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementClass, jint length) {
ScopedFastNativeObjectAccess soa(env);
DCHECK(javaElementClass != NULL);
- mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
if (UNLIKELY(length < 0)) {
ThrowNegativeArraySizeException(length);
return NULL;
}
+ mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), element_class);
+ mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
if (UNLIKELY(array_class == NULL)) {
CHECK(soa.Self()->IsExceptionPending());
return NULL;
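
Note: besides the signature change, the Decode call moves below the negative-length check, so the raw mirror::Class* is only materialized on the path that uses it. The before/after shape with stand-in names:

    class Class;
    Class* Decode(void* java_class) { return static_cast<Class*>(java_class); }  // stub
    void ThrowNegativeArraySizeException(int) {}                                 // stub

    Class* ElementClassForArray(void* java_class, int length) {
      if (length < 0) {
        ThrowNegativeArraySizeException(length);
        return nullptr;  // no raw pointer is ever created on the error path
      }
      // Decode only once we know the result is immediately used.
      return Decode(java_class);
    }
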
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index 822aefa43c..606d62d8ce 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -31,7 +31,7 @@ class ScopedFastNativeObjectAccess : public ScopedObjectAccessAlreadyRunnable {
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
: ScopedObjectAccessAlreadyRunnable(env) {
Locks::mutator_lock_->AssertSharedHeld(Self());
- DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsFastNative());
+ DCHECK(Self()->GetManagedStack()->GetTopQuickFrame()->AsMirrorPtr()->IsFastNative());
// Don't work with raw objects in non-runnable states.
DCHECK_EQ(Self()->GetState(), kRunnable);
}
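
Note: this DCHECK is the first user of the type switch threaded through the rest of the patch: the top quick frame is now a StackReference<mirror::ArtMethod> (a 32-bit compressed reference) instead of an ArtMethod**, so reads go through AsMirrorPtr(). A minimal model of the class, not ART's real definition; keeping heap addresses representable in 32 bits is plausibly also why mem_map.cc above insists on the low 4GB:

    #include <cstdint>

    template <typename MirrorType>
    class StackReference {  // minimal model
     public:
      MirrorType* AsMirrorPtr() const {
        return reinterpret_cast<MirrorType*>(static_cast<uintptr_t>(reference_));
      }
      void Assign(MirrorType* ptr) {
        reference_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
      }
     private:
      uint32_t reference_;  // compressed: assumes the referent sits below 4GB
    };
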
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8300195d58..b9cec40ebf 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -16,25 +16,105 @@
#include "quick_exception_handler.h"
-#include "catch_block_stack_visitor.h"
-#include "deoptimize_stack_visitor.h"
+#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
-#include "mirror/art_method-inl.h"
#include "handle_scope-inl.h"
+#include "mirror/art_method-inl.h"
+#include "verifier/method_verifier.h"
namespace art {
+static constexpr bool kDebugExceptionDelivery = false;
+static constexpr size_t kInvalidFrameId = 0xffffffff;
+
QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimization)
: self_(self), context_(self->GetLongJumpContext()), is_deoptimization_(is_deoptimization),
method_tracing_active_(is_deoptimization ||
Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
- handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_dex_pc_(0),
- clear_exception_(false), handler_frame_id_(kInvalidFrameId) {
+ handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_method_(nullptr),
+ handler_dex_pc_(0), clear_exception_(false), handler_frame_id_(kInvalidFrameId) {
}
+// Finds the catch handler, if any, for a pending exception; deoptimization is handled separately below.
+class CatchBlockStackVisitor FINAL : public StackVisitor {
+ public:
+ CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
+ QuickExceptionHandler* exception_handler)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(self, context), self_(self), exception_(exception),
+ exception_handler_(exception_handler) {
+ }
+
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = GetMethod();
+ exception_handler_->SetHandlerFrameId(GetFrameId());
+ if (method == nullptr) {
+      // This is the upcall; we remember the frame and last pc so that we may long jump to them.
+ exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
+ exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ uint32_t next_dex_pc;
+ mirror::ArtMethod* next_art_method;
+ bool has_next = GetNextMethodAndDexPc(&next_art_method, &next_dex_pc);
+ // Report the method that did the down call as the handler.
+ exception_handler_->SetHandlerDexPc(next_dex_pc);
+ exception_handler_->SetHandlerMethod(next_art_method);
+ if (!has_next) {
+      // No next method? Then this is the unhandled-exception case; check that the
+      // handler state was left at its defaults for it.
+ DCHECK_EQ(0U, exception_handler_->GetHandlerDexPc());
+ DCHECK(nullptr == exception_handler_->GetHandlerMethod());
+ }
+ return false; // End stack walk.
+ }
+ if (method->IsRuntimeMethod()) {
+ // Ignore callee save method.
+ DCHECK(method->IsCalleeSaveMethod());
+ return true;
+ }
+ return HandleTryItems(method);
+ }
+
+ private:
+ bool HandleTryItems(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t dex_pc = DexFile::kDexNoIndex;
+ if (!method->IsNative()) {
+ dex_pc = GetDexPc();
+ }
+ if (dex_pc != DexFile::kDexNoIndex) {
+ bool clear_exception = false;
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> to_find(hs.NewHandle((*exception_)->GetClass()));
+ uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception);
+ exception_handler_->SetClearException(clear_exception);
+ if (found_dex_pc != DexFile::kDexNoIndex) {
+ exception_handler_->SetHandlerMethod(method);
+ exception_handler_->SetHandlerDexPc(found_dex_pc);
+ exception_handler_->SetHandlerQuickFramePc(method->ToNativePc(found_dex_pc));
+ exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ return false; // End stack walk.
+ }
+ }
+ return true; // Continue stack walk.
+ }
+
+ Thread* const self_;
+ // The exception we're looking for the catch block of.
+ Handle<mirror::Throwable>* exception_;
+ // The quick exception handler we're visiting for.
+ QuickExceptionHandler* const exception_handler_;
+
+ DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
+};
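
Note: CatchBlockStackVisitor above and DeoptimizeStackVisitor below share one StackVisitor convention: WalkStack() invokes VisitFrame() once per frame and stops at the first false. That skeleton in miniature, with a stand-in frame model rather than ART's real stack machinery:

    #include <vector>

    struct Frame { bool is_upcall; bool is_runtime; bool has_handler; };

    class MiniStackVisitor {
     public:
      virtual ~MiniStackVisitor() {}
      virtual bool VisitFrame(const Frame& f) = 0;
      void WalkStack(const std::vector<Frame>& frames) {
        for (const Frame& f : frames) {
          if (!VisitFrame(f)) {
            return;  // the visitor found its target; stop the walk
          }
        }
      }
    };

    class MiniCatchFinder : public MiniStackVisitor {
      bool VisitFrame(const Frame& f) override {
        if (f.is_upcall) return false;   // record the long-jump target, end walk
        if (f.is_runtime) return true;   // skip callee-save frames
        return !f.has_handler;           // stop once a catch handler matches
      }
    };
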
+
void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
mirror::Throwable* exception) {
DCHECK(!is_deoptimization_);
+ if (kDebugExceptionDelivery) {
+ mirror::String* msg = exception->GetDetailMessage();
+ std::string str_msg(msg != nullptr ? msg->ToModifiedUtf8() : "");
+ self_->DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
+ << ": " << str_msg << "\n");
+ }
StackHandleScope<1> hs(self_);
Handle<mirror::Throwable> exception_ref(hs.NewHandle(exception));
@@ -42,14 +122,14 @@ void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this);
visitor.WalkStack(true);
- mirror::ArtMethod* catch_method = *handler_quick_frame_;
if (kDebugExceptionDelivery) {
- if (catch_method == nullptr) {
+ if (handler_quick_frame_->AsMirrorPtr() == nullptr) {
LOG(INFO) << "Handler is upcall";
- } else {
- const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
- LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
+ }
+ if (handler_method_ != nullptr) {
+ const DexFile& dex_file = *handler_method_->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ int line_number = dex_file.GetLineNumFromPC(handler_method_, handler_dex_pc_);
+ LOG(INFO) << "Handler: " << PrettyMethod(handler_method_) << " (line: " << line_number << ")";
}
}
if (clear_exception_) {
@@ -62,12 +142,94 @@ void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
// The debugger may suspend this thread and walk its stack. Let's do this before popping
// instrumentation frames.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- instrumentation->ExceptionCaughtEvent(self_, throw_location, catch_method, handler_dex_pc_,
+ instrumentation->ExceptionCaughtEvent(self_, throw_location, handler_method_, handler_dex_pc_,
exception_ref.Get());
}
+// Prepares deoptimization.
+class DeoptimizeStackVisitor FINAL : public StackVisitor {
+ public:
+ DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(self, context), self_(self), exception_handler_(exception_handler),
+ prev_shadow_frame_(nullptr) {
+ CHECK(!self_->HasDeoptimizationShadowFrame());
+ }
+
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ exception_handler_->SetHandlerFrameId(GetFrameId());
+ mirror::ArtMethod* method = GetMethod();
+ if (method == nullptr) {
+      // This is the upcall; we remember the frame and last pc so that we may long jump to them.
+ exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
+ exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ return false; // End stack walk.
+ } else if (method->IsRuntimeMethod()) {
+ // Ignore callee save method.
+ DCHECK(method->IsCalleeSaveMethod());
+ return true;
+ } else {
+ return HandleDeoptimization(method);
+ }
+ }
+
+ private:
+ bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ MethodHelper mh(m);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ CHECK(code_item != nullptr);
+ uint16_t num_regs = code_item->registers_size_;
+ uint32_t dex_pc = GetDexPc();
+ const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
+ uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
+ ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
+ StackHandleScope<2> hs(self_);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
+ verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
+ &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
+ m->GetAccessFlags(), false, true, true);
+ verifier.Verify();
+ std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
+ for (uint16_t reg = 0; reg < num_regs; ++reg) {
+ VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
+ switch (kind) {
+ case kUndefined:
+ new_frame->SetVReg(reg, 0xEBADDE09);
+ break;
+ case kConstant:
+ new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
+ break;
+ case kReferenceVReg:
+ new_frame->SetVRegReference(reg,
+ reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
+ break;
+ default:
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ break;
+ }
+ }
+ if (prev_shadow_frame_ != nullptr) {
+ prev_shadow_frame_->SetLink(new_frame);
+ } else {
+ self_->SetDeoptimizationShadowFrame(new_frame);
+ }
+ prev_shadow_frame_ = new_frame;
+ return true;
+ }
+
+ Thread* const self_;
+ QuickExceptionHandler* const exception_handler_;
+ ShadowFrame* prev_shadow_frame_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizeStackVisitor);
+};
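
Note: HandleDeoptimization above rebuilds an interpreter frame from the verifier's register description. DescribeVRegs returns (kind, payload) pairs flattened into a single vector, which is why the loop indexes kinds.at(reg * 2) and kinds.at(reg * 2 + 1). A simplified sketch of that fill loop (stand-in types; the live quick-register read is stubbed out):

    #include <cstdint>
    #include <vector>

    enum VRegKind { kUndefined, kConstant, kReferenceVReg, kIntVReg };

    struct MiniFrame {
      explicit MiniFrame(uint16_t n) : regs(n) {}
      void SetVReg(uint16_t reg, uint32_t value) { regs.at(reg) = value; }
      std::vector<uint32_t> regs;
    };

    void FillFrame(MiniFrame* frame, const std::vector<int32_t>& kinds, uint16_t num_regs) {
      for (uint16_t reg = 0; reg < num_regs; ++reg) {
        VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
        switch (kind) {
          case kUndefined:
            frame->SetVReg(reg, 0xEBADDE09);  // poison value for dead registers
            break;
          case kConstant:
            frame->SetVReg(reg, kinds.at(reg * 2 + 1));  // payload holds the constant
            break;
          default:
            frame->SetVReg(reg, 0);  // the real code reads the live quick register
            break;
        }
      }
    }
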
+
void QuickExceptionHandler::DeoptimizeStack() {
DCHECK(is_deoptimization_);
+ if (kDebugExceptionDelivery) {
+ self_->DumpStack(LOG(INFO) << "Deoptimizing: ");
+ }
DeoptimizeStackVisitor visitor(self_, context_, this);
visitor.WalkStack(true);
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index ef3766c0a6..a4229b33fc 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -19,6 +19,7 @@
#include "base/logging.h"
#include "base/mutex.h"
+#include "stack.h" // StackReference
namespace art {
@@ -31,9 +32,6 @@ class Thread;
class ThrowLocation;
class ShadowFrame;
-static constexpr bool kDebugExceptionDelivery = false;
-static constexpr size_t kInvalidFrameId = 0xffffffff;
-
// Manages exception delivery for Quick backend. Not used by Portable backend.
class QuickExceptionHandler {
public:
@@ -50,7 +48,7 @@ class QuickExceptionHandler {
void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetHandlerQuickFrame(mirror::ArtMethod** handler_quick_frame) {
+ void SetHandlerQuickFrame(StackReference<mirror::ArtMethod>* handler_quick_frame) {
handler_quick_frame_ = handler_quick_frame;
}
@@ -58,6 +56,18 @@ class QuickExceptionHandler {
handler_quick_frame_pc_ = handler_quick_frame_pc;
}
+ mirror::ArtMethod* GetHandlerMethod() const {
+ return handler_method_;
+ }
+
+ void SetHandlerMethod(mirror::ArtMethod* handler_quick_method) {
+ handler_method_ = handler_quick_method;
+ }
+
+ uint32_t GetHandlerDexPc() const {
+ return handler_dex_pc_;
+ }
+
void SetHandlerDexPc(uint32_t dex_pc) {
handler_dex_pc_ = dex_pc;
}
@@ -77,10 +87,12 @@ class QuickExceptionHandler {
// Is method tracing active?
const bool method_tracing_active_;
// Quick frame with found handler or last frame if no handler found.
- mirror::ArtMethod** handler_quick_frame_;
+ StackReference<mirror::ArtMethod>* handler_quick_frame_;
// PC to branch to for the handler.
uintptr_t handler_quick_frame_pc_;
- // Associated dex PC.
+ // The handler method to report to the debugger.
+ mirror::ArtMethod* handler_method_;
+  // The handler's dex PC; zero implies an uncaught exception.
uint32_t handler_dex_pc_;
// Should the exception be cleared as the catch block has no move-exception?
bool clear_exception_;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index be1fba4fa2..ef09816981 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -95,6 +95,13 @@ StackVisitor::StackVisitor(Thread* thread, Context* context)
DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}
+StackVisitor::StackVisitor(Thread* thread, Context* context, size_t num_frames)
+ : thread_(thread), cur_shadow_frame_(NULL),
+ cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(num_frames), cur_depth_(0),
+ context_(context) {
+ DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+}
+
uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
if (cur_shadow_frame_ != NULL) {
return cur_shadow_frame_->GetDexPC();
@@ -205,16 +212,16 @@ void StackVisitor::SetGPR(uint32_t reg, uintptr_t value) {
}
uintptr_t StackVisitor::GetReturnPc() const {
- mirror::ArtMethod** sp = GetCurrentQuickFrame();
+ byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
DCHECK(sp != NULL);
- byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
+ byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
- mirror::ArtMethod** sp = GetCurrentQuickFrame();
+ byte* sp = reinterpret_cast<byte*>(GetCurrentQuickFrame());
CHECK(sp != NULL);
- byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
+ byte* pc_addr = sp + GetMethod()->GetReturnPcOffsetInBytes();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
@@ -223,7 +230,7 @@ size_t StackVisitor::ComputeNumFrames(Thread* thread) {
explicit NumFramesVisitor(Thread* thread)
: StackVisitor(thread, NULL), frames(0) {}
- virtual bool VisitFrame() {
+ bool VisitFrame() OVERRIDE {
frames++;
return true;
}
@@ -235,12 +242,47 @@ size_t StackVisitor::ComputeNumFrames(Thread* thread) {
return visitor.frames;
}
+bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc) {
+ struct HasMoreFramesVisitor : public StackVisitor {
+ explicit HasMoreFramesVisitor(Thread* thread, size_t num_frames, size_t frame_height)
+ : StackVisitor(thread, nullptr, num_frames), frame_height_(frame_height),
+ found_frame_(false), has_more_frames_(false), next_method_(nullptr), next_dex_pc_(0) {
+ }
+
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (found_frame_) {
+ mirror::ArtMethod* method = GetMethod();
+ if (method != nullptr && !method->IsRuntimeMethod()) {
+ has_more_frames_ = true;
+ next_method_ = method;
+ next_dex_pc_ = GetDexPc();
+ return false; // End stack walk once next method is found.
+ }
+ } else if (GetFrameHeight() == frame_height_) {
+ found_frame_ = true;
+ }
+ return true;
+ }
+
+ size_t frame_height_;
+ bool found_frame_;
+ bool has_more_frames_;
+ mirror::ArtMethod* next_method_;
+ uint32_t next_dex_pc_;
+ };
+ HasMoreFramesVisitor visitor(thread_, GetNumFrames(), GetFrameHeight());
+ visitor.WalkStack(true);
+ *next_method = visitor.next_method_;
+ *next_dex_pc = visitor.next_dex_pc_;
+ return visitor.has_more_frames_;
+}
+
void StackVisitor::DescribeStack(Thread* thread) {
struct DescribeStackVisitor : public StackVisitor {
explicit DescribeStackVisitor(Thread* thread)
: StackVisitor(thread, NULL) {}
- virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
return true;
}
@@ -307,7 +349,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (cur_quick_frame_ != NULL) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
DCHECK(current_fragment->GetTopShadowFrame() == NULL);
- mirror::ArtMethod* method = *cur_quick_frame_;
+ mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
while (method != NULL) {
SanityCheckFrame();
bool should_continue = VisitFrame();
@@ -352,9 +394,9 @@ void StackVisitor::WalkStack(bool include_transitions) {
}
cur_quick_frame_pc_ = return_pc;
byte* next_frame = reinterpret_cast<byte*>(cur_quick_frame_) + frame_size;
- cur_quick_frame_ = reinterpret_cast<mirror::ArtMethod**>(next_frame);
+ cur_quick_frame_ = reinterpret_cast<StackReference<mirror::ArtMethod>*>(next_frame);
cur_depth_++;
- method = *cur_quick_frame_;
+ method = cur_quick_frame_->AsMirrorPtr();
}
} else if (cur_shadow_frame_ != NULL) {
do {
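
Note: GetNextMethodAndDexPc is what lets CatchBlockStackVisitor report the down-call method at an upcall: a second walk that stays passive until it reaches the current frame's height, then returns the first non-runtime frame past it. The idea in miniature (the sketch uses a plain index as the "height"):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct SimpleFrame { bool runtime_method; uint32_t dex_pc; };

    bool NextAfterHeight(const std::vector<SimpleFrame>& stack, size_t frame_height,
                         uint32_t* next_dex_pc) {
      bool found = false;
      for (size_t h = 0; h < stack.size(); ++h) {
        if (found && !stack[h].runtime_method) {
          *next_dex_pc = stack[h].dex_pc;
          return true;  // the frame that made the down call
        }
        if (h == frame_height) {
          found = true;  // from here on, look for the next real method
        }
      }
      return false;  // no managed caller above this frame
    }

Passing the already-computed num_frames into the new private StackVisitor constructor spares the nested walk from recounting the stack.
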
diff --git a/runtime/stack.h b/runtime/stack.h
index 2e32f51343..fabdd4f46a 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -429,11 +429,11 @@ class PACKED(4) ManagedStack {
return link_;
}
- mirror::ArtMethod** GetTopQuickFrame() const {
+ StackReference<mirror::ArtMethod>* GetTopQuickFrame() const {
return top_quick_frame_;
}
- void SetTopQuickFrame(mirror::ArtMethod** top) {
+ void SetTopQuickFrame(StackReference<mirror::ArtMethod>* top) {
DCHECK(top_shadow_frame_ == NULL);
top_quick_frame_ = top;
}
@@ -491,7 +491,7 @@ class PACKED(4) ManagedStack {
private:
ManagedStack* link_;
ShadowFrame* top_shadow_frame_;
- mirror::ArtMethod** top_quick_frame_;
+ StackReference<mirror::ArtMethod>* top_quick_frame_;
uintptr_t top_quick_frame_pc_;
};
@@ -512,17 +512,7 @@ class StackVisitor {
if (cur_shadow_frame_ != nullptr) {
return cur_shadow_frame_->GetMethod();
} else if (cur_quick_frame_ != nullptr) {
- return *cur_quick_frame_;
- } else {
- return nullptr;
- }
- }
-
- mirror::ArtMethod** GetMethodAddress() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (cur_shadow_frame_ != nullptr) {
- return cur_shadow_frame_->GetMethodAddress();
- } else if (cur_quick_frame_ != nullptr) {
- return cur_quick_frame_;
+ return cur_quick_frame_->AsMirrorPtr();
} else {
return nullptr;
}
@@ -567,6 +557,10 @@ class StackVisitor {
return num_frames_;
}
+ // Get the method and dex pc immediately after the one that's currently being visited.
+ bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
uint32_t GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -578,7 +572,8 @@ class StackVisitor {
void SetGPR(uint32_t reg, uintptr_t value);
// This is a fast-path for getting/setting values in a quick frame.
- uint32_t* GetVRegAddr(mirror::ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
+ uint32_t* GetVRegAddr(StackReference<mirror::ArtMethod>* cur_quick_frame,
+ const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
uint16_t vreg) const {
int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
@@ -679,7 +674,7 @@ class StackVisitor {
return cur_quick_frame_pc_;
}
- mirror::ArtMethod** GetCurrentQuickFrame() const {
+ StackReference<mirror::ArtMethod>* GetCurrentQuickFrame() const {
return cur_quick_frame_;
}
@@ -688,7 +683,7 @@ class StackVisitor {
}
HandleScope* GetCurrentHandleScope() const {
- mirror::ArtMethod** sp = GetCurrentQuickFrame();
+ StackReference<mirror::ArtMethod>* sp = GetCurrentQuickFrame();
     ++sp;  // Skip Method*; the handle scope comes next.
return reinterpret_cast<HandleScope*>(sp);
}
@@ -700,13 +695,17 @@ class StackVisitor {
static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
+  // Private constructor for use when num_frames_ has already been computed.
+ StackVisitor(Thread* thread, Context* context, size_t num_frames)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame(uint32_t depth) const;
void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Thread* const thread_;
ShadowFrame* cur_shadow_frame_;
- mirror::ArtMethod** cur_quick_frame_;
+ StackReference<mirror::ArtMethod>* cur_quick_frame_;
uintptr_t cur_quick_frame_pc_;
   // Lazily computed number of frames in the stack.
size_t num_frames_;
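
Note: one subtlety in GetCurrentHandleScope above: with sp typed as StackReference<mirror::ArtMethod>*, ++sp now advances by the 4-byte reference slot rather than a full pointer, which matches the shrunken method slot in the art_method-inl.h frame math. The arithmetic in isolation, with a stand-in type:

    #include <cstdint>

    struct StackRef32 { uint32_t compressed; };  // stand-in for StackReference<>

    const void* HandleScopeStart(StackRef32* sp) {
      ++sp;  // steps sizeof(StackRef32) == 4 bytes past the method slot
      return sp;
    }
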
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 1355aa1143..758944cd57 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1867,16 +1867,6 @@ void Thread::QuickDeliverException() {
// resolution.
ClearException();
bool is_deoptimization = (exception == GetDeoptimizationException());
- if (kDebugExceptionDelivery) {
- if (!is_deoptimization) {
- mirror::String* msg = exception->GetDetailMessage();
- std::string str_msg(msg != nullptr ? msg->ToModifiedUtf8() : "");
- DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
- << ": " << str_msg << "\n");
- } else {
- DumpStack(LOG(INFO) << "Deoptimizing: ");
- }
- }
QuickExceptionHandler exception_handler(this, is_deoptimization);
if (is_deoptimization) {
exception_handler.DeoptimizeStack();
@@ -2012,9 +2002,14 @@ class ReferenceMapVisitor : public StackVisitor {
private:
void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod** method_addr = GetMethodAddress();
- visitor_(reinterpret_cast<mirror::Object**>(method_addr), 0 /*ignored*/, this);
- mirror::ArtMethod* m = *method_addr;
+ StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
+ mirror::ArtMethod* m = cur_quick_frame->AsMirrorPtr();
+ mirror::ArtMethod* old_method = m;
+ visitor_(reinterpret_cast<mirror::Object**>(&m), 0 /*ignored*/, this);
+ if (m != old_method) {
+ cur_quick_frame->Assign(m);
+ }
+
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
const uint8_t* native_gc_map = m->GetNativeGcMap();
@@ -2035,7 +2030,7 @@ class ReferenceMapVisitor : public StackVisitor {
const VmapTable vmap_table(m->GetVmapTable(code_pointer));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
// For all dex registers in the bitmap
- mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
+ StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
// Does this register hold a reference?
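
Note: VisitQuickFrame can no longer hand the frame slot to the root visitor as an Object** because the slot is compressed, so it visits a local copy and writes back only if the collector moved the method. The pattern in isolation, with a generic visitor standing in for the GC callback:

    class Object;
    using RootVisitor = void (*)(Object** root);

    template <typename StackRef>  // e.g. the StackReference model sketched earlier
    void VisitMethodRoot(StackRef* frame_slot, RootVisitor visit) {
      Object* m = frame_slot->AsMirrorPtr();
      Object* old_m = m;
      visit(&m);                // a moving collector may rewrite m here
      if (m != old_m) {
        frame_slot->Assign(m);  // store the new address back, compressed
      }
    }
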
diff --git a/runtime/thread.h b/runtime/thread.h
index 08bbcaec49..88b4b0de8f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -345,7 +345,7 @@ class Thread {
ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetTopOfStack(mirror::ArtMethod** top_method, uintptr_t pc) {
+ void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
}
@@ -1090,6 +1090,7 @@ class Thread {
friend class Dbg; // For SetStateUnsafe.
friend class gc::collector::SemiSpace; // For getting stack traces.
friend class Runtime; // For CreatePeer.
+ friend class QuickExceptionHandler; // For dumping the stack.
friend class ScopedThreadStateChange;
friend class SignalCatcher; // For SetStateUnsafe.
friend class StubTest; // For accessing entrypoints.
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 8df1e5d6dc..e24c92091c 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -929,7 +929,7 @@ mirror::Class* RegType::ClassJoin(mirror::Class* s, mirror::Class* t) {
}
mirror::Class* common_elem = ClassJoin(s_ct, t_ct);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::Class* array_class = class_linker->FindArrayClass(Thread::Current(), common_elem);
+ mirror::Class* array_class = class_linker->FindArrayClass(Thread::Current(), &common_elem);
DCHECK(array_class != NULL);
return array_class;
} else {