-rw-r--r--  Android.mk | 1
-rw-r--r--  build/Android.common.mk | 1
-rw-r--r--  compiler/dex/arena_allocator.cc | 32
-rw-r--r--  compiler/dex/arena_allocator.h | 9
-rw-r--r--  compiler/dex/arena_bit_vector.cc | 6
-rw-r--r--  compiler/dex/arena_bit_vector.h | 8
-rw-r--r--  compiler/dex/dataflow_iterator-inl.h | 71
-rw-r--r--  compiler/dex/dataflow_iterator.h | 118
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc | 15
-rw-r--r--  compiler/dex/growable_array.h | 10
-rw-r--r--  compiler/dex/mir_analysis.cc | 2
-rw-r--r--  compiler/dex/mir_dataflow.cc | 12
-rw-r--r--  compiler/dex/mir_graph.cc | 17
-rw-r--r--  compiler/dex/mir_graph.h | 25
-rw-r--r--  compiler/dex/mir_optimization.cc | 14
-rw-r--r--  compiler/dex/portable/mir_to_gbc.cc | 10
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc | 10
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc | 11
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h | 1
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 16
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc | 5
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 76
-rw-r--r--  compiler/dex/quick/gen_common.cc | 16
-rw-r--r--  compiler/dex/quick/gen_invoke.cc | 12
-rw-r--r--  compiler/dex/quick/local_optimizations.cc | 52
-rw-r--r--  compiler/dex/quick/mips/assemble_mips.cc | 4
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h | 1
-rw-r--r--  compiler/dex/quick/mips/target_mips.cc | 5
-rw-r--r--  compiler/dex/quick/mir_to_lir-inl.h | 12
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 43
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 28
-rw-r--r--  compiler/dex/quick/ralloc_util.cc | 206
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc | 2
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h | 1
-rw-r--r--  compiler/dex/quick/x86/target_x86.cc | 5
-rw-r--r--  compiler/dex/ssa_transformation.cc | 20
-rw-r--r--  compiler/dex/vreg_analysis.cc | 201
-rw-r--r--  compiler/driver/compiler_driver.cc | 212
-rw-r--r--  compiler/driver/compiler_driver.h | 58
-rw-r--r--  compiler/image_writer.cc | 4
-rw-r--r--  compiler/jni/portable/jni_compiler.cc | 4
-rw-r--r--  compiler/jni/portable/jni_compiler.h | 4
-rw-r--r--  compiler/jni/quick/jni_compiler.cc | 6
-rw-r--r--  compiler/llvm/compiler_llvm.cc | 3
-rw-r--r--  compiler/llvm/gbc_expander.cc | 20
-rw-r--r--  compiler/llvm/llvm_compilation_unit.cc | 4
-rw-r--r--  compiler/oat_test.cc | 2
-rw-r--r--  compiler/utils/dedupe_set.h | 57
-rw-r--r--  compiler/utils/dedupe_set_test.cc | 2
-rw-r--r--  disassembler/Android.mk | 120
-rw-r--r--  disassembler/disassembler.cc (renamed from runtime/disassembler.cc) | 0
-rw-r--r--  disassembler/disassembler.h (renamed from runtime/disassembler.h) | 6
-rw-r--r--  disassembler/disassembler_arm.cc (renamed from runtime/disassembler_arm.cc) | 0
-rw-r--r--  disassembler/disassembler_arm.h (renamed from runtime/disassembler_arm.h) | 6
-rw-r--r--  disassembler/disassembler_mips.cc (renamed from runtime/disassembler_mips.cc) | 0
-rw-r--r--  disassembler/disassembler_mips.h (renamed from runtime/disassembler_mips.h) | 6
-rw-r--r--  disassembler/disassembler_x86.cc (renamed from runtime/disassembler_x86.cc) | 0
-rw-r--r--  disassembler/disassembler_x86.h (renamed from runtime/disassembler_x86.h) | 6
-rw-r--r--  oatdump/Android.mk | 8
-rw-r--r--  oatdump/oatdump.cc | 2
-rw-r--r--  runtime/Android.mk | 7
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc | 38
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 90
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc | 38
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 64
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc | 38
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 7
-rw-r--r--  runtime/base/mutex-inl.h | 126
-rw-r--r--  runtime/base/mutex.cc | 142
-rw-r--r--  runtime/class_linker.cc | 144
-rw-r--r--  runtime/class_linker_test.cc | 24
-rw-r--r--  runtime/common_test.h | 10
-rw-r--r--  runtime/common_throws.cc | 2
-rw-r--r--  runtime/common_throws.h | 3
-rw-r--r--  runtime/debugger.cc | 17
-rw-r--r--  runtime/debugger.h | 2
-rw-r--r--  runtime/dex_file-inl.h | 15
-rw-r--r--  runtime/dex_file.cc | 68
-rw-r--r--  runtime/dex_file.h | 98
-rw-r--r--  runtime/dex_file_test.cc | 27
-rw-r--r--  runtime/dex_file_verifier.cc | 2
-rw-r--r--  runtime/dex_instruction-inl.h | 117
-rw-r--r--  runtime/dex_instruction.h | 183
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 57
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 119
-rw-r--r--  runtime/entrypoints/interpreter/interpreter_entrypoints.cc | 12
-rw-r--r--  runtime/entrypoints/math_entrypoints.cc | 50
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc | 53
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 21
-rw-r--r--  runtime/gc/accounting/mod_union_table-inl.h | 14
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 146
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 55
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 4
-rw-r--r--  runtime/gc/allocator/dlmalloc.h | 2
-rw-r--r--  runtime/gc/collector/mark_sweep-inl.h | 27
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 123
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 42
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h | 4
-rw-r--r--  runtime/gc/heap-inl.h | 188
-rw-r--r--  runtime/gc/heap.cc | 262
-rw-r--r--  runtime/gc/heap.h | 86
-rw-r--r--  runtime/gc/space/dlmalloc_space-inl.h | 4
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 2
-rw-r--r--  runtime/gc/space/large_object_space.cc | 1
-rw-r--r--  runtime/hprof/hprof.cc | 8
-rw-r--r--  runtime/indirect_reference_table.cc | 16
-rw-r--r--  runtime/indirect_reference_table.h | 12
-rw-r--r--  runtime/indirect_reference_table_test.cc | 1
-rw-r--r--  runtime/instrumentation.cc | 23
-rw-r--r--  runtime/instrumentation.h | 2
-rw-r--r--  runtime/intern_table.cc | 17
-rw-r--r--  runtime/intern_table.h | 3
-rw-r--r--  runtime/intern_table_test.cc | 9
-rw-r--r--  runtime/interpreter/interpreter.cc | 2882
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 342
-rw-r--r--  runtime/interpreter/interpreter_common.h | 582
-rw-r--r--  runtime/interpreter/interpreter_goto_table_impl.cc | 2344
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc | 2145
-rw-r--r--  runtime/jdwp/jdwp_handler.cc | 4
-rw-r--r--  runtime/jni_internal.cc | 28
-rw-r--r--  runtime/jni_internal.h | 2
-rw-r--r--  runtime/jni_internal_test.cc | 31
-rw-r--r--  runtime/mirror/array-inl.h | 65
-rw-r--r--  runtime/mirror/array.cc | 33
-rw-r--r--  runtime/mirror/array.h | 14
-rw-r--r--  runtime/mirror/class-inl.h | 19
-rw-r--r--  runtime/mirror/class.cc | 87
-rw-r--r--  runtime/mirror/class.h | 44
-rw-r--r--  runtime/mirror/object.h | 5
-rw-r--r--  runtime/mirror/stack_trace_element.cc | 1
-rw-r--r--  runtime/mirror/string.cc | 8
-rw-r--r--  runtime/mirror/string.h | 1
-rw-r--r--  runtime/monitor.cc | 17
-rw-r--r--  runtime/monitor.h | 8
-rw-r--r--  runtime/native/java_lang_System.cc | 23
-rw-r--r--  runtime/object_utils.h | 136
-rw-r--r--  runtime/reference_table.cc | 10
-rw-r--r--  runtime/reference_table.h | 6
-rw-r--r--  runtime/reflection.cc | 4
-rw-r--r--  runtime/root_visitor.h | 3
-rw-r--r--  runtime/runtime.cc | 67
-rw-r--r--  runtime/runtime.h | 9
-rw-r--r--  runtime/sirt_ref.h | 3
-rw-r--r--  runtime/stack.cc | 4
-rw-r--r--  runtime/stack.h | 15
-rw-r--r--  runtime/thread-inl.h | 13
-rw-r--r--  runtime/thread.cc | 183
-rw-r--r--  runtime/thread.h | 18
-rw-r--r--  runtime/thread_list.cc | 17
-rw-r--r--  runtime/throw_location.cc | 11
-rw-r--r--  runtime/trace.cc | 2
-rw-r--r--  runtime/utf-inl.h | 61
-rw-r--r--  runtime/utf.cc | 36
-rw-r--r--  runtime/utf.h | 5
-rw-r--r--  runtime/utils.cc | 24
-rw-r--r--  runtime/verifier/method_verifier.cc | 181
-rw-r--r--  runtime/verifier/method_verifier.h | 19
-rw-r--r--  runtime/verifier/reg_type.cc | 16
-rw-r--r--  runtime/verifier/reg_type.h | 9
-rw-r--r--  runtime/verifier/reg_type_cache-inl.h | 21
-rw-r--r--  runtime/verifier/reg_type_cache.cc | 97
-rw-r--r--  runtime/verifier/reg_type_cache.h | 63
-rw-r--r--  runtime/verifier/register_line.cc | 3
-rw-r--r--  runtime/verifier/register_line.h | 32
-rw-r--r--  test/Android.mk | 2
-rw-r--r--  test/GetMethodSignature/GetMethodSignature.java (renamed from test/CreateMethodSignature/CreateMethodSignature.java) | 4
-rwxr-xr-x  test/run-test | 2
167 files changed, 9045 insertions(+), 4949 deletions(-)
diff --git a/Android.mk b/Android.mk
index 46a7c1ec3e..0b4b2316fd 100644
--- a/Android.mk
+++ b/Android.mk
@@ -85,6 +85,7 @@ ifneq ($(art_dont_bother),true)
include $(art_path)/runtime/Android.mk
include $(art_path)/compiler/Android.mk
include $(art_path)/dex2oat/Android.mk
+include $(art_path)/disassembler/Android.mk
include $(art_path)/oatdump/Android.mk
include $(art_path)/dalvikvm/Android.mk
include $(art_path)/jdwpspy/Android.mk
diff --git a/build/Android.common.mk b/build/Android.common.mk
index dd0ba4d1d3..0871884b00 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -105,6 +105,7 @@ ART_HOST_SHLIB_EXTENSION ?= .so
ART_C_INCLUDES := \
external/gtest/include \
external/valgrind/main/include \
+ external/valgrind/main \
external/zlib \
frameworks/compile/mclinker/include
diff --git a/compiler/dex/arena_allocator.cc b/compiler/dex/arena_allocator.cc
index 36393e7387..2da806437c 100644
--- a/compiler/dex/arena_allocator.cc
+++ b/compiler/dex/arena_allocator.cc
@@ -19,12 +19,15 @@
#include "arena_allocator.h"
#include "base/logging.h"
#include "base/mutex.h"
+#include "thread-inl.h"
+#include <memcheck/memcheck.h>
namespace art {
// Memmap is a bit slower than malloc according to my measurements.
static constexpr bool kUseMemMap = false;
static constexpr bool kUseMemSet = true && kUseMemMap;
+static constexpr size_t kValgrindRedZoneBytes = 8;
static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
"Misc ",
@@ -107,6 +110,9 @@ Arena* ArenaPool::AllocArena(size_t size) {
void ArenaPool::FreeArena(Arena* arena) {
Thread* self = Thread::Current();
+ if (UNLIKELY(RUNNING_ON_VALGRIND)) {
+ VALGRIND_MAKE_MEM_UNDEFINED(arena->memory_, arena->bytes_allocated_);
+ }
{
MutexLock lock(self, lock_);
arena->next_ = free_arenas_;
@@ -128,7 +134,8 @@ ArenaAllocator::ArenaAllocator(ArenaPool* pool)
end_(nullptr),
ptr_(nullptr),
arena_head_(nullptr),
- num_allocations_(0) {
+ num_allocations_(0),
+ running_on_valgrind_(RUNNING_ON_VALGRIND) {
memset(&alloc_stats_[0], 0, sizeof(alloc_stats_));
}
@@ -140,6 +147,29 @@ void ArenaAllocator::UpdateBytesAllocated() {
}
}
+void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
+ size_t rounded_bytes = (bytes + 3 + kValgrindRedZoneBytes) & ~3;
+ if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
+ // Obtain a new block.
+ ObtainNewArenaForAllocation(rounded_bytes);
+ if (UNLIKELY(ptr_ == nullptr)) {
+ return nullptr;
+ }
+ }
+ if (kCountAllocations) {
+ alloc_stats_[kind] += rounded_bytes;
+ ++num_allocations_;
+ }
+ uint8_t* ret = ptr_;
+ ptr_ += rounded_bytes;
+ // Check that the memory is already zeroed out.
+ for (uint8_t* ptr = ret; ptr < ptr_; ++ptr) {
+ CHECK_EQ(*ptr, 0U);
+ }
+ VALGRIND_MAKE_MEM_NOACCESS(ret + bytes, rounded_bytes - bytes);
+ return ret;
+}
+
ArenaAllocator::~ArenaAllocator() {
// Reclaim all the arenas by giving them back to the thread pool.
UpdateBytesAllocated();
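
For reference, a minimal standalone sketch of the red-zone arithmetic AllocValgrind performs, assuming the same 4-byte alignment and the 8-byte kValgrindRedZoneBytes above; the helper name is illustrative, not part of the ART API:

#include <cassert>
#include <cstddef>

static constexpr size_t kRedZoneBytes = 8;  // stands in for kValgrindRedZoneBytes

// Round a request up so the caller's bytes stay 4-byte aligned and a red
// zone always follows them; AllocValgrind then poisons the tail span
// [ret + bytes, ret + rounded) with VALGRIND_MAKE_MEM_NOACCESS.
static size_t RoundWithRedZone(size_t bytes) {
  return (bytes + 3 + kRedZoneBytes) & ~static_cast<size_t>(3);
}

int main() {
  assert(RoundWithRedZone(5) == 16);  // 5 usable, 3 padding, 8 poisoned
  assert(RoundWithRedZone(8) == 16);  // 8 usable, 8 poisoned
  return 0;
}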
diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h
index dda52a2ed0..d11d67c795 100644
--- a/compiler/dex/arena_allocator.h
+++ b/compiler/dex/arena_allocator.h
@@ -103,6 +103,9 @@ class ArenaAllocator {
// Returns zeroed memory.
void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
+ if (UNLIKELY(running_on_valgrind_)) {
+ return AllocValgrind(bytes, kind);
+ }
bytes = (bytes + 3) & ~3;
if (UNLIKELY(ptr_ + bytes > end_)) {
// Obtain a new block.
@@ -120,6 +123,7 @@ class ArenaAllocator {
return ret;
}
+ void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
void ObtainNewArenaForAllocation(size_t allocation_size);
size_t BytesAllocated() const;
void DumpMemStats(std::ostream& os) const;
@@ -132,10 +136,9 @@ class ArenaAllocator {
uint8_t* end_;
uint8_t* ptr_;
Arena* arena_head_;
-
- // Statistics.
size_t num_allocations_;
- size_t alloc_stats_[kNumAllocKinds]; // Bytes used by various allocation kinds.
+ size_t alloc_stats_[kNumAllocKinds]; // Bytes used by various allocation kinds.
+ bool running_on_valgrind_;
DISALLOW_COPY_AND_ASSIGN(ArenaAllocator);
}; // ArenaAllocator
diff --git a/compiler/dex/arena_bit_vector.cc b/compiler/dex/arena_bit_vector.cc
index 3fa9295276..b921f615b6 100644
--- a/compiler/dex/arena_bit_vector.cc
+++ b/compiler/dex/arena_bit_vector.cc
@@ -87,12 +87,6 @@ void ArenaBitVector::ClearBit(unsigned int num) {
storage_[num >> 5] &= ~check_masks[num & 0x1f];
}
-// Copy a whole vector to the other. Sizes must match.
-void ArenaBitVector::Copy(ArenaBitVector* src) {
- DCHECK_EQ(storage_size_, src->GetStorageSize());
- memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
-}
-
// Intersect with another bit vector. Sizes and expandability must be the same.
void ArenaBitVector::Intersect(const ArenaBitVector* src) {
DCHECK_EQ(storage_size_, src->GetStorageSize());
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index 8bcd628dc0..24a7ce9601 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -44,7 +44,7 @@ class ArenaBitVector {
DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
- if (bit_index_ >= bit_size_) return -1;
+ if (UNLIKELY(bit_index_ >= bit_size_)) return -1;
uint32_t word_index = bit_index_ / 32;
uint32_t word = bit_storage_[word_index];
@@ -54,7 +54,7 @@ class ArenaBitVector {
bit_index_ &= ~0x1f;
do {
word_index++;
- if ((word_index * 32) >= bit_size_) {
+ if (UNLIKELY((word_index * 32) >= bit_size_)) {
bit_index_ = bit_size_;
return -1;
}
@@ -95,7 +95,9 @@ class ArenaBitVector {
bool IsBitSet(unsigned int num);
void ClearAllBits();
void SetInitialBits(unsigned int num_bits);
- void Copy(ArenaBitVector* src);
+ void Copy(ArenaBitVector* src) {
+ memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
+ }
void Intersect(const ArenaBitVector* src2);
void Union(const ArenaBitVector* src);
// Are we equal to another bit vector? Note: expandability attributes must also match.
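
Note that the inlined Copy() above drops the DCHECK_EQ on storage sizes that the removed .cc version carried, so the size-match precondition now rests entirely on callers. A minimal standalone sketch of that contract, with illustrative types rather than the ArenaBitVector API:

#include <cassert>
#include <cstdint>
#include <cstring>

struct BitVector {
  uint32_t storage_size;  // in 32-bit words
  uint32_t* storage;
  void Copy(const BitVector* src) {
    assert(storage_size == src->storage_size);  // the invariant the old DCHECK enforced
    std::memcpy(storage, src->storage, sizeof(uint32_t) * storage_size);
  }
};

int main() {
  uint32_t a[2] = {0xf0f0f0f0u, 1u};
  uint32_t b[2] = {0u, 0u};
  BitVector src{2, a};
  BitVector dst{2, b};
  dst.Copy(&src);
  assert(b[0] == 0xf0f0f0f0u && b[1] == 1u);
  return 0;
}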
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 06cc505a9a..236c6f4940 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -21,42 +21,63 @@
namespace art {
-inline BasicBlock* DataflowIterator::NextBody(bool had_change) {
+// Single forward pass over the nodes.
+inline BasicBlock* DataflowIterator::ForwardSingleNext() {
+ BasicBlock* res = NULL;
+ if (idx_ < end_idx_) {
+ int bb_id = block_id_list_->Get(idx_++);
+ res = mir_graph_->GetBasicBlock(bb_id);
+ }
+ return res;
+}
+
+// Repeat full forward passes over all nodes until no change occurs during a complete pass.
+inline BasicBlock* DataflowIterator::ForwardRepeatNext(bool had_change) {
changed_ |= had_change;
BasicBlock* res = NULL;
- if (reverse_) {
- if (is_iterative_ && changed_ && (idx_ < 0)) {
- idx_ = start_idx_;
- changed_ = false;
- }
- if (idx_ >= 0) {
- int bb_id = block_id_list_->Get(idx_--);
- res = mir_graph_->GetBasicBlock(bb_id);
- }
- } else {
- if (is_iterative_ && changed_ && (idx_ >= end_idx_)) {
- idx_ = start_idx_;
- changed_ = false;
- }
- if (idx_ < end_idx_) {
- int bb_id = block_id_list_->Get(idx_++);
- res = mir_graph_->GetBasicBlock(bb_id);
- }
+ if ((idx_ >= end_idx_) && changed_) {
+ idx_ = start_idx_;
+ changed_ = false;
+ }
+ if (idx_ < end_idx_) {
+ int bb_id = block_id_list_->Get(idx_++);
+ res = mir_graph_->GetBasicBlock(bb_id);
}
return res;
}
-// AllNodes uses the existing GrowableArray iterator, so use different NextBody().
-inline BasicBlock* AllNodesIterator::NextBody(bool had_change) {
+// Single reverse pass over the nodes.
+inline BasicBlock* DataflowIterator::ReverseSingleNext() {
+ BasicBlock* res = NULL;
+ if (idx_ >= 0) {
+ int bb_id = block_id_list_->Get(idx_--);
+ res = mir_graph_->GetBasicBlock(bb_id);
+ }
+ return res;
+}
+
+// Repeat full backwards passes over all nodes until no change occurs during a complete pass.
+inline BasicBlock* DataflowIterator::ReverseRepeatNext(bool had_change) {
changed_ |= had_change;
BasicBlock* res = NULL;
+ if ((idx_ < 0) && changed_) {
+ idx_ = start_idx_;
+ changed_ = false;
+ }
+ if (idx_ >= 0) {
+ int bb_id = block_id_list_->Get(idx_--);
+ res = mir_graph_->GetBasicBlock(bb_id);
+ }
+ return res;
+}
+
+// AllNodes uses the existing GrowableArray iterator, and should be considered unordered.
+inline BasicBlock* AllNodesIterator::Next() {
+ BasicBlock* res = NULL;
bool keep_looking = true;
while (keep_looking) {
res = all_nodes_iterator_->Next();
- if (is_iterative_ && changed_ && (res == NULL)) {
- all_nodes_iterator_->Reset();
- changed_ = false;
- } else if ((res == NULL) || (!res->hidden)) {
+ if ((res == NULL) || (!res->hidden)) {
keep_looking = false;
}
}
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index da44ffd99c..1dab54ea72 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -27,124 +27,130 @@ namespace art {
* interesting orders. Note that for efficiency, the visit orders have been pre-computed.
* The order itself will not change during the iteration. However, for some uses,
* auxiliary data associated with the basic blocks may be changed during the iteration,
- * necessitating another pass over the list.
- *
- * To support this usage, we have is_iterative_. If false, the iteration is a one-shot
- * pass through the pre-computed list using Next(). If true, the caller must tell the
- * iterator whether a change has been made that necessitates another pass. Use
- * Next(had_change) for this. The general idea is that the iterative_ use case means
- * that the iterator will keep repeating the full basic block list until a complete pass
- * is made through it with no changes. Note that calling Next(true) does not affect
- * the iteration order or short-curcuit the current pass - it simply tells the iterator
- * that once it has finished walking through the block list it should reset and do another
- * full pass through the list.
+ * necessitating another pass over the list. If this behavior is required, use the
+ * "Repeating" variant. For the repeating variant, the caller must tell the iterator
+ * whether a change has been made that necessitates another pass. Note that calling Next(true)
+ * does not affect the iteration order or short-circuit the current pass - it simply tells
+ * the iterator that once it has finished walking through the block list it should reset and
+ * do another full pass through the list.
*/
class DataflowIterator {
public:
virtual ~DataflowIterator() {}
- // Return the next BasicBlock* to visit.
- BasicBlock* Next() {
- DCHECK(!is_iterative_);
- return NextBody(false);
- }
-
- /*
- * Return the next BasicBlock* to visit, and tell the iterator whether any change
- * has occurred that requires another full pass over the block list.
- */
- BasicBlock* Next(bool had_change) {
- DCHECK(is_iterative_);
- return NextBody(had_change);
- }
-
protected:
- DataflowIterator(MIRGraph* mir_graph, bool is_iterative, int start_idx, int end_idx,
- bool reverse)
+ DataflowIterator(MIRGraph* mir_graph, int start_idx, int end_idx)
: mir_graph_(mir_graph),
- is_iterative_(is_iterative),
start_idx_(start_idx),
end_idx_(end_idx),
- reverse_(reverse),
block_id_list_(NULL),
idx_(0),
changed_(false) {}
- virtual BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
+ virtual BasicBlock* ForwardSingleNext() ALWAYS_INLINE;
+ virtual BasicBlock* ReverseSingleNext() ALWAYS_INLINE;
+ virtual BasicBlock* ForwardRepeatNext(bool had_change) ALWAYS_INLINE;
+ virtual BasicBlock* ReverseRepeatNext(bool had_change) ALWAYS_INLINE;
MIRGraph* const mir_graph_;
- const bool is_iterative_;
const int start_idx_;
const int end_idx_;
- const bool reverse_;
GrowableArray<int>* block_id_list_;
int idx_;
bool changed_;
}; // DataflowIterator
- class ReachableNodesIterator : public DataflowIterator {
+ class PreOrderDfsIterator : public DataflowIterator {
public:
- ReachableNodesIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0,
- mir_graph->GetNumReachableBlocks(), false) {
+ explicit PreOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
idx_ = start_idx_;
block_id_list_ = mir_graph->GetDfsOrder();
}
+
+ BasicBlock* Next() {
+ return ForwardSingleNext();
+ }
};
- class PreOrderDfsIterator : public DataflowIterator {
+ class RepeatingPreOrderDfsIterator : public DataflowIterator {
public:
- PreOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0,
- mir_graph->GetNumReachableBlocks(), false) {
+ explicit RepeatingPreOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
idx_ = start_idx_;
block_id_list_ = mir_graph->GetDfsOrder();
}
+
+ BasicBlock* Next(bool had_change) {
+ return ForwardRepeatNext(had_change);
+ }
};
- class PostOrderDfsIterator : public DataflowIterator {
+ class RepeatingPostOrderDfsIterator : public DataflowIterator {
public:
- PostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0,
- mir_graph->GetNumReachableBlocks(), false) {
+ explicit RepeatingPostOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
idx_ = start_idx_;
block_id_list_ = mir_graph->GetDfsPostOrder();
}
+
+ BasicBlock* Next(bool had_change) {
+ return ForwardRepeatNext(had_change);
+ }
};
class ReversePostOrderDfsIterator : public DataflowIterator {
public:
- ReversePostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative,
- mir_graph->GetNumReachableBlocks() -1, 0, true) {
+ explicit ReversePostOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() - 1, 0) {
+ idx_ = start_idx_;
+ block_id_list_ = mir_graph->GetDfsPostOrder();
+ }
+
+ BasicBlock* Next() {
+ return ReverseSingleNext();
+ }
+ };
+
+ class RepeatingReversePostOrderDfsIterator : public DataflowIterator {
+ public:
+ explicit RepeatingReversePostOrderDfsIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() - 1, 0) {
idx_ = start_idx_;
block_id_list_ = mir_graph->GetDfsPostOrder();
}
+
+ BasicBlock* Next(bool had_change) {
+ return ReverseRepeatNext(had_change);
+ }
};
class PostOrderDOMIterator : public DataflowIterator {
public:
- PostOrderDOMIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0,
- mir_graph->GetNumReachableBlocks(), false) {
+ explicit PostOrderDOMIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
idx_ = start_idx_;
block_id_list_ = mir_graph->GetDomPostOrder();
}
+
+ BasicBlock* Next() {
+ return ForwardSingleNext();
+ }
};
class AllNodesIterator : public DataflowIterator {
public:
- AllNodesIterator(MIRGraph* mir_graph, bool is_iterative)
- : DataflowIterator(mir_graph, is_iterative, 0, 0, false) {
- all_nodes_iterator_ =
- new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
+ explicit AllNodesIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, 0) {
+ all_nodes_iterator_ = new (mir_graph->GetArena())
+ GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
}
void Reset() {
all_nodes_iterator_->Reset();
}
- BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
+ BasicBlock* Next() ALWAYS_INLINE;
private:
GrowableArray<BasicBlock*>::Iterator* all_nodes_iterator_;
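
For reference, a minimal standalone sketch of the repeating-pass protocol the Repeating* iterators implement: replay the precomputed visit order until one complete pass reports no change. The types are illustrative, not the MIRGraph API:

#include <cstdio>
#include <vector>

struct Node { int value; };

// One unit of per-node work; returns true if anything changed.
static bool Relax(Node* n) {
  if (n->value > 0) { n->value--; return true; }
  return false;
}

int main() {
  std::vector<Node> nodes = {{2}, {0}, {1}};
  bool changed = true;
  while (changed) {          // Next(had_change) resetting idx_ plays this role
    changed = false;
    for (Node& n : nodes) {  // the visit order itself never changes mid-pass
      changed |= Relax(&n);  // Next(true) only schedules another full pass
    }
  }
  std::printf("%d %d %d\n", nodes[0].value, nodes[1].value, nodes[2].value);
  return 0;
}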
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 63d8aa04f8..abafbc5830 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -24,6 +24,7 @@
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
+#include "thread-inl.h"
namespace art {
namespace optimizer {
@@ -216,8 +217,8 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
uint32_t field_idx = inst->VRegC_22c();
int field_offset;
bool is_volatile;
- bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, field_offset,
- is_volatile, is_put);
+ bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
+ &field_offset, &is_volatile);
if (fast_path && !is_volatile && IsUint(16, field_offset)) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< " to " << Instruction::Name(new_opcode)
@@ -246,11 +247,13 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst,
int vtable_idx;
uintptr_t direct_code;
uintptr_t direct_method;
- bool fast_path = driver_.ComputeInvokeInfo(&unit_, dex_pc, invoke_type,
- target_method, vtable_idx,
- direct_code, direct_method,
- false);
// TODO: support devirtualization.
+ const bool kEnableDevirtualization = false;
+ bool fast_path = driver_.ComputeInvokeInfo(&unit_, dex_pc,
+ false, kEnableDevirtualization,
+ &invoke_type,
+ &target_method, &vtable_idx,
+ &direct_code, &direct_method);
if (fast_path && original_invoke_type == invoke_type) {
if (vtable_idx >= 0 && IsUint(16, vtable_idx)) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
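
The recurring signature change in this patch (here and in the ComputeInvokeInfo and ComputeStaticFieldInfo call sites below) moves outputs from non-const references to pointers, so each call site shows which arguments are mutated. A minimal sketch of the convention using a made-up function, not the CompilerDriver API:

#include <cassert>

// Inputs by value, outputs by pointer; the return value flags the fast path.
static bool ComputeFieldInfoSketch(unsigned field_idx, bool is_put,
                                   int* field_offset, bool* is_volatile) {
  *field_offset = static_cast<int>(field_idx) * 4;  // made-up "resolution"
  *is_volatile = false;
  return !is_put || field_idx != 0;                 // made-up fast-path rule
}

int main() {
  int offset;
  bool is_volatile;
  bool fast = ComputeFieldInfoSketch(3, /*is_put=*/false, &offset, &is_volatile);
  assert(fast && offset == 12 && !is_volatile);
  return 0;
}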
diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h
index 8e2abfbaf1..639120a2ba 100644
--- a/compiler/dex/growable_array.h
+++ b/compiler/dex/growable_array.h
@@ -131,6 +131,11 @@ class GrowableArray {
elem_list_[index]++;
}
+ /*
+ * Remove an existing element from the list. If more than one copy of the
+ * element is present, only the first one encountered is deleted.
+ */
+ // TODO: consider renaming this.
void Delete(T element) {
bool found = false;
for (size_t i = 0; i < num_used_ - 1; i++) {
@@ -150,6 +155,11 @@ class GrowableArray {
size_t Size() const { return num_used_; }
+ void SetSize(size_t new_size) {
+ Resize(new_size);
+ num_used_ = new_size;
+ }
+
T* GetRawStorage() const { return elem_list_; }
static void* operator new(size_t size, ArenaAllocator* arena) {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index d7a4136a01..8472a3c011 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1061,7 +1061,7 @@ bool MIRGraph::SkipCompilation(Runtime::CompilerFilter compiler_filter) {
memset(&stats, 0, sizeof(stats));
ClearAllVisitedFlags();
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
AnalyzeBlock(bb, &stats);
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 3a73717a7b..3d29908e9f 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1221,10 +1221,10 @@ bool MIRGraph::InvokeUsesMethodStar(MIR* mir) {
uint32_t current_offset = static_cast<uint32_t>(current_offset_);
bool fast_path =
cu_->compiler_driver->ComputeInvokeInfo(&m_unit, current_offset,
- type, target_method,
- vtable_idx,
- direct_code, direct_method,
- false) &&
+ false, true,
+ &type, &target_method,
+ &vtable_idx,
+ &direct_code, &direct_method) &&
!(cu_->enable_debug & (1 << kDebugSlowInvokePath));
return (((type == kDirect) || (type == kStatic)) &&
fast_path && ((direct_code == 0) || (direct_method == 0)));
@@ -1287,7 +1287,7 @@ void MIRGraph::MethodUseCount() {
if (cu_->disable_opt & (1 << kPromoteRegs)) {
return;
}
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
CountUses(bb);
}
@@ -1331,7 +1331,7 @@ bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
void MIRGraph::VerifyDataflow() {
/* Verify if all blocks are connected as claimed */
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
VerifyPredInfo(bb);
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index a12bf39e64..c234298a88 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -99,6 +99,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
cur_block_(NULL),
num_blocks_(0),
current_code_item_(NULL),
+ block_map_(arena, 0, kGrowableArrayMisc),
current_method_(kInvalidEntry),
current_offset_(kInvalidEntry),
def_count_(0),
@@ -210,18 +211,18 @@ BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool creat
BasicBlock** immed_pred_block_p) {
BasicBlock* bb;
unsigned int i;
- SafeMap<unsigned int, BasicBlock*>::iterator it;
- it = block_map_.find(code_offset);
- if (it != block_map_.end()) {
- return it->second;
- } else if (!create) {
+ if (code_offset >= cu_->code_item->insns_size_in_code_units_) {
return NULL;
}
+ bb = block_map_.Get(code_offset);
+ if ((bb != NULL) || !create) {
+ return bb;
+ }
if (split) {
- for (i = 0; i < block_list_.Size(); i++) {
- bb = block_list_.Get(i);
+ for (i = block_list_.Size(); i > 0; i--) {
+ bb = block_list_.Get(i - 1);
if (bb->block_type != kDalvikByteCode) continue;
/* Check if a branch jumps into the middle of an existing block */
if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
@@ -518,6 +519,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
// TODO: need to rework expansion of block list & try_block_addr when inlining activated.
block_list_.Resize(block_list_.Size() + current_code_item_->insns_size_in_code_units_);
+ block_map_.SetSize(block_map_.Size() + current_code_item_->insns_size_in_code_units_);
+
// TODO: replace with explicit resize routine. Using automatic extension side effect for now.
try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);
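
The switch from SafeMap to a pre-sized GrowableArray turns the FindBlock cache into a dense table indexed by code offset. A standalone sketch of that lookup shape, with std::vector standing in for GrowableArray:

#include <cassert>
#include <vector>

struct BasicBlock { int id; };

int main() {
  std::vector<BasicBlock*> block_map;
  block_map.resize(16, nullptr);    // SetSize(insns_size_in_code_units_)
  BasicBlock bb{7};
  block_map[7] = &bb;               // Put(code_offset, bb)
  assert(block_map[7]->id == 7);    // Get(code_offset) is a plain index
  assert(block_map[3] == nullptr);  // offsets with no block read back as NULL
  return 0;
}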
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 6f8bd85630..9d4ab98f67 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -580,11 +580,34 @@ class MIRGraph {
void SSATransformation();
void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
void NullCheckElimination();
+ /*
+ * Type inference handling helpers. Because Dalvik's bytecode is not fully typed,
+ * we have to do some work to figure out the sreg type. For some operations it is
+ * clear based on the opcode (e.g. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
+ * may never know the "real" type.
+ *
+ * We perform the type inference operation by using an iterative walk over
+ * the graph, propagating types "defined" by typed opcodes to uses and defs in
+ * non-typed opcodes (such as MOVE). The Setxx(index) helpers are used to set defined
+ * types on typed opcodes (such as ADD_INT). The Setxx(index, is_xx) form is used to
+ * propagate types through non-typed opcodes such as PHI and MOVE. The is_xx flag
+ * tells whether our guess of the type is based on a previously typed definition.
+ * If so, the defined type takes precedence. Note that it's possible to have the same sreg
+ * show multiple defined types because dx treats constants as untyped bit patterns.
+ * The return value of the Setxx() helpers says whether or not the Setxx() action changed
+ * the current guess, and is used to know when to terminate the iterative walk.
+ */
bool SetFp(int index, bool is_fp);
+ bool SetFp(int index);
bool SetCore(int index, bool is_core);
+ bool SetCore(int index);
bool SetRef(int index, bool is_ref);
+ bool SetRef(int index);
bool SetWide(int index, bool is_wide);
+ bool SetWide(int index);
bool SetHigh(int index, bool is_high);
+ bool SetHigh(int index);
+
void AppendMIR(BasicBlock* bb, MIR* mir);
void PrependMIR(BasicBlock* bb, MIR* mir);
void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
@@ -705,7 +728,7 @@ class MIRGraph {
BasicBlock* cur_block_;
int num_blocks_;
const DexFile::CodeItem* current_code_item_;
- SafeMap<unsigned int, BasicBlock*> block_map_; // FindBlock lookup cache.
+ GrowableArray<BasicBlock*> block_map_; // FindBlock lookup cache.
std::vector<DexCompilationUnit*> m_units_; // List of methods included in this graph
typedef std::pair<int, int> MIRLocation; // Insert point, (m_unit_ index, offset)
std::vector<MIRLocation> method_stack_; // Include stack
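
A minimal standalone sketch of the Setxx() change-reporting contract described in the comment above: each setter returns whether it refined the current guess, which is what lets the repeating type-inference walk terminate. The types and propagation rule here are illustrative:

#include <cassert>
#include <vector>

struct SRegType { bool fp; };

static std::vector<SRegType> types(4);  // one guess per sreg, initially false

// Mirrors SetFp(index, is_fp): a positive fact can only be added, never removed.
static bool SetFp(int index, bool is_fp) {
  bool change = false;
  if (is_fp && !types[index].fp) {
    types[index].fp = true;
    change = true;
  }
  return change;
}

int main() {
  assert(SetFp(0, true));    // new information: another pass is needed
  assert(!SetFp(0, true));   // already known: no change reported
  assert(!SetFp(1, false));  // an untyped use never flips a guess
  return 0;
}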
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index b7611f8f5b..05e428e178 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -96,7 +96,7 @@ void MIRGraph::PropagateConstants() {
is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
constant_values_ = static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(),
ArenaAllocator::kAllocDFInfo));
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
DoConstantPropogation(bb);
}
@@ -762,11 +762,11 @@ bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) {
void MIRGraph::NullCheckElimination() {
if (!(cu_->disable_opt & (1 << kNullCheckElimination))) {
DCHECK(temp_ssa_register_v_ != NULL);
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
NullCheckEliminationInit(bb);
}
- PreOrderDfsIterator iter2(this, true /* iterative */);
+ RepeatingPreOrderDfsIterator iter2(this);
bool change = false;
for (BasicBlock* bb = iter2.Next(change); bb != NULL; bb = iter2.Next(change)) {
change = EliminateNullChecks(bb);
@@ -778,7 +778,7 @@ void MIRGraph::NullCheckElimination() {
}
void MIRGraph::BasicBlockCombine() {
- PreOrderDfsIterator iter(this, false /* not iterative */);
+ PreOrderDfsIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
CombineBlocks(bb);
}
@@ -791,7 +791,7 @@ void MIRGraph::CodeLayout() {
if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
VerifyDataflow();
}
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
LayoutBlocks(bb);
}
@@ -804,7 +804,7 @@ void MIRGraph::DumpCheckStats() {
Checkstats* stats =
static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), ArenaAllocator::kAllocDFInfo));
checkstats_ = stats;
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
CountChecks(bb);
}
@@ -858,7 +858,7 @@ void MIRGraph::BasicBlockOptimization() {
if (!(cu_->disable_opt & (1 << kBBOpt))) {
DCHECK_EQ(cu_->num_compiler_temps, 0);
ClearAllVisitedFlags();
- PreOrderDfsIterator iter2(this, false /* not iterative */);
+ PreOrderDfsIterator iter2(this);
for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
BuildExtendedBBList(bb);
}
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 90cec75039..df10f7eda0 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -30,10 +30,10 @@
#include "dex/compiler_internals.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex/frontend.h"
-#include "mir_to_gbc.h"
-
#include "llvm/llvm_compilation_unit.h"
#include "llvm/utils_llvm.h"
+#include "mir_to_gbc.h"
+#include "thread-inl.h"
const char* kLabelFormat = "%c0x%x_%d";
const char kInvalidBlock = 0xff;
@@ -1877,7 +1877,7 @@ void MirConverter::MethodMIR2Bitcode() {
CreateFunction();
// Create an LLVM basic block for each MIR block in dfs preorder
- PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
+ PreOrderDfsIterator iter(mir_graph_);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
CreateLLVMBasicBlock(bb);
}
@@ -1909,7 +1909,7 @@ void MirConverter::MethodMIR2Bitcode() {
}
}
- PreOrderDfsIterator iter2(mir_graph_, false /* not iterative */);
+ PreOrderDfsIterator iter2(mir_graph_);
for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
BlockBitcodeConversion(bb);
}
@@ -1972,7 +1972,7 @@ void MirConverter::MethodMIR2Bitcode() {
::llvm::OwningPtr< ::llvm::tool_output_file> out_file(
new ::llvm::tool_output_file(fname.c_str(), errmsg,
- ::llvm::sys::fs::F_Binary));
+ ::llvm::raw_fd_ostream::F_Binary));
if (!errmsg.empty()) {
LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 0649c9f319..2d69d935ca 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1122,6 +1122,12 @@ AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) {
lir->operands[1] = 0;
lir->target = 0;
SetupResourceMasks(lir);
+ /*
+ * Because we just added this new instruction after the current one,
+ * advance lir so that this new instruction won't be checked for displacement
+ * overflow until the next pass (when its base offset will be properly established).
+ */
+ lir = new_inst;
res = kRetryAll;
} else {
lir->operands[1] = delta >> 1;
@@ -1170,7 +1176,7 @@ AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) {
lir->operands[0] = delta >> 1;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) &&
lir->operands[0] == 0) { // Useless branch
- lir->flags.is_nop = true;
+ NopLIR(lir);
res = kRetryAll;
}
} else if (lir->opcode == kThumbBUncond) {
@@ -1188,7 +1194,7 @@ AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) {
lir->operands[0] = delta >> 1;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) &&
lir->operands[0] == -1) { // Useless branch
- lir->flags.is_nop = true;
+ NopLIR(lir);
res = kRetryAll;
}
}
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 2dbe5f5c36..bba2ec5c4e 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -120,9 +120,10 @@ MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) {
// TODO: move to common code
void ArmMir2Lir::GenPrintLabel(MIR* mir) {
/* Mark the beginning of a Dalvik instruction for line tracking */
- char* inst_str = cu_->verbose ?
- mir_graph_->GetDalvikDisassembly(mir) : NULL;
- MarkBoundary(mir->offset, inst_str);
+ if (cu_->verbose) {
+ char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
+ MarkBoundary(mir->offset, inst_str);
+ }
}
MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
@@ -130,7 +131,7 @@ MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
int field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
- bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
+ bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
}
@@ -155,7 +156,7 @@ MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
int field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
- bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
+ bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
}
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 291319f258..1954fbac51 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -51,7 +51,6 @@ class ArmMir2Lir : public Mir2Lir {
int AllocTypedTempPair(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
- RegisterInfo* GetRegInfo(int reg);
RegLocation GetReturnAlt();
RegLocation GetReturnWideAlt();
RegLocation LocCReturn();
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 6fbdd2fd49..07782d957f 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -234,11 +234,17 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
rl_false = LoadValue(rl_false, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
OpRegImm(kOpCmp, rl_src.low_reg, 0);
- OpIT(kCondEq, "E");
- LIR* l1 = OpRegCopy(rl_result.low_reg, rl_true.low_reg);
- l1->flags.is_nop = false; // Make sure this instruction isn't optimized away
- LIR* l2 = OpRegCopy(rl_result.low_reg, rl_false.low_reg);
- l2->flags.is_nop = false; // Make sure this instruction isn't optimized away
+ if (rl_result.low_reg == rl_true.low_reg) { // Is the "true" case already in place?
+ OpIT(kCondNe, "");
+ OpRegCopy(rl_result.low_reg, rl_false.low_reg);
+ } else if (rl_result.low_reg == rl_false.low_reg) { // False case in place?
+ OpIT(kCondEq, "");
+ OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+ } else { // Normal - select between the two.
+ OpIT(kCondEq, "E");
+ OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+ OpRegCopy(rl_result.low_reg, rl_false.low_reg);
+ }
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
StoreValue(rl_dest, rl_result);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 6cc3052da1..203a8cc55d 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -691,11 +691,6 @@ RegLocation ArmMir2Lir::GetReturnAlt() {
return res;
}
-ArmMir2Lir::RegisterInfo* ArmMir2Lir::GetRegInfo(int reg) {
- return ARM_FPREG(reg) ? &reg_pool_->FPRegs[reg & ARM_FP_REG_MASK]
- : &reg_pool_->core_regs[reg];
-}
-
/* To be used when explicitly managing register use */
void ArmMir2Lir::LockCallTemps() {
LockTemp(r0);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index a49fa7b44d..6e49f0bc54 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -50,14 +50,37 @@ void Mir2Lir::MarkSafepointPC(LIR* inst) {
DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
}
-bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put) {
+bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) {
return cu_->compiler_driver->ComputeInstanceFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
+ field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile);
+}
+
+/* Remove a LIR from the list. */
+void Mir2Lir::UnlinkLIR(LIR* lir) {
+ if (UNLIKELY(lir == first_lir_insn_)) {
+ first_lir_insn_ = lir->next;
+ if (lir->next != NULL) {
+ lir->next->prev = NULL;
+ } else {
+ DCHECK(lir->next == NULL);
+ DCHECK(lir == last_lir_insn_);
+ last_lir_insn_ = NULL;
+ }
+ } else if (lir == last_lir_insn_) {
+ last_lir_insn_ = lir->prev;
+ lir->prev->next = NULL;
+ } else if ((lir->prev != NULL) && (lir->next != NULL)) {
+ lir->prev->next = lir->next;
+ lir->next->prev = lir->prev;
+ }
}
/* Convert an instruction to a NOP */
void Mir2Lir::NopLIR(LIR* lir) {
lir->flags.is_nop = true;
+ if (!cu_->verbose) {
+ UnlinkLIR(lir);
+ }
}
void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
@@ -225,12 +248,12 @@ void Mir2Lir::DumpPromotionMap() {
}
/* Dump a mapping table */
-void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descriptor,
- const std::string& name, const std::string& signature,
+void Mir2Lir::DumpMappingTable(const char* table_name, const char* descriptor,
+ const char* name, const Signature& signature,
const std::vector<uint32_t>& v) {
if (v.size() > 0) {
std::string line(StringPrintf("\n %s %s%s_%s_table[%zu] = {", table_name,
- descriptor.c_str(), name.c_str(), signature.c_str(), v.size()));
+ descriptor, name, signature.ToString().c_str(), v.size()));
std::replace(line.begin(), line.end(), ';', '_');
LOG(INFO) << line;
for (uint32_t i = 0; i < v.size(); i+=2) {
@@ -270,9 +293,9 @@ void Mir2Lir::CodegenDump() {
const DexFile::MethodId& method_id =
cu_->dex_file->GetMethodId(cu_->method_idx);
- std::string signature(cu_->dex_file->GetMethodSignature(method_id));
- std::string name(cu_->dex_file->GetMethodName(method_id));
- std::string descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));
+ const Signature signature = cu_->dex_file->GetMethodSignature(method_id);
+ const char* name = cu_->dex_file->GetMethodName(method_id);
+ const char* descriptor = cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id);
// Dump mapping tables
DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_);
@@ -698,11 +721,11 @@ int Mir2Lir::AssignInsnOffsets() {
for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
lir->offset = offset;
- if (lir->opcode >= 0) {
+ if (LIKELY(lir->opcode >= 0)) {
if (!lir->flags.is_nop) {
offset += lir->flags.size;
}
- } else if (lir->opcode == kPseudoPseudoAlign4) {
+ } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
if (offset & 0x2) {
offset += 2;
lir->operands[0] = 1;
@@ -712,7 +735,6 @@ int Mir2Lir::AssignInsnOffsets() {
}
/* Pseudo opcodes don't consume space */
}
-
return offset;
}
@@ -785,21 +807,17 @@ void Mir2Lir::AssembleLIR() {
/*
* Insert a kPseudoCaseLabel at the beginning of the Dalvik
* offset vaddr. This label will be used to fix up the case
- * branch table during the assembly phase. Be sure to set
- * all resource flags on this to prevent code motion across
- * target boundaries. KeyVal is just there for debugging.
+ * branch table during the assembly phase. All resource flags
+ * are set to prevent code motion. KeyVal is just there for debugging.
*/
LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) {
- SafeMap<unsigned int, LIR*>::iterator it;
- it = boundary_map_.find(vaddr);
- if (it == boundary_map_.end()) {
- LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
- }
+ LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
new_label->dalvik_offset = vaddr;
new_label->opcode = kPseudoCaseLabel;
new_label->operands[0] = keyVal;
- InsertLIRAfter(it->second, new_label);
+ new_label->def_mask = ENCODE_ALL;
+ InsertLIRAfter(boundary_lir, new_label);
return new_label;
}
@@ -883,18 +901,9 @@ void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
}
}
-/*
- * Set up special LIR to mark a Dalvik byte-code instruction start and
- * record it in the boundary_map. NOTE: in cases such as kMirOpCheck in
- * which we split a single Dalvik instruction, only the first MIR op
- * associated with a Dalvik PC should be entered into the map.
- */
-LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
- LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
- if (boundary_map_.find(offset) == boundary_map_.end()) {
- boundary_map_.Put(offset, res);
- }
- return res;
+/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
+void Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
+ NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
}
bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
@@ -949,6 +958,8 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
+ tempreg_info_(arena, 20, kGrowableArrayMisc),
+ reginfo_map_(arena, 64, kGrowableArrayMisc),
data_offset_(0),
total_size_(0),
block_label_list_(NULL),
@@ -1091,5 +1102,4 @@ void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
new_lir->next->prev = new_lir;
}
-
} // namespace art
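
A standalone sketch of the three unlink cases UnlinkLIR distinguishes (head, tail, interior) on a doubly linked list, with illustrative types rather than the LIR structure:

#include <cassert>

struct LIR { LIR* prev; LIR* next; };

struct List {
  LIR* first;
  LIR* last;
  void Unlink(LIR* lir) {
    if (lir == first) {
      first = lir->next;
      if (first != nullptr) first->prev = nullptr; else last = nullptr;
    } else if (lir == last) {
      last = lir->prev;
      last->next = nullptr;
    } else if (lir->prev != nullptr && lir->next != nullptr) {
      lir->prev->next = lir->next;
      lir->next->prev = lir->prev;
    }
  }
};

int main() {
  LIR a{}, b{}, c{};
  a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
  List l{&a, &c};
  l.Unlink(&b);  // interior
  assert(a.next == &c && c.prev == &a);
  l.Unlink(&a);  // head
  assert(l.first == &c && c.prev == nullptr);
  l.Unlink(&c);  // head and tail at once: the list empties
  assert(l.first == nullptr && l.last == nullptr);
  return 0;
}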
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index f018c61819..4dd55d763a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -127,13 +127,11 @@ void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
// OK - convert this to a compare immediate and branch
OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
- OpUnconditionalBranch(fall_through);
return;
}
}
rl_src2 = LoadValue(rl_src2, kCoreReg);
OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
- OpUnconditionalBranch(fall_through);
}
void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
@@ -164,7 +162,6 @@ void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_s
LOG(FATAL) << "Unexpected opcode " << opcode;
}
OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
- OpUnconditionalBranch(fall_through);
}
void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
@@ -337,8 +334,8 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
bool is_volatile;
bool is_referrers_class;
bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
- is_referrers_class, is_volatile, true);
+ field_idx, mir_graph_->GetCurrentDexCompilationUnit(), true,
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
if (fast_path && !SLOW_FIELD_PATH) {
DCHECK_GE(field_offset, 0);
int rBase;
@@ -423,8 +420,8 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
bool is_volatile;
bool is_referrers_class;
bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
- is_referrers_class, is_volatile, false);
+ field_idx, mir_graph_->GetCurrentDexCompilationUnit(), false,
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
if (fast_path && !SLOW_FIELD_PATH) {
DCHECK_GE(field_offset, 0);
int rBase;
@@ -626,7 +623,7 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
int field_offset;
bool is_volatile;
- bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
+ bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
if (fast_path && !SLOW_FIELD_PATH) {
RegLocation rl_result;
@@ -687,8 +684,7 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
int field_offset;
bool is_volatile;
- bool fast_path = FastInstance(field_idx, field_offset, is_volatile,
- true);
+ bool fast_path = FastInstance(field_idx, true, &field_offset, &is_volatile);
if (fast_path && !SLOW_FIELD_PATH) {
RegisterClass reg_class = oat_reg_class_by_size(size);
DCHECK_GE(field_offset, 0);
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 2a0a23c7cd..ed83863733 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1219,8 +1219,10 @@ bool Mir2Lir::GenIntrinsic(CallInfo* info) {
* method. By doing this during basic block construction, we can also
* take advantage of/generate new useful dataflow info.
*/
+ const DexFile::MethodId& target_mid = cu_->dex_file->GetMethodId(info->index);
+ const DexFile::TypeId& declaring_type = cu_->dex_file->GetTypeId(target_mid.class_idx_);
StringPiece tgt_methods_declaring_class(
- cu_->dex_file->GetMethodDeclaringClassDescriptor(cu_->dex_file->GetMethodId(info->index)));
+ cu_->dex_file->StringDataAsStringPieceByIdx(declaring_type.descriptor_idx_));
if (tgt_methods_declaring_class.starts_with("Ljava/lang/Double;")) {
std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
@@ -1373,10 +1375,10 @@ void Mir2Lir::GenInvoke(CallInfo* info) {
bool fast_path =
cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
current_dalvik_offset_,
- info->type, target_method,
- vtable_idx,
- direct_code, direct_method,
- true) && !SLOW_INVOKE_PATH;
+ true, true,
+ &info->type, &target_method,
+ &vtable_idx,
+ &direct_code, &direct_method) && !SLOW_INVOKE_PATH;
if (info->type == kInterface) {
if (fast_path) {
p_null_ck = &null_ck;
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 630e990733..cb7694de68 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -99,12 +99,11 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
int native_reg_id;
if (cu_->instruction_set == kX86) {
// If x86, location differs depending on whether memory/reg operation.
- native_reg_id = (GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
- : this_lir->operands[0];
+ native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
} else {
native_reg_id = this_lir->operands[0];
}
- bool is_this_lir_load = GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
+ bool is_this_lir_load = target_flags & IS_LOAD;
LIR* check_lir;
/* Use the mem mask to determine the rough memory location */
uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
@@ -169,7 +168,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
if (check_lir->operands[0] != native_reg_id) {
ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
}
- check_lir->flags.is_nop = true;
+ NopLIR(check_lir);
}
} else if (alias_condition == ENCODE_DALVIK_REG) {
/* Must alias */
@@ -188,7 +187,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
native_reg_id) {
ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
}
- check_lir->flags.is_nop = true;
+ NopLIR(check_lir);
} else {
/*
* Destinations are of different types -
@@ -202,7 +201,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
stop_here = true;
} else if (!is_this_lir_load && !is_check_lir_load) {
/* WAW - nuke the earlier store */
- this_lir->flags.is_nop = true;
+ NopLIR(this_lir);
stop_here = true;
}
/* Partial overlap */
@@ -257,7 +256,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
* top-down order.
*/
InsertLIRBefore(check_lir, new_store_lir);
- this_lir->flags.is_nop = true;
+ NopLIR(this_lir);
}
break;
} else if (!check_lir->flags.is_nop) {
@@ -453,7 +452,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
* is never the first LIR on the list
*/
InsertLIRBefore(cur_lir, new_load_lir);
- this_lir->flags.is_nop = true;
+ NopLIR(this_lir);
}
}
}
@@ -468,41 +467,4 @@ void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) {
}
}
-/*
- * Nop any unconditional branches that go to the next instruction.
- * Note: new redundant branches may be inserted later, and we'll
- * use a check in final instruction assembly to nop those out.
- */
-void Mir2Lir::RemoveRedundantBranches() {
- LIR* this_lir;
-
- for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) {
- /* Branch to the next instruction */
- if (IsUnconditionalBranch(this_lir)) {
- LIR* next_lir = this_lir;
-
- while (true) {
- next_lir = NEXT_LIR(next_lir);
-
- /*
- * Is the branch target the next instruction?
- */
- if (next_lir == this_lir->target) {
- this_lir->flags.is_nop = true;
- break;
- }
-
- /*
- * Found real useful stuff between the branch and the target.
- * Need to explicitly check the last_lir_insn_ here because it
- * might be the last real instruction.
- */
- if (!is_pseudo_opcode(next_lir->opcode) ||
- (next_lir == last_lir_insn_))
- break;
- }
- }
- }
-}
-
} // namespace art
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index cd25232c21..dbd668b330 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -503,7 +503,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
if (!unconditional) {
InsertLIRBefore(lir, hop_target);
}
- lir->flags.is_nop = true;
+ NopLIR(lir);
}
/*
@@ -561,7 +561,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) {
RawLIR(lir->dalvik_offset, kMipsAddu,
lir->operands[0], lir->operands[0], r_RA);
InsertLIRBefore(lir, new_addu);
- lir->flags.is_nop = true;
+ NopLIR(lir);
res = kRetryAll;
}
} else if (lir->opcode == kMipsDeltaLo) {
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index b9cb720962..8d0b347a34 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -52,7 +52,6 @@ class MipsMir2Lir : public Mir2Lir {
int AllocTypedTempPair(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
- RegisterInfo* GetRegInfo(int reg);
RegLocation GetReturnAlt();
RegLocation GetReturnWideAlt();
RegLocation LocCReturn();
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 4ee5b23eb9..8e768dcf18 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -399,11 +399,6 @@ RegLocation MipsMir2Lir::GetReturnAlt() {
return res;
}
-MipsMir2Lir::RegisterInfo* MipsMir2Lir::GetRegInfo(int reg) {
- return MIPS_FPREG(reg) ? &reg_pool_->FPRegs[reg & MIPS_FP_REG_MASK]
- : &reg_pool_->core_regs[reg];
-}
-
/* To be used when explicitly managing register use */
void MipsMir2Lir::LockCallTemps() {
LockTemp(rMIPS_ARG0);
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 440df2afa6..0ca8d8de11 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -33,7 +33,12 @@ inline void Mir2Lir::ClobberBody(RegisterInfo* p) {
p->def_end = NULL;
if (p->pair) {
p->pair = false;
- Clobber(p->partner);
+ p = GetRegInfo(p->partner);
+ p->pair = false;
+ p->live = false;
+ p->s_reg = INVALID_SREG;
+ p->def_start = NULL;
+ p->def_end = NULL;
}
}
}
@@ -196,6 +201,11 @@ inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
SetupTargetResourceMasks(lir);
}
+inline art::Mir2Lir::RegisterInfo* Mir2Lir::GetRegInfo(int reg) {
+ DCHECK(reginfo_map_.Get(reg) != NULL);
+ return reginfo_map_.Get(reg);
+}
+
} // namespace art
#endif // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
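GetRegInfo() is now shared in Mir2Lir and backed by reginfo_map_, a table indexed directly by register number, replacing the per-target implementations deleted below that probed core_regs/FPRegs. CompilerInitPool() (in ralloc_util.cc below) pads the map with NULL entries up to the highest register number before inserting. A standalone illustration of the same grow-then-index pattern, with std::vector standing in for GrowableArray (names other than RegisterInfo are hypothetical):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct RegisterInfo { int reg; bool in_use, is_temp, pair, live, dirty; int s_reg; };

    // Grow-on-demand table keyed by register number, mirroring how
    // CompilerInitPool() pads reginfo_map_ with NULL entries before Put().
    class RegInfoMap {
     public:
      void Put(std::size_t reg, RegisterInfo* info) {
        if (reg >= map_.size()) map_.resize(reg + 1, nullptr);
        map_[reg] = info;
      }
      RegisterInfo* Get(std::size_t reg) const {
        assert(reg < map_.size() && map_[reg] != nullptr);  // As in the DCHECK above.
        return map_[reg];  // O(1) lookup, replacing the old linear scans.
      }
     private:
      std::vector<RegisterInfo*> map_;
    };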
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c41feb1348..6f398696dd 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -18,6 +18,7 @@
#include "dex/dataflow_iterator-inl.h"
#include "mir_to_lir-inl.h"
#include "object_utils.h"
+#include "thread-inl.h"
namespace art {
@@ -706,16 +707,15 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
}
// Free temp registers and reset redundant store tracking.
- ResetRegPool();
- ResetDefTracking();
-
ClobberAllRegs();
if (bb->block_type == kEntryBlock) {
+ ResetRegPool();
int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
GenEntrySequence(&mir_graph_->reg_location_[start_vreg],
mir_graph_->reg_location_[mir_graph_->GetMethodSReg()]);
} else if (bb->block_type == kExitBlock) {
+ ResetRegPool();
GenExitSequence();
}
@@ -736,16 +736,16 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
current_dalvik_offset_ = mir->offset;
int opcode = mir->dalvikInsn.opcode;
- LIR* boundary_lir;
// Mark the beginning of a Dalvik instruction for line tracking.
- char* inst_str = cu_->verbose ?
- mir_graph_->GetDalvikDisassembly(mir) : NULL;
- boundary_lir = MarkBoundary(mir->offset, inst_str);
+ if (cu_->verbose) {
+ char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
+ MarkBoundary(mir->offset, inst_str);
+ }
// Remember the first LIR for this block.
if (head_lir == NULL) {
- head_lir = boundary_lir;
- // Set the first boundary_lir as a scheduling barrier.
+ head_lir = &block_label_list_[bb->id];
+ // Set the first label as a scheduling barrier.
head_lir->def_mask = ENCODE_ALL;
}
@@ -771,11 +771,6 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
if (head_lir) {
// Eliminate redundant loads/stores and delay stores into later slots.
ApplyLocalOptimizations(head_lir, last_lir_insn_);
-
- // Generate an unconditional branch to the fallthrough block.
- if (bb->fall_through) {
- OpUnconditionalBranch(&block_label_list_[bb->fall_through->id]);
- }
}
return false;
}
@@ -815,9 +810,19 @@ void Mir2Lir::MethodMIR2LIR() {
static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
ArenaAllocator::kAllocLIR));
- PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
- MethodBlockCodeGen(bb);
+ PreOrderDfsIterator iter(mir_graph_);
+ BasicBlock* curr_bb = iter.Next();
+ BasicBlock* next_bb = iter.Next();
+ while (curr_bb != NULL) {
+ MethodBlockCodeGen(curr_bb);
+ // If the fall_through block is no longer laid out consecutively, drop in a branch.
+ if ((curr_bb->fall_through != NULL) && (curr_bb->fall_through != next_bb)) {
+ OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through->id]);
+ }
+ curr_bb = next_bb;
+ do {
+ next_bb = iter.Next();
+ } while ((next_bb != NULL) && (next_bb->block_type == kDead));
}
HandleSuspendLaunchPads();
@@ -825,10 +830,6 @@ void Mir2Lir::MethodMIR2LIR() {
HandleThrowLaunchPads();
HandleIntrinsicLaunchPads();
-
- if (!(cu_->disable_opt & (1 << kSafeOptimizations))) {
- RemoveRedundantBranches();
- }
}
} // namespace art
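With RemoveRedundantBranches() gone, MethodMIR2LIR() decides up front whether a fall-through branch is needed: it walks the blocks with one block of lookahead, skips kDead blocks since they emit no code, and branches only when the fall-through successor is not the block laid out next. A self-contained model of that lookahead with hypothetical block data; it prints "emit block 0" then "emit block 2" with no branch, because the dead block 1 is skipped when peeking ahead:

    #include <cstdio>
    #include <vector>

    struct Block { int id; int fall_through; bool dead; };

    int main() {
      std::vector<Block> blocks = {{0, 2, false}, {1, -1, true}, {2, -1, false}};
      for (size_t i = 0; i < blocks.size(); ++i) {
        if (blocks[i].dead) continue;          // Dead blocks emit no code.
        size_t j = i + 1;                      // Peek at the next live block.
        while (j < blocks.size() && blocks[j].dead) ++j;
        std::printf("emit block %d\n", blocks[i].id);
        int ft = blocks[i].fall_through;
        if (ft >= 0 && (j >= blocks.size() || blocks[j].id != ft)) {
          std::printf("  branch to %d\n", ft); // Successor not laid out next.
        }
      }
      return 0;
    }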
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index a37ebd173f..7d6f968da5 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -158,6 +158,10 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
#define ENCODE_ALL (~0ULL)
#define ENCODE_MEM (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
+
+// Mask to denote sreg as the start of a double. Must not interfere with low 16 bits.
+#define STARTING_DOUBLE_SREG 0x10000
+
// TODO: replace these macros
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
@@ -187,7 +191,6 @@ class Mir2Lir : public Backend {
struct RefCounts {
int count;
int s_reg;
- bool double_start; // Starting v_reg for a double
};
/*
@@ -250,7 +253,7 @@ class Mir2Lir : public Backend {
virtual void Materialize();
virtual CompiledMethod* GetCompiledMethod();
void MarkSafepointPC(LIR* inst);
- bool FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put);
+ bool FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile);
void SetupResourceMasks(LIR* lir);
void AssembleLIR();
void SetMemRefType(LIR* lir, bool is_load, int mem_type);
@@ -274,13 +277,14 @@ class Mir2Lir : public Backend {
void ProcessSwitchTables();
void DumpSparseSwitchTable(const uint16_t* table);
void DumpPackedSwitchTable(const uint16_t* table);
- LIR* MarkBoundary(int offset, const char* inst_str);
+ void MarkBoundary(int offset, const char* inst_str);
void NopLIR(LIR* lir);
+ void UnlinkLIR(LIR* lir);
bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
bool IsInexpensiveConstant(RegLocation rl_src);
ConditionCode FlipComparisonOrder(ConditionCode before);
- void DumpMappingTable(const char* table_name, const std::string& descriptor,
- const std::string& name, const std::string& signature,
+ void DumpMappingTable(const char* table_name, const char* descriptor,
+ const char* name, const Signature& signature,
const std::vector<uint32_t>& v);
void InstallLiteralPools();
void InstallSwitchTables();
@@ -302,7 +306,6 @@ class Mir2Lir : public Backend {
void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);
- void RemoveRedundantBranches();
// Shared by all targets - implemented in ralloc_util.cc
int GetSRegHi(int lowSreg);
@@ -324,11 +327,9 @@ class Mir2Lir : public Backend {
void RecordCorePromotion(int reg, int s_reg);
int AllocPreservedCoreReg(int s_reg);
void RecordFpPromotion(int reg, int s_reg);
- int AllocPreservedSingle(int s_reg, bool even);
+ int AllocPreservedSingle(int s_reg);
int AllocPreservedDouble(int s_reg);
- int AllocPreservedFPReg(int s_reg, bool double_start);
- int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
- bool required);
+ int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp, bool required);
int AllocTempDouble();
int AllocFreeTemp();
int AllocTemp();
@@ -367,13 +368,14 @@ class Mir2Lir : public Backend {
RegLocation UpdateRawLoc(RegLocation loc);
RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
- void CountRefs(RefCounts* core_counts, RefCounts* fp_counts);
+ void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
void DumpCounts(const RefCounts* arr, int size, const char* msg);
void DoPromotion();
int VRegOffset(int v_reg);
int SRegOffset(int s_reg);
RegLocation GetReturnWide(bool is_double);
RegLocation GetReturn(bool is_float);
+ RegisterInfo* GetRegInfo(int reg);
// Shared by all targets - implemented in gen_common.cc.
bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
@@ -550,7 +552,6 @@ class Mir2Lir : public Backend {
virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
virtual int S2d(int low_reg, int high_reg) = 0;
virtual int TargetReg(SpecialTargetRegister reg) = 0;
- virtual RegisterInfo* GetRegInfo(int reg) = 0;
virtual RegLocation GetReturnAlt() = 0;
virtual RegLocation GetReturnWideAlt() = 0;
virtual RegLocation LocCReturn() = 0;
@@ -727,7 +728,8 @@ class Mir2Lir : public Backend {
GrowableArray<LIR*> throw_launchpads_;
GrowableArray<LIR*> suspend_launchpads_;
GrowableArray<LIR*> intrinsic_launchpads_;
- SafeMap<unsigned int, LIR*> boundary_map_; // boundary lookup cache.
+ GrowableArray<RegisterInfo*> tempreg_info_;
+ GrowableArray<RegisterInfo*> reginfo_map_;
/*
* Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
* Native PC is on the return address of the safepointed operation. Dex PC is for
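STARTING_DOUBLE_SREG tags an s_reg as the starting (low) half of an fp double while leaving the register number recoverable, which is how DumpCounts() and DoPromotion() in ralloc_util.cc below distinguish double-start entries. A quick check of the encode/decode round trip, assuming s_reg values fit in the low 16 bits as the comment requires:

    #include <cassert>

    #define STARTING_DOUBLE_SREG 0x10000

    int main() {
      int s_reg = 42;
      int tagged = s_reg | STARTING_DOUBLE_SREG;          // Mark as a double start.
      assert((tagged & STARTING_DOUBLE_SREG) != 0);       // Detect the tag.
      assert((tagged & ~STARTING_DOUBLE_SREG) == s_reg);  // Recover the s_reg.
      return 0;
    }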
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 71b74a4a68..7927ff9864 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -28,13 +28,9 @@ namespace art {
* live until it is either explicitly killed or reallocated.
*/
void Mir2Lir::ResetRegPool() {
- for (int i = 0; i < reg_pool_->num_core_regs; i++) {
- if (reg_pool_->core_regs[i].is_temp)
- reg_pool_->core_regs[i].in_use = false;
- }
- for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
- if (reg_pool_->FPRegs[i].is_temp)
- reg_pool_->FPRegs[i].in_use = false;
+ GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
+ for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ info->in_use = false;
}
// Reset temp tracking sanity check.
if (kIsDebugBuild) {
@@ -48,13 +44,21 @@ void Mir2Lir::ResetRegPool() {
*/
void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num) {
for (int i = 0; i < num; i++) {
- regs[i].reg = reg_nums[i];
+ uint32_t reg_number = reg_nums[i];
+ regs[i].reg = reg_number;
regs[i].in_use = false;
regs[i].is_temp = false;
regs[i].pair = false;
regs[i].live = false;
regs[i].dirty = false;
regs[i].s_reg = INVALID_SREG;
+ size_t map_size = reginfo_map_.Size();
+ if (reg_number >= map_size) {
+ for (uint32_t j = 0; j < ((reg_number - map_size) + 1); j++) {
+ reginfo_map_.Insert(NULL);
+ }
+ }
+ reginfo_map_.Put(reg_number, &regs[i]);
}
}
@@ -170,17 +174,12 @@ void Mir2Lir::RecordFpPromotion(int reg, int s_reg) {
promotion_map_[p_map_idx].FpReg = reg;
}
-/*
- * Reserve a callee-save fp single register. Try to fullfill request for
- * even/odd allocation, but go ahead and allocate anything if not
- * available. If nothing's available, return -1.
- */
-int Mir2Lir::AllocPreservedSingle(int s_reg, bool even) {
- int res = -1;
+// Reserve a callee-save fp single register.
+int Mir2Lir::AllocPreservedSingle(int s_reg) {
+ int res = -1; // Return code if none available.
RegisterInfo* FPRegs = reg_pool_->FPRegs;
for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
- if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
- ((FPRegs[i].reg & 0x1) == 0) == even) {
+ if (!FPRegs[i].is_temp && !FPRegs[i].in_use) {
res = FPRegs[i].reg;
RecordFpPromotion(res, s_reg);
break;
@@ -246,26 +245,6 @@ int Mir2Lir::AllocPreservedDouble(int s_reg) {
return res;
}
-
-/*
- * Reserve a callee-save fp register. If this register can be used
- * as the first of a double, attempt to allocate an even pair of fp
- * single regs (but if can't still attempt to allocate a single, preferring
- * first to allocate an odd register.
- */
-int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start) {
- int res = -1;
- if (double_start) {
- res = AllocPreservedDouble(s_reg);
- }
- if (res == -1) {
- res = AllocPreservedSingle(s_reg, false /* try odd # */);
- }
- if (res == -1)
- res = AllocPreservedSingle(s_reg, true /* try even # */);
- return res;
-}
-
int Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
bool required) {
int next = *next_temp;
@@ -379,7 +358,7 @@ Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int
if (s_reg == -1)
return NULL;
for (int i = 0; i < num_regs; i++) {
- if (p[i].live && (p[i].s_reg == s_reg)) {
+ if ((p[i].s_reg == s_reg) && p[i].live) {
if (p[i].is_temp)
p[i].in_use = true;
return &p[i];
@@ -412,47 +391,16 @@ Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class) {
}
void Mir2Lir::FreeTemp(int reg) {
- RegisterInfo* p = reg_pool_->core_regs;
- int num_regs = reg_pool_->num_core_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- if (p[i].is_temp) {
- p[i].in_use = false;
- }
- p[i].pair = false;
- return;
- }
- }
- p = reg_pool_->FPRegs;
- num_regs = reg_pool_->num_fp_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- if (p[i].is_temp) {
- p[i].in_use = false;
- }
- p[i].pair = false;
- return;
- }
+ RegisterInfo* p = GetRegInfo(reg);
+ if (p->is_temp) {
+ p->in_use = false;
}
- LOG(FATAL) << "Tried to free a non-existant temp: r" << reg;
+ p->pair = false;
}
Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg) {
- RegisterInfo* p = reg_pool_->core_regs;
- int num_regs = reg_pool_->num_core_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- return p[i].live ? &p[i] : NULL;
- }
- }
- p = reg_pool_->FPRegs;
- num_regs = reg_pool_->num_fp_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- return p[i].live ? &p[i] : NULL;
- }
- }
- return NULL;
+ RegisterInfo* p = GetRegInfo(reg);
+ return p->live ? p : NULL;
}
Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg) {
@@ -476,27 +424,10 @@ bool Mir2Lir::IsDirty(int reg) {
* allocated. Use with caution.
*/
void Mir2Lir::LockTemp(int reg) {
- RegisterInfo* p = reg_pool_->core_regs;
- int num_regs = reg_pool_->num_core_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- DCHECK(p[i].is_temp);
- p[i].in_use = true;
- p[i].live = false;
- return;
- }
- }
- p = reg_pool_->FPRegs;
- num_regs = reg_pool_->num_fp_regs;
- for (int i = 0; i< num_regs; i++) {
- if (p[i].reg == reg) {
- DCHECK(p[i].is_temp);
- p[i].in_use = true;
- p[i].live = false;
- return;
- }
- }
- LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg;
+ RegisterInfo* p = GetRegInfo(reg);
+ DCHECK(p->is_temp);
+ p->in_use = true;
+ p->live = false;
}
void Mir2Lir::ResetDef(int reg) {
@@ -599,11 +530,13 @@ void Mir2Lir::ResetDefTracking() {
}
void Mir2Lir::ClobberAllRegs() {
- for (int i = 0; i< reg_pool_->num_core_regs; i++) {
- ClobberBody(&reg_pool_->core_regs[i]);
- }
- for (int i = 0; i< reg_pool_->num_fp_regs; i++) {
- ClobberBody(&reg_pool_->FPRegs[i]);
+ GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
+ for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
+ info->live = false;
+ info->s_reg = INVALID_SREG;
+ info->def_start = NULL;
+ info->def_end = NULL;
+ info->pair = false;
}
}
@@ -659,11 +592,13 @@ void Mir2Lir::MarkLive(int reg, int s_reg) {
void Mir2Lir::MarkTemp(int reg) {
RegisterInfo* info = GetRegInfo(reg);
+ tempreg_info_.Insert(info);
info->is_temp = true;
}
void Mir2Lir::UnmarkTemp(int reg) {
RegisterInfo* info = GetRegInfo(reg);
+ tempreg_info_.Delete(info);
info->is_temp = false;
}
@@ -912,18 +847,22 @@ RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
}
/* USE SSA names to count references of base Dalvik v_regs. */
-void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts) {
+void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) {
for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
RegLocation loc = mir_graph_->reg_location_[i];
RefCounts* counts = loc.fp ? fp_counts : core_counts;
int p_map_idx = SRegToPMap(loc.s_reg_low);
- // Don't count easily regenerated immediates
- if (loc.fp || !IsInexpensiveConstant(loc)) {
+ if (loc.fp) {
+ if (loc.wide) {
+ // Treat doubles as a unit, using upper half of fp_counts array.
+ counts[p_map_idx + num_regs].count += mir_graph_->GetUseCount(i);
+ i++;
+ } else {
+ counts[p_map_idx].count += mir_graph_->GetUseCount(i);
+ }
+ } else if (!IsInexpensiveConstant(loc)) {
counts[p_map_idx].count += mir_graph_->GetUseCount(i);
}
- if (loc.wide && loc.fp && !loc.high_word) {
- counts[p_map_idx].double_start = true;
- }
}
}
@@ -942,7 +881,11 @@ static int SortCounts(const void *val1, const void *val2) {
void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) {
LOG(INFO) << msg;
for (int i = 0; i < size; i++) {
- LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count;
+ if ((arr[i].s_reg & STARTING_DOUBLE_SREG) != 0) {
+ LOG(INFO) << "s_reg[D" << (arr[i].s_reg & ~STARTING_DOUBLE_SREG) << "]: " << arr[i].count;
+ } else {
+ LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count;
+ }
}
}
@@ -965,7 +908,7 @@ void Mir2Lir::DoPromotion() {
* count based on original Dalvik register name. Count refs
* separately based on type in order to give allocation
* preference to fp doubles - which must be allocated sequential
- * physical single fp registers started with an even-numbered
+ * physical single fp registers starting with an even-numbered
* reg.
* TUNING: replace with linear scan once we have the ability
* to describe register live ranges for GC.
@@ -974,7 +917,7 @@ void Mir2Lir::DoPromotion() {
static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * num_regs,
ArenaAllocator::kAllocRegAlloc));
RefCounts *FpRegs =
- static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * num_regs,
+ static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * num_regs * 2,
ArenaAllocator::kAllocRegAlloc));
// Set ssa names for original Dalvik registers
for (int i = 0; i < dalvik_regs; i++) {
@@ -982,46 +925,49 @@ void Mir2Lir::DoPromotion() {
}
// Set ssa name for Method*
core_regs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg();
- FpRegs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg(); // For consistecy
+ FpRegs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg(); // For consistency.
+ FpRegs[dalvik_regs + num_regs].s_reg = mir_graph_->GetMethodSReg(); // For consistency.
// Set ssa names for compiler_temps
for (int i = 1; i <= cu_->num_compiler_temps; i++) {
CompilerTemp* ct = mir_graph_->compiler_temps_.Get(i);
core_regs[dalvik_regs + i].s_reg = ct->s_reg;
FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
+ FpRegs[num_regs + dalvik_regs + i].s_reg = ct->s_reg;
}
- // Sum use counts of SSA regs by original Dalvik vreg.
- CountRefs(core_regs, FpRegs);
-
- /*
- * Ideally, we'd allocate doubles starting with an even-numbered
- * register. Bias the counts to try to allocate any vreg that's
- * used as the start of a pair first.
- */
+ // Duplicate in upper half to represent possible fp double starting sregs.
for (int i = 0; i < num_regs; i++) {
- if (FpRegs[i].double_start) {
- FpRegs[i].count *= 2;
- }
+ FpRegs[num_regs + i].s_reg = FpRegs[i].s_reg | STARTING_DOUBLE_SREG;
}
+ // Sum use counts of SSA regs by original Dalvik vreg.
+ CountRefs(core_regs, FpRegs, num_regs);
+
// Sort the count arrays
qsort(core_regs, num_regs, sizeof(RefCounts), SortCounts);
- qsort(FpRegs, num_regs, sizeof(RefCounts), SortCounts);
+ qsort(FpRegs, num_regs * 2, sizeof(RefCounts), SortCounts);
if (cu_->verbose) {
DumpCounts(core_regs, num_regs, "Core regs after sort");
- DumpCounts(FpRegs, num_regs, "Fp regs after sort");
+ DumpCounts(FpRegs, num_regs * 2, "Fp regs after sort");
}
if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
// Promote FpRegs
- for (int i = 0; (i < num_regs) && (FpRegs[i].count >= promotion_threshold); i++) {
- int p_map_idx = SRegToPMap(FpRegs[i].s_reg);
- if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
- int reg = AllocPreservedFPReg(FpRegs[i].s_reg,
- FpRegs[i].double_start);
+ for (int i = 0; (i < (num_regs * 2)) && (FpRegs[i].count >= promotion_threshold); i++) {
+ int p_map_idx = SRegToPMap(FpRegs[i].s_reg & ~STARTING_DOUBLE_SREG);
+ if ((FpRegs[i].s_reg & STARTING_DOUBLE_SREG) != 0) {
+ if ((promotion_map_[p_map_idx].fp_location != kLocPhysReg) &&
+ (promotion_map_[p_map_idx + 1].fp_location != kLocPhysReg)) {
+ int low_sreg = FpRegs[i].s_reg & ~STARTING_DOUBLE_SREG;
+ // Ignore the result: even if the double can't be allocated, singles may still be.
+ AllocPreservedDouble(low_sreg);
+ }
+ } else if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
+ int reg = AllocPreservedSingle(FpRegs[i].s_reg);
if (reg < 0) {
- break; // No more left
+ break; // No more left.
}
}
}
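The promotion changes above replace the old double_start count-doubling heuristic with an FpRegs array of 2 * num_regs entries: the lower half counts plain single-precision uses, the upper half counts uses of the same vregs as the low word of a double (tagged with STARTING_DOUBLE_SREG), and a single qsort then ranks singles and doubles together by use count. A compact standalone model of that layout and sort (the counts are made up for illustration):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct RefCounts { int count; int s_reg; };
    const int kStartingDoubleSreg = 0x10000;  // Stand-in for STARTING_DOUBLE_SREG.

    int main() {
      const int num_regs = 3;
      std::vector<RefCounts> fp(num_regs * 2);
      for (int i = 0; i < num_regs; ++i) {
        fp[i] = {0, i};                                   // Lower half: singles.
        fp[num_regs + i] = {0, i | kStartingDoubleSreg};  // Upper half: double starts.
      }
      fp[1].count = 5;             // v1 used five times as a single.
      fp[num_regs + 0].count = 9;  // v0/v1 used nine times as a double.
      std::sort(fp.begin(), fp.end(),
                [](const RefCounts& a, const RefCounts& b) { return a.count > b.count; });
      for (const RefCounts& rc : fp) {                    // D0 ranks first here.
        if ((rc.s_reg & kStartingDoubleSreg) != 0) {
          std::printf("D%d: %d\n", rc.s_reg & ~kStartingDoubleSreg, rc.count);
        } else {
          std::printf("s%d: %d\n", rc.s_reg, rc.count);
        }
      }
      return 0;
    }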
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index e8834320a9..3e768837ff 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1237,7 +1237,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(uintptr_t start_addr) {
delta = target - pc;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
// Useless branch
- lir->flags.is_nop = true;
+ NopLIR(lir);
if (kVerbosePcFixup) {
LOG(INFO) << "Retry for useless branch at " << lir->offset;
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 478654d0b4..0f281106b2 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -52,7 +52,6 @@ class X86Mir2Lir : public Mir2Lir {
int AllocTypedTempPair(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
- RegisterInfo* GetRegInfo(int reg);
RegLocation GetReturnAlt();
RegLocation GetReturnWideAlt();
RegLocation LocCReturn();
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 26accab360..94dd759e91 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -375,11 +375,6 @@ RegLocation X86Mir2Lir::GetReturnAlt() {
return res;
}
-X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg) {
- return X86_FPREG(reg) ? &reg_pool_->FPRegs[reg & X86_FP_REG_MASK]
- : &reg_pool_->core_regs[reg];
-}
-
/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
LockTemp(rX86_ARG0);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index cd1602f674..366d7f26be 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -22,7 +22,7 @@
namespace art {
void MIRGraph::ClearAllVisitedFlags() {
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
bb->visited = false;
}
@@ -145,11 +145,11 @@ void MIRGraph::ComputeDefBlockMatrix() {
def_block_matrix_[i] =
new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
}
- AllNodesIterator iter(this, false /* not iterative */);
+ AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
FindLocalLiveIn(bb);
}
- AllNodesIterator iter2(this, false /* not iterative */);
+ AllNodesIterator iter2(this);
for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
FillDefBlockMatrix(bb);
}
@@ -377,7 +377,7 @@ void MIRGraph::ComputeDominators() {
int num_total_blocks = GetBasicBlockListCount();
/* Initialize domination-related data structures */
- ReachableNodesIterator iter(this, false /* not iterative */);
+ PreOrderDfsIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
InitializeDominationInfo(bb);
}
@@ -396,7 +396,7 @@ void MIRGraph::ComputeDominators() {
i_dom_list_[GetEntryBlock()->dfs_id] = GetEntryBlock()->dfs_id;
/* Compute the immediate dominators */
- ReversePostOrderDfsIterator iter2(this, true /* iterative */);
+ RepeatingReversePostOrderDfsIterator iter2(this);
bool change = false;
for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
change = ComputeblockIDom(bb);
@@ -414,19 +414,19 @@ void MIRGraph::ComputeDominators() {
}
GetEntryBlock()->i_dom = NULL;
- ReachableNodesIterator iter3(this, false /* not iterative */);
+ PreOrderDfsIterator iter3(this);
for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
SetDominators(bb);
}
- ReversePostOrderDfsIterator iter4(this, false /* not iterative */);
+ ReversePostOrderDfsIterator iter4(this);
for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
ComputeBlockDominators(bb);
}
// Compute the dominance frontier for each block.
ComputeDomPostOrderTraversal(GetEntryBlock());
- PostOrderDOMIterator iter5(this, false /* not iterative */);
+ PostOrderDOMIterator iter5(this);
for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
ComputeDominanceFrontier(bb);
}
@@ -503,7 +503,7 @@ void MIRGraph::InsertPhiNodes() {
temp_dalvik_register_v_ =
new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapRegisterV);
- PostOrderDfsIterator iter(this, true /* iterative */);
+ RepeatingPostOrderDfsIterator iter(this);
bool change = false;
for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
change = ComputeBlockLiveIns(bb);
@@ -700,7 +700,7 @@ void MIRGraph::SSATransformation() {
new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
/* Insert phi-operands with latest SSA names from predecessor blocks */
- ReachableNodesIterator iter2(this, false /* not iterative */);
+ PreOrderDfsIterator iter2(this);
for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
InsertPhiNodeOperands(bb);
}
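All of the dataflow walks above drop the bool `iterative` constructor flag; whether a walk repeats to a fixed point is now encoded in the iterator's type (the Repeating* variants). The contract is that Next(false) simply advances, while passing change == true schedules one more full pass. A standalone model of that contract, simplified to integer block ids:

    // -1 plays the role of NULL at the end of iteration.
    class RepeatingIter {
     public:
      explicit RepeatingIter(int num_blocks) : n_(num_blocks) {}
      int Next(bool change) {
        repeat_ = repeat_ || change;  // Any change requests one more pass.
        if (idx_ == n_) {
          if (!repeat_) return -1;    // Fixed point: last pass changed nothing.
          repeat_ = false;
          idx_ = 0;                   // Start the extra pass.
        }
        return idx_++;
      }
     private:
      int n_;
      int idx_ = 0;
      bool repeat_ = false;
    };

    // Usage mirrors the loops above:
    //   for (int bb = iter.Next(false); bb != -1; bb = iter.Next(change)) ...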
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 07f37bbbbb..32fac0b393 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -29,6 +29,16 @@ bool MIRGraph::SetFp(int index, bool is_fp) {
return change;
}
+bool MIRGraph::SetFp(int index) {
+ bool change = false;
+ if (!reg_location_[index].fp) {
+ reg_location_[index].fp = true;
+ reg_location_[index].defined = true;
+ change = true;
+ }
+ return change;
+}
+
bool MIRGraph::SetCore(int index, bool is_core) {
bool change = false;
if (is_core && !reg_location_[index].defined) {
@@ -39,6 +49,16 @@ bool MIRGraph::SetCore(int index, bool is_core) {
return change;
}
+bool MIRGraph::SetCore(int index) {
+ bool change = false;
+ if (!reg_location_[index].defined) {
+ reg_location_[index].core = true;
+ reg_location_[index].defined = true;
+ change = true;
+ }
+ return change;
+}
+
bool MIRGraph::SetRef(int index, bool is_ref) {
bool change = false;
if (is_ref && !reg_location_[index].defined) {
@@ -49,6 +69,16 @@ bool MIRGraph::SetRef(int index, bool is_ref) {
return change;
}
+bool MIRGraph::SetRef(int index) {
+ bool change = false;
+ if (!reg_location_[index].defined) {
+ reg_location_[index].ref = true;
+ reg_location_[index].defined = true;
+ change = true;
+ }
+ return change;
+}
+
bool MIRGraph::SetWide(int index, bool is_wide) {
bool change = false;
if (is_wide && !reg_location_[index].wide) {
@@ -58,6 +88,15 @@ bool MIRGraph::SetWide(int index, bool is_wide) {
return change;
}
+bool MIRGraph::SetWide(int index) {
+ bool change = false;
+ if (!reg_location_[index].wide) {
+ reg_location_[index].wide = true;
+ change = true;
+ }
+ return change;
+}
+
bool MIRGraph::SetHigh(int index, bool is_high) {
bool change = false;
if (is_high && !reg_location_[index].high_word) {
@@ -67,6 +106,16 @@ bool MIRGraph::SetHigh(int index, bool is_high) {
return change;
}
+bool MIRGraph::SetHigh(int index) {
+ bool change = false;
+ if (!reg_location_[index].high_word) {
+ reg_location_[index].high_word = true;
+ change = true;
+ }
+ return change;
+}
+
/*
* Infer types and sizes. We don't need to track change on sizes,
* as it doesn't propagate. We're guaranteed at least one pass through
@@ -84,21 +133,23 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
SSARepresentation *ssa_rep = mir->ssa_rep;
if (ssa_rep) {
int attrs = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+ const int* uses = ssa_rep->uses;
+ const int* defs = ssa_rep->defs;
// Handle defs
if (attrs & DF_DA) {
if (attrs & DF_CORE_A) {
- changed |= SetCore(ssa_rep->defs[0], true);
+ changed |= SetCore(defs[0]);
}
if (attrs & DF_REF_A) {
- changed |= SetRef(ssa_rep->defs[0], true);
+ changed |= SetRef(defs[0]);
}
if (attrs & DF_A_WIDE) {
- reg_location_[ssa_rep->defs[0]].wide = true;
- reg_location_[ssa_rep->defs[1]].wide = true;
- reg_location_[ssa_rep->defs[1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->defs[0])+1,
- SRegToVReg(ssa_rep->defs[1]));
+ reg_location_[defs[0]].wide = true;
+ reg_location_[defs[1]].wide = true;
+ reg_location_[defs[1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(defs[0])+1,
+ SRegToVReg(defs[1]));
}
}
@@ -106,17 +157,17 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
int next = 0;
if (attrs & DF_UA) {
if (attrs & DF_CORE_A) {
- changed |= SetCore(ssa_rep->uses[next], true);
+ changed |= SetCore(uses[next]);
}
if (attrs & DF_REF_A) {
- changed |= SetRef(ssa_rep->uses[next], true);
+ changed |= SetRef(uses[next]);
}
if (attrs & DF_A_WIDE) {
- reg_location_[ssa_rep->uses[next]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
- SRegToVReg(ssa_rep->uses[next + 1]));
+ reg_location_[uses[next]].wide = true;
+ reg_location_[uses[next + 1]].wide = true;
+ reg_location_[uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[next])+1,
+ SRegToVReg(uses[next + 1]));
next += 2;
} else {
next++;
@@ -124,17 +175,17 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
}
if (attrs & DF_UB) {
if (attrs & DF_CORE_B) {
- changed |= SetCore(ssa_rep->uses[next], true);
+ changed |= SetCore(uses[next]);
}
if (attrs & DF_REF_B) {
- changed |= SetRef(ssa_rep->uses[next], true);
+ changed |= SetRef(uses[next]);
}
if (attrs & DF_B_WIDE) {
- reg_location_[ssa_rep->uses[next]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
- SRegToVReg(ssa_rep->uses[next + 1]));
+ reg_location_[uses[next]].wide = true;
+ reg_location_[uses[next + 1]].wide = true;
+ reg_location_[uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[next])+1,
+ SRegToVReg(uses[next + 1]));
next += 2;
} else {
next++;
@@ -142,17 +193,17 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
}
if (attrs & DF_UC) {
if (attrs & DF_CORE_C) {
- changed |= SetCore(ssa_rep->uses[next], true);
+ changed |= SetCore(uses[next]);
}
if (attrs & DF_REF_C) {
- changed |= SetRef(ssa_rep->uses[next], true);
+ changed |= SetRef(uses[next]);
}
if (attrs & DF_C_WIDE) {
- reg_location_[ssa_rep->uses[next]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].wide = true;
- reg_location_[ssa_rep->uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
- SRegToVReg(ssa_rep->uses[next + 1]));
+ reg_location_[uses[next]].wide = true;
+ reg_location_[uses[next + 1]].wide = true;
+ reg_location_[uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[next])+1,
+ SRegToVReg(uses[next + 1]));
}
}
@@ -162,27 +213,27 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
(mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
switch (cu_->shorty[0]) {
case 'I':
- changed |= SetCore(ssa_rep->uses[0], true);
+ changed |= SetCore(uses[0]);
break;
case 'J':
- changed |= SetCore(ssa_rep->uses[0], true);
- changed |= SetCore(ssa_rep->uses[1], true);
- reg_location_[ssa_rep->uses[0]].wide = true;
- reg_location_[ssa_rep->uses[1]].wide = true;
- reg_location_[ssa_rep->uses[1]].high_word = true;
+ changed |= SetCore(uses[0]);
+ changed |= SetCore(uses[1]);
+ reg_location_[uses[0]].wide = true;
+ reg_location_[uses[1]].wide = true;
+ reg_location_[uses[1]].high_word = true;
break;
case 'F':
- changed |= SetFp(ssa_rep->uses[0], true);
+ changed |= SetFp(uses[0]);
break;
case 'D':
- changed |= SetFp(ssa_rep->uses[0], true);
- changed |= SetFp(ssa_rep->uses[1], true);
- reg_location_[ssa_rep->uses[0]].wide = true;
- reg_location_[ssa_rep->uses[1]].wide = true;
- reg_location_[ssa_rep->uses[1]].high_word = true;
+ changed |= SetFp(uses[0]);
+ changed |= SetFp(uses[1]);
+ reg_location_[uses[0]].wide = true;
+ reg_location_[uses[1]].wide = true;
+ reg_location_[uses[1]].high_word = true;
break;
case 'L':
- changed |= SetRef(ssa_rep->uses[0], true);
+ changed |= SetRef(uses[0]);
break;
default: break;
}
@@ -206,10 +257,10 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
DCHECK(tgt_rep != NULL);
tgt_rep->fp_def[0] = true;
- changed |= SetFp(tgt_rep->defs[0], true);
+ changed |= SetFp(tgt_rep->defs[0]);
if (shorty[0] == 'D') {
tgt_rep->fp_def[1] = true;
- changed |= SetFp(tgt_rep->defs[1], true);
+ changed |= SetFp(tgt_rep->defs[1]);
}
}
}
@@ -217,8 +268,8 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
// If this is a non-static invoke, mark implicit "this"
if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
(mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
- reg_location_[ssa_rep->uses[next]].defined = true;
- reg_location_[ssa_rep->uses[next]].ref = true;
+ reg_location_[uses[next]].defined = true;
+ reg_location_[uses[next]].ref = true;
next++;
}
uint32_t cpos = 1;
@@ -229,28 +280,28 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
case 'D':
ssa_rep->fp_use[i] = true;
ssa_rep->fp_use[i+1] = true;
- reg_location_[ssa_rep->uses[i]].wide = true;
- reg_location_[ssa_rep->uses[i+1]].wide = true;
- reg_location_[ssa_rep->uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
+ reg_location_[uses[i]].wide = true;
+ reg_location_[uses[i+1]].wide = true;
+ reg_location_[uses[i+1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
i++;
break;
case 'J':
- reg_location_[ssa_rep->uses[i]].wide = true;
- reg_location_[ssa_rep->uses[i+1]].wide = true;
- reg_location_[ssa_rep->uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
- changed |= SetCore(ssa_rep->uses[i], true);
+ reg_location_[uses[i]].wide = true;
+ reg_location_[uses[i+1]].wide = true;
+ reg_location_[uses[i+1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
+ changed |= SetCore(uses[i]);
i++;
break;
case 'F':
ssa_rep->fp_use[i] = true;
break;
case 'L':
- changed |= SetRef(ssa_rep->uses[i], true);
+ changed |= SetRef(uses[i]);
break;
default:
- changed |= SetCore(ssa_rep->uses[i], true);
+ changed |= SetCore(uses[i]);
break;
}
i++;
@@ -260,11 +311,11 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
for (int i = 0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
if (ssa_rep->fp_use[i])
- changed |= SetFp(ssa_rep->uses[i], true);
+ changed |= SetFp(uses[i]);
}
for (int i = 0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
if (ssa_rep->fp_def[i])
- changed |= SetFp(ssa_rep->defs[i], true);
+ changed |= SetFp(defs[i]);
}
// Special-case handling for moves & Phi
if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
@@ -276,14 +327,14 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
*/
bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) ==
kMirOpPhi);
- RegLocation rl_temp = reg_location_[ssa_rep->defs[0]];
+ RegLocation rl_temp = reg_location_[defs[0]];
bool defined_fp = rl_temp.defined && rl_temp.fp;
bool defined_core = rl_temp.defined && rl_temp.core;
bool defined_ref = rl_temp.defined && rl_temp.ref;
bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
for (int i = 0; i < ssa_rep->num_uses; i++) {
- rl_temp = reg_location_[ssa_rep->uses[i]];
+ rl_temp = reg_location_[uses[i]];
defined_fp |= rl_temp.defined && rl_temp.fp;
defined_core |= rl_temp.defined && rl_temp.core;
defined_ref |= rl_temp.defined && rl_temp.ref;
@@ -303,26 +354,26 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
<< " has both fp and core/ref uses for same def.";
cu_->disable_opt |= (1 << kPromoteRegs);
}
- changed |= SetFp(ssa_rep->defs[0], defined_fp);
- changed |= SetCore(ssa_rep->defs[0], defined_core);
- changed |= SetRef(ssa_rep->defs[0], defined_ref);
- changed |= SetWide(ssa_rep->defs[0], is_wide);
- changed |= SetHigh(ssa_rep->defs[0], is_high);
+ changed |= SetFp(defs[0], defined_fp);
+ changed |= SetCore(defs[0], defined_core);
+ changed |= SetRef(defs[0], defined_ref);
+ changed |= SetWide(defs[0], is_wide);
+ changed |= SetHigh(defs[0], is_high);
if (attrs & DF_A_WIDE) {
- changed |= SetWide(ssa_rep->defs[1], true);
- changed |= SetHigh(ssa_rep->defs[1], true);
+ changed |= SetWide(defs[1]);
+ changed |= SetHigh(defs[1]);
}
for (int i = 0; i < ssa_rep->num_uses; i++) {
- changed |= SetFp(ssa_rep->uses[i], defined_fp);
- changed |= SetCore(ssa_rep->uses[i], defined_core);
- changed |= SetRef(ssa_rep->uses[i], defined_ref);
- changed |= SetWide(ssa_rep->uses[i], is_wide);
- changed |= SetHigh(ssa_rep->uses[i], is_high);
+ changed |= SetFp(uses[i], defined_fp);
+ changed |= SetCore(uses[i], defined_core);
+ changed |= SetRef(uses[i], defined_ref);
+ changed |= SetWide(uses[i], is_wide);
+ changed |= SetHigh(uses[i], is_high);
}
if (attrs & DF_A_WIDE) {
DCHECK_EQ(ssa_rep->num_uses, 2);
- changed |= SetWide(ssa_rep->uses[1], true);
- changed |= SetHigh(ssa_rep->uses[1], true);
+ changed |= SetWide(uses[1]);
+ changed |= SetHigh(uses[1]);
}
}
}
@@ -444,7 +495,7 @@ void MIRGraph::BuildRegLocations() {
}
/* Do type & size inference pass */
- PreOrderDfsIterator iter(this, true /* iterative */);
+ RepeatingPreOrderDfsIterator iter(this);
bool change = false;
for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
change = InferTypeAndSize(bb);
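The new zero-argument SetCore/SetRef/SetFp/SetWide/SetHigh overloads above serve the common call sites that previously passed a literal `true`, removing a branch on a constant; the two-argument forms survive for the move/Phi path, where the flag is computed. The shape of the pattern in isolation (simplified, hypothetical types):

    struct Loc { bool core = false; bool defined = false; };

    struct Graph {
      Loc reg_location_[16];

      bool SetCore(int index) {                   // Unconditional variant.
        bool change = false;
        if (!reg_location_[index].defined) {
          reg_location_[index].core = true;
          reg_location_[index].defined = true;
          change = true;
        }
        return change;
      }

      bool SetCore(int index, bool is_core) {     // Kept for computed flags,
        return is_core ? SetCore(index) : false;  // e.g. the Phi/move path.
      }
    };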
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6eabeed34b..056be1fb04 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -355,7 +355,11 @@ CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet
jni_compiler_(NULL),
compiler_enable_auto_elf_loading_(NULL),
compiler_get_method_code_addr_(NULL),
- support_boot_image_fixup_(true) {
+ support_boot_image_fixup_(true),
+ dedupe_code_("dedupe code"),
+ dedupe_mapping_table_("dedupe mapping table"),
+ dedupe_vmap_table_("dedupe vmap table"),
+ dedupe_gc_map_("dedupe gc map") {
CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key");
@@ -596,12 +600,11 @@ void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const De
UpdateImageClasses(timings);
}
-bool CompilerDriver::IsImageClass(const char* descriptor) const {
- DCHECK(descriptor != NULL);
+bool CompilerDriver::IsImageClass(const StringPiece& descriptor) const {
if (!IsImage()) {
return true;
} else {
- return image_classes_->find(descriptor) != image_classes_->end();
+ return image_classes_->find(descriptor.data()) != image_classes_->end();
}
}
@@ -776,7 +779,8 @@ void CompilerDriver::UpdateImageClasses(base::TimingLogger& timings) {
bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file,
uint32_t type_idx) {
- if (IsImage() && IsImageClass(dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)))) {
+ if (IsImage() &&
+ IsImageClass(dex_file.StringDataAsStringPieceByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_))) {
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
@@ -912,9 +916,9 @@ static mirror::ArtField* ComputeFieldReferencedFromCompilingMethod(ScopedObjectA
}
static mirror::ArtMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit,
- uint32_t method_idx,
- InvokeType type)
+ const DexCompilationUnit* mUnit,
+ uint32_t method_idx,
+ InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
@@ -923,11 +927,11 @@ static mirror::ArtMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjec
}
bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- int& field_offset, bool& is_volatile, bool is_put) {
+ bool is_put, int* field_offset, bool* is_volatile) {
ScopedObjectAccess soa(Thread::Current());
// Conservative defaults.
- field_offset = -1;
- is_volatile = true;
+ *field_offset = -1;
+ *is_volatile = true;
// Try to resolve field and ignore if an Incompatible Class Change Error (ie is static).
mirror::ArtField* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && !resolved_field->IsStatic()) {
@@ -954,8 +958,8 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi
bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal() &&
fields_class != referrer_class;
if (access_ok && !is_write_to_final_from_wrong_class) {
- field_offset = resolved_field->GetOffset().Int32Value();
- is_volatile = resolved_field->IsVolatile();
+ *field_offset = resolved_field->GetOffset().Int32Value();
+ *is_volatile = resolved_field->IsVolatile();
stats_->ResolvedInstanceField();
return true; // Fast path.
}
@@ -970,15 +974,14 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi
}
bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- int& field_offset, int& ssb_index,
- bool& is_referrers_class, bool& is_volatile,
- bool is_put) {
+ bool is_put, int* field_offset, int* ssb_index,
+ bool* is_referrers_class, bool* is_volatile) {
ScopedObjectAccess soa(Thread::Current());
// Conservative defaults.
- field_offset = -1;
- ssb_index = -1;
- is_referrers_class = false;
- is_volatile = true;
+ *field_offset = -1;
+ *ssb_index = -1;
+ *is_referrers_class = false;
+ *is_volatile = true;
// Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static).
mirror::ArtField* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && resolved_field->IsStatic()) {
@@ -988,9 +991,9 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
if (referrer_class != NULL) {
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
if (fields_class == referrer_class) {
- is_referrers_class = true; // implies no worrying about class initialization
- field_offset = resolved_field->GetOffset().Int32Value();
- is_volatile = resolved_field->IsVolatile();
+ *is_referrers_class = true; // Implies no need to worry about class initialization.
+ *field_offset = resolved_field->GetOffset().Int32Value();
+ *is_volatile = resolved_field->IsVolatile();
stats_->ResolvedLocalStaticField();
return true; // fast path
} else {
@@ -1021,9 +1024,9 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
if (fields_class->GetDexCache() == dex_cache) {
// common case where the dex cache of both the referrer and the field are the same,
// no need to search the dex file
- ssb_index = fields_class->GetDexTypeIndex();
- field_offset = resolved_field->GetOffset().Int32Value();
- is_volatile = resolved_field->IsVolatile();
+ *ssb_index = fields_class->GetDexTypeIndex();
+ *field_offset = resolved_field->GetOffset().Int32Value();
+ *is_volatile = resolved_field->IsVolatile();
stats_->ResolvedStaticField();
return true;
}
@@ -1036,9 +1039,9 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
mUnit->GetDexFile()->FindTypeId(mUnit->GetDexFile()->GetIndexForStringId(*string_id));
if (type_id != NULL) {
// medium path, needs check of static storage base being initialized
- ssb_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
- field_offset = resolved_field->GetOffset().Int32Value();
- is_volatile = resolved_field->IsVolatile();
+ *ssb_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
+ *field_offset = resolved_field->GetOffset().Int32Value();
+ *is_volatile = resolved_field->IsVolatile();
stats_->ResolvedStaticField();
return true;
}
@@ -1058,15 +1061,15 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
mirror::Class* referrer_class,
mirror::ArtMethod* method,
- uintptr_t& direct_code,
- uintptr_t& direct_method,
- bool update_stats) {
+ bool update_stats,
+ uintptr_t* direct_code,
+ uintptr_t* direct_method) {
// For direct and static methods compute possible direct_code and direct_method values, ie
// an address for the Method* being invoked and an address of the code for that Method*.
// For interface calls compute a value for direct_method that is the interface method being
// invoked, so this can be passed to the out-of-line runtime support code.
- direct_code = 0;
- direct_method = 0;
+ *direct_code = 0;
+ *direct_method = 0;
if (compiler_backend_ == kPortable) {
if (sharp_type != kStatic && sharp_type != kDirect) {
return;
@@ -1095,41 +1098,40 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType s
if (compiling_boot) {
if (support_boot_image_fixup_) {
MethodHelper mh(method);
- if (IsImageClass(mh.GetDeclaringClassDescriptor())) {
+ if (IsImageClass(mh.GetDeclaringClassDescriptorAsStringPiece())) {
// We can only branch directly to Methods that are resolved in the DexCache.
// Otherwise we won't invoke the resolution trampoline.
- direct_method = -1;
- direct_code = -1;
+ *direct_method = -1;
+ *direct_code = -1;
}
}
} else {
if (Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace()) {
- direct_method = reinterpret_cast<uintptr_t>(method);
+ *direct_method = reinterpret_cast<uintptr_t>(method);
}
- direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
+ *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
}
}
bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
- InvokeType& invoke_type,
- MethodReference& target_method,
- int& vtable_idx,
- uintptr_t& direct_code, uintptr_t& direct_method,
- bool update_stats) {
+ bool update_stats, bool enable_devirtualization,
+ InvokeType* invoke_type, MethodReference* target_method,
+ int* vtable_idx, uintptr_t* direct_code,
+ uintptr_t* direct_method) {
ScopedObjectAccess soa(Thread::Current());
- vtable_idx = -1;
- direct_code = 0;
- direct_method = 0;
+ *vtable_idx = -1;
+ *direct_code = 0;
+ *direct_method = 0;
mirror::ArtMethod* resolved_method =
- ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method.dex_method_index,
- invoke_type);
+ ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method->dex_method_index,
+ *invoke_type);
if (resolved_method != NULL) {
// Don't try to fast-path if we don't understand the caller's class or this appears to be an
// Incompatible Class Change Error.
mirror::Class* referrer_class =
ComputeCompilingMethodsClass(soa, resolved_method->GetDeclaringClass()->GetDexCache(),
mUnit);
- bool icce = resolved_method->CheckIncompatibleClassChange(invoke_type);
+ bool icce = resolved_method->CheckIncompatibleClassChange(*invoke_type);
if (referrer_class != NULL && !icce) {
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
if (!referrer_class->CanAccess(methods_class) ||
@@ -1140,42 +1142,42 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
// method public. Resort to the dex file to determine the correct class for the access
// check.
uint16_t class_idx =
- target_method.dex_file->GetMethodId(target_method.dex_method_index).class_idx_;
- methods_class = mUnit->GetClassLinker()->ResolveType(*target_method.dex_file,
+ target_method->dex_file->GetMethodId(target_method->dex_method_index).class_idx_;
+ methods_class = mUnit->GetClassLinker()->ResolveType(*target_method->dex_file,
class_idx, referrer_class);
}
if (referrer_class->CanAccess(methods_class) &&
referrer_class->CanAccessMember(methods_class, resolved_method->GetAccessFlags())) {
- const bool kEnableFinalBasedSharpening = true;
+ const bool enableFinalBasedSharpening = enable_devirtualization;
// Sharpen a virtual call into a direct call when the target is known not to have been
// overridden (ie is final).
bool can_sharpen_virtual_based_on_type =
- (invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
+ (*invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
// For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
// the super class.
- bool can_sharpen_super_based_on_type = (invoke_type == kSuper) &&
+ bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
(referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
resolved_method->GetMethodIndex() < methods_class->GetVTable()->GetLength() &&
(methods_class->GetVTable()->Get(resolved_method->GetMethodIndex()) == resolved_method);
- if (kEnableFinalBasedSharpening && (can_sharpen_virtual_based_on_type ||
+ if (enableFinalBasedSharpening && (can_sharpen_virtual_based_on_type ||
can_sharpen_super_based_on_type)) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
- CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method.dex_method_index) ==
+ CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
resolved_method) << PrettyMethod(resolved_method);
if (update_stats) {
- stats_->ResolvedMethod(invoke_type);
- stats_->VirtualMadeDirect(invoke_type);
+ stats_->ResolvedMethod(*invoke_type);
+ stats_->VirtualMadeDirect(*invoke_type);
}
- GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, resolved_method,
- direct_code, direct_method, update_stats);
- invoke_type = kDirect;
+ GetCodeAndMethodForDirectCall(*invoke_type, kDirect, referrer_class, resolved_method,
+ update_stats, direct_code, direct_method);
+ *invoke_type = kDirect;
return true;
}
- const bool kEnableVerifierBasedSharpening = true;
- if (kEnableVerifierBasedSharpening && (invoke_type == kVirtual ||
- invoke_type == kInterface)) {
+ const bool enableVerifierBasedSharpening = enable_devirtualization;
+ if (enableVerifierBasedSharpening && (*invoke_type == kVirtual ||
+ *invoke_type == kInterface)) {
// Did the verifier record a more precise invoke target based on its type information?
const MethodReference caller_method(mUnit->GetDexFile(), mUnit->GetDexMethodIndex());
const MethodReference* devirt_map_target =
@@ -1192,14 +1194,14 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
kVirtual);
CHECK(called_method != NULL);
CHECK(!called_method->IsAbstract());
- GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, called_method,
- direct_code, direct_method, update_stats);
+ GetCodeAndMethodForDirectCall(*invoke_type, kDirect, referrer_class, called_method,
+ update_stats, direct_code, direct_method);
bool compiler_needs_dex_cache =
(GetCompilerBackend() == kPortable) ||
(GetCompilerBackend() == kQuick && instruction_set_ != kThumb2) ||
- (direct_code == 0) || (direct_code == static_cast<unsigned int>(-1)) ||
- (direct_method == 0) || (direct_method == static_cast<unsigned int>(-1));
- if ((devirt_map_target->dex_file != target_method.dex_file) &&
+ (*direct_code == 0) || (*direct_code == static_cast<unsigned int>(-1)) ||
+ (*direct_method == 0) || (*direct_method == static_cast<unsigned int>(-1));
+ if ((devirt_map_target->dex_file != target_method->dex_file) &&
compiler_needs_dex_cache) {
// We need to use the dex cache to find either the method or code, and the dex file
// containing the method isn't the one expected for the target method. Try to find
@@ -1209,7 +1211,7 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
// TODO: quick only supports direct pointers with Thumb2.
// TODO: the following should be factored into a common helper routine to find
// one dex file's method within another.
- const DexFile* dexfile = target_method.dex_file;
+ const DexFile* dexfile = target_method->dex_file;
const DexFile* cm_dexfile =
called_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
const DexFile::MethodId& cm_method_id =
@@ -1225,8 +1227,9 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
if (name != NULL) {
uint16_t return_type_idx;
std::vector<uint16_t> param_type_idxs;
- bool success = dexfile->CreateTypeList(&return_type_idx, &param_type_idxs,
- cm_dexfile->GetMethodSignature(cm_method_id));
+ bool success =
+ dexfile->CreateTypeList(cm_dexfile->GetMethodSignature(cm_method_id).ToString(),
+ &return_type_idx, &param_type_idxs);
if (success) {
const DexFile::ProtoId* sig =
dexfile->FindProtoId(return_type_idx, param_type_idxs);
@@ -1235,12 +1238,13 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
*name, *sig);
if (method_id != NULL) {
if (update_stats) {
- stats_->ResolvedMethod(invoke_type);
- stats_->VirtualMadeDirect(invoke_type);
+ stats_->ResolvedMethod(*invoke_type);
+ stats_->VirtualMadeDirect(*invoke_type);
stats_->PreciseTypeDevirtualization();
}
- target_method.dex_method_index = dexfile->GetIndexForMethodId(*method_id);
- invoke_type = kDirect;
+ target_method->dex_method_index =
+ dexfile->GetIndexForMethodId(*method_id);
+ *invoke_type = kDirect;
return true;
}
}
@@ -1252,28 +1256,28 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
// method in the referring method's dex cache/file.
} else {
if (update_stats) {
- stats_->ResolvedMethod(invoke_type);
- stats_->VirtualMadeDirect(invoke_type);
+ stats_->ResolvedMethod(*invoke_type);
+ stats_->VirtualMadeDirect(*invoke_type);
stats_->PreciseTypeDevirtualization();
}
- target_method = *devirt_map_target;
- invoke_type = kDirect;
+ *target_method = *devirt_map_target;
+ *invoke_type = kDirect;
return true;
}
}
}
- if (invoke_type == kSuper) {
+ if (*invoke_type == kSuper) {
// Unsharpened super calls are suspicious so go slow-path.
} else {
// Sharpening failed so generate a regular resolved method dispatch.
if (update_stats) {
- stats_->ResolvedMethod(invoke_type);
+ stats_->ResolvedMethod(*invoke_type);
}
- if (invoke_type == kVirtual || invoke_type == kSuper) {
- vtable_idx = resolved_method->GetMethodIndex();
+ if (*invoke_type == kVirtual || *invoke_type == kSuper) {
+ *vtable_idx = resolved_method->GetMethodIndex();
}
- GetCodeAndMethodForDirectCall(invoke_type, invoke_type, referrer_class, resolved_method,
- direct_code, direct_method, update_stats);
+ GetCodeAndMethodForDirectCall(*invoke_type, *invoke_type, referrer_class, resolved_method,
+ update_stats, direct_code, direct_method);
return true;
}
}
@@ -1284,7 +1288,7 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
soa.Self()->ClearException();
}
if (update_stats) {
- stats_->UnresolvedMethod(invoke_type);
+ stats_->UnresolvedMethod(*invoke_type);
}
return false; // Incomplete knowledge needs slow path.
}
@@ -1569,8 +1573,8 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i
CHECK(soa.Self()->IsExceptionPending());
mirror::Throwable* exception = soa.Self()->GetException(NULL);
VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
- if (strcmp(ClassHelper(exception->GetClass()).GetDescriptor(),
- "Ljava/lang/OutOfMemoryError;") == 0) {
+ if (ClassHelper(exception->GetClass()).GetDescriptorAsStringPiece() ==
+ "Ljava/lang/OutOfMemoryError;") {
// There's little point continuing compilation if the heap is exhausted.
LOG(FATAL) << "Out of memory during type resolution for compilation";
}
@@ -1589,13 +1593,11 @@ void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_fil
if (IsImage()) {
// For images we resolve all types, such as array, whereas for applications just those with
// classdefs are resolved by ResolveClassFieldsAndMethods.
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " Types").c_str()));
+ timings.NewSplit("Resolve Types");
context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
}
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " MethodsAndFields").c_str()));
+ timings.NewSplit("Resolve MethodsAndFields");
context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
}
@@ -1658,8 +1660,7 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
ThreadPool& thread_pool, base::TimingLogger& timings) {
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("Verify " + dex_file.GetLocation()).c_str()));
+ timings.NewSplit("Verify Dex File");
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
@@ -2084,11 +2085,14 @@ static const char* class_initializer_black_list[] = {
static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index)
LOCKS_EXCLUDED(Locks::mutator_lock_) {
ATRACE_CALL();
- const DexFile::ClassDef& class_def = manager->GetDexFile()->GetClassDef(class_def_index);
+ const DexFile* dex_file = manager->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
+ const DexFile::TypeId& class_type_id = dex_file->GetTypeId(class_def.class_idx_);
+ StringPiece descriptor(dex_file->StringDataAsStringPieceByIdx(class_type_id.descriptor_idx_));
+
ScopedObjectAccess soa(Thread::Current());
mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader());
- const char* descriptor = manager->GetDexFile()->GetClassDescriptor(class_def);
- mirror::Class* klass = manager->GetClassLinker()->FindClass(descriptor, class_loader);
+ mirror::Class* klass = manager->GetClassLinker()->FindClass(descriptor.data(), class_loader);
if (klass != NULL) {
// Only try to initialize classes that were successfully verified.
if (klass->IsVerified()) {
@@ -2118,7 +2122,7 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
bool is_black_listed = StringPiece(descriptor).ends_with("$NoPreloadHolder;");
if (!is_black_listed) {
for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) {
- if (StringPiece(descriptor) == class_initializer_black_list[i]) {
+ if (descriptor == class_initializer_black_list[i]) {
is_black_listed = true;
break;
}
@@ -2126,7 +2130,7 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
}
if (!is_black_listed) {
VLOG(compiler) << "Initializing: " << descriptor;
- if (StringPiece(descriptor) == "Ljava/lang/Void;") {
+ if (descriptor == "Ljava/lang/Void;") {
// Hand initialize j.l.Void to avoid Dex file operations in un-started runtime.
ObjectLock lock(soa.Self(), klass);
mirror::ObjectArray<mirror::ArtField>* fields = klass->GetSFields();
@@ -2157,8 +2161,7 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
ThreadPool& thread_pool, base::TimingLogger& timings) {
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("InitializeNoClinit " + dex_file.GetLocation()).c_str()));
+ timings.NewSplit("InitializeNoClinit");
#ifndef NDEBUG
// Sanity check blacklist descriptors.
if (IsImage()) {
@@ -2265,8 +2268,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
ThreadPool& thread_pool, base::TimingLogger& timings) {
- // TODO: strdup memory leak.
- timings.NewSplit(strdup(("Compile " + dex_file.GetLocation()).c_str()));
+ timings.NewSplit("Compile Dex File");
ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
&dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
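The hunks above convert ComputeInvokeInfo (and, below, the field-info helpers) to take plain inputs first and pointer out-parameters last, so every call site spells out which arguments may be rewritten — compare the gbc_expander.cc call sites later in this change. A minimal self-contained sketch of the before/after pattern; names here are illustrative, not ART's:

#include <iostream>

enum InvokeType { kVirtual, kSuper, kDirect };

// Before: bool Compute(..., InvokeType& type, int& vtable_idx);
// After (the style adopted here): inputs first, then pointer out-params.
bool Compute(bool update_stats, InvokeType* type, int* vtable_idx) {
  *type = kDirect;   // e.g. a devirtualized dispatch rewrites the type in place
  *vtable_idx = -1;  // unused for direct dispatch
  return true;
}

int main() {
  InvokeType type = kVirtual;
  int vtable_idx = 0;
  if (Compute(/* update_stats */ true, &type, &vtable_idx)) {
    std::cout << "sharpened to direct: " << (type == kDirect) << "\n";
  }
  return 0;
}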
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 3852acfd3b..7657af5cee 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -170,22 +170,23 @@ class CompilerDriver {
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Can we fast path instance field access? Computes field's offset and volatility.
- bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- int& field_offset, bool& is_volatile, bool is_put)
+ bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
+ int* field_offset, bool* is_volatile)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Can we fastpath static field access? Computes field's offset, volatility and whether the
// field is within the referrer (which can avoid checking class initialization).
- bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- int& field_offset, int& ssb_index,
- bool& is_referrers_class, bool& is_volatile, bool is_put)
+ bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
+ int* field_offset, int* ssb_index,
+ bool* is_referrers_class, bool* is_volatile)
LOCKS_EXCLUDED(Locks::mutator_lock_);
  // Can we fastpath an interface, super class or virtual method call? Computes method's vtable
// index.
bool ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
- InvokeType& type, MethodReference& target_method, int& vtable_idx,
- uintptr_t& direct_code, uintptr_t& direct_method, bool update_stats)
+ bool update_stats, bool enable_devirtualization,
+ InvokeType* type, MethodReference* target_method, int* vtable_idx,
+ uintptr_t* direct_code, uintptr_t* direct_method)
LOCKS_EXCLUDED(Locks::mutator_lock_);
bool IsSafeCast(const MethodReference& mr, uint32_t dex_pc);
@@ -308,7 +309,7 @@ class CompilerDriver {
}
  // Checks if the class specified by its descriptor is one of the image_classes_
- bool IsImageClass(const char* descriptor) const;
+ bool IsImageClass(const StringPiece& descriptor) const;
void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
LOCKS_EXCLUDED(compiled_classes_lock_);
@@ -323,8 +324,8 @@ class CompilerDriver {
void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
mirror::Class* referrer_class,
mirror::ArtMethod* method,
- uintptr_t& direct_code, uintptr_t& direct_method,
- bool update_stats)
+ bool update_stats,
+ uintptr_t* direct_code, uintptr_t* direct_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
@@ -458,27 +459,40 @@ class CompilerDriver {
class DedupeHashFunc {
public:
size_t operator()(const std::vector<uint8_t>& array) const {
- // Take a random sample of bytes.
+ // For small arrays compute a hash using every byte.
static const size_t kSmallArrayThreshold = 16;
- static const size_t kRandomHashCount = 16;
- size_t hash = 0;
- if (array.size() < kSmallArrayThreshold) {
- for (auto c : array) {
- hash = hash * 54 + c;
+ size_t hash = 0x811c9dc5;
+ if (array.size() <= kSmallArrayThreshold) {
+ for (uint8_t b : array) {
+ hash = (hash * 16777619) ^ b;
}
} else {
- for (size_t i = 0; i < kRandomHashCount; ++i) {
+      // For larger arrays use the 2 bytes at offset 6 (the location of the push-registers
+      // instruction field in quick generated code on ARM) and then sample a number of
+      // additional bytes at fixed pseudo-random positions.
+ static const size_t kRandomHashCount = 16;
+ for (size_t i = 0; i < 2; ++i) {
+ uint8_t b = array[i + 6];
+ hash = (hash * 16777619) ^ b;
+ }
+ for (size_t i = 2; i < kRandomHashCount; ++i) {
size_t r = i * 1103515245 + 12345;
- hash = hash * 54 + array[r % array.size()];
+ uint8_t b = array[r % array.size()];
+ hash = (hash * 16777619) ^ b;
}
}
+ hash += hash << 13;
+ hash ^= hash >> 7;
+ hash += hash << 3;
+ hash ^= hash >> 17;
+ hash += hash << 5;
return hash;
}
};
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc> dedupe_code_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc> dedupe_mapping_table_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc> dedupe_vmap_table_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc> dedupe_gc_map_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_code_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_mapping_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_vmap_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_gc_map_;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
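For reference, the replacement DedupeHashFunc above is an FNV-1a-style multiply/xor (offset basis 0x811c9dc5, prime 16777619) finished with a Jenkins-style avalanche. A standalone sketch with the constants lifted from the hunk; the free-function framing is an assumption made only for self-containment:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

size_t DedupeHash(const std::vector<uint8_t>& array) {
  size_t hash = 0x811c9dc5;                    // FNV offset basis
  if (array.size() <= 16) {                    // small arrays: hash every byte
    for (uint8_t b : array) {
      hash = (hash * 16777619) ^ b;            // FNV prime, xor-folded
    }
  } else {                                     // large arrays: sample positions
    for (size_t i = 0; i < 2; ++i) {           // the two bytes at offsets 6 and 7
      hash = (hash * 16777619) ^ array[i + 6];
    }
    for (size_t i = 2; i < 16; ++i) {          // fixed pseudo-random positions
      size_t r = i * 1103515245 + 12345;
      hash = (hash * 16777619) ^ array[r % array.size()];
    }
  }
  hash += hash << 13;                          // Jenkins-style final avalanche
  hash ^= hash >> 7;
  hash += hash << 3;
  hash ^= hash >> 17;
  hash += hash << 5;
  return hash;
}

int main() {
  std::vector<uint8_t> code(32, 0xAB);
  std::printf("%zx\n", DedupeHash(code));
  return 0;
}

Bytes at offsets 6 and 7 are sampled unconditionally because, per the comment in the hunk, they fall in the push-registers instruction field of ARM quick code and so tend to differ between methods.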
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index f82c6fb40f..bcdc1c15c9 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -241,7 +241,7 @@ void ImageWriter::ComputeEagerResolvedStrings()
}
bool ImageWriter::IsImageClass(const Class* klass) {
- return compiler_driver_.IsImageClass(ClassHelper(klass).GetDescriptor());
+ return compiler_driver_.IsImageClass(ClassHelper(klass).GetDescriptorAsStringPiece());
}
struct NonImageClasses {
@@ -296,7 +296,7 @@ void ImageWriter::PruneNonImageClasses() {
bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg);
if (!context->image_writer->IsImageClass(klass)) {
- context->non_image_classes->insert(ClassHelper(klass).GetDescriptor());
+ context->non_image_classes->insert(ClassHelper(klass).GetDescriptorAsStringPiece().as_string());
}
return true;
}
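Here, as in the ResolveType hunk earlier, C-string descriptor comparisons become comparisons on a sized StringPiece, which can reject on a length mismatch before reading any bytes and reads more directly than strcmp(...) == 0. A sketch using std::string_view as a stand-in for ART's StringPiece, purely so the example is self-contained:

#include <iostream>
#include <string_view>

// strcmp(descriptor, "L...;") == 0 becomes descriptor == "L...;" on a sized view.
bool IsOutOfMemoryError(std::string_view descriptor) {
  return descriptor == "Ljava/lang/OutOfMemoryError;";
}

int main() {
  std::cout << IsOutOfMemoryError("Ljava/lang/OutOfMemoryError;") << "\n";  // 1
  std::cout << IsOutOfMemoryError("Ljava/lang/Void;") << "\n";              // 0
  return 0;
}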
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
index 43408a7d64..0c14346ad8 100644
--- a/compiler/jni/portable/jni_compiler.cc
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -50,9 +50,9 @@ using ::art::llvm::runtime_support::JniMethodStartSynchronized;
using ::art::llvm::runtime_support::RuntimeId;
JniCompiler::JniCompiler(LlvmCompilationUnit* cunit,
- CompilerDriver& driver,
+ CompilerDriver* driver,
const DexCompilationUnit* dex_compilation_unit)
- : cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()),
+ : cunit_(cunit), driver_(driver), module_(cunit_->GetModule()),
context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()),
dex_compilation_unit_(dex_compilation_unit),
func_(NULL), elf_func_idx_(0) {
diff --git a/compiler/jni/portable/jni_compiler.h b/compiler/jni/portable/jni_compiler.h
index d20c63bc1e..ffabfe61c2 100644
--- a/compiler/jni/portable/jni_compiler.h
+++ b/compiler/jni/portable/jni_compiler.h
@@ -54,7 +54,7 @@ class IRBuilder;
class JniCompiler {
public:
JniCompiler(LlvmCompilationUnit* cunit,
- CompilerDriver& driver,
+ CompilerDriver* driver,
const DexCompilationUnit* dex_compilation_unit);
CompiledMethod* Compile();
@@ -67,7 +67,7 @@ class JniCompiler {
private:
LlvmCompilationUnit* cunit_;
- CompilerDriver* driver_;
+ CompilerDriver* const driver_;
::llvm::Module* module_;
::llvm::LLVMContext* context_;
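The constructor change above follows the convention that a callee which stores its argument takes a pointer, and the stored member becomes a const pointer so the association cannot be reseated after construction. A small illustrative sketch of the pattern (names are not ART's):

#include <iostream>

class Driver {
 public:
  int thread_count() const { return 4; }
};

class Compiler {
 public:
  explicit Compiler(Driver* driver) : driver_(driver) {}
  int Threads() const { return driver_->thread_count(); }

 private:
  Driver* const driver_;  // set once at construction, never reseated
};

int main() {
  Driver d;
  Compiler c(&d);  // the & makes the retained pointer visible at the call site
  std::cout << c.Threads() << "\n";
  return 0;
}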
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 1417fb9e40..b6b15f94eb 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -24,7 +24,6 @@
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
-#include "disassembler.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_internal.h"
#include "utils/assembler.h"
@@ -85,7 +84,6 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
// Assembler that holds generated instructions
UniquePtr<Assembler> jni_asm(Assembler::Create(instruction_set));
- bool should_disassemble = false;
// Offsets into data structures
  // TODO: if cross-compiling, these offsets are for the host, not the target
@@ -366,10 +364,6 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
std::vector<uint8_t> managed_code(cs);
MemoryRegion code(&managed_code[0], managed_code.size());
__ FinalizeInstructions(code);
- if (should_disassemble) {
- UniquePtr<Disassembler> disassembler(Disassembler::Create(instruction_set));
- disassembler->Dump(LOG(INFO), &managed_code[0], &managed_code[managed_code.size()]);
- }
return new CompiledMethod(compiler,
instruction_set,
managed_code,
diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc
index a917cdc6de..d59afd48b7 100644
--- a/compiler/llvm/compiler_llvm.cc
+++ b/compiler/llvm/compiler_llvm.cc
@@ -26,6 +26,7 @@
#include "ir_builder.h"
#include "jni/portable/jni_compiler.h"
#include "llvm_compilation_unit.h"
+#include "thread-inl.h"
#include "utils_llvm.h"
#include "verifier/method_verifier.h"
@@ -164,7 +165,7 @@ CompileNativeMethod(DexCompilationUnit* dex_compilation_unit) {
UniquePtr<LlvmCompilationUnit> cunit(AllocateCompilationUnit());
UniquePtr<JniCompiler> jni_compiler(
- new JniCompiler(cunit.get(), *compiler_driver_, dex_compilation_unit));
+ new JniCompiler(cunit.get(), compiler_driver_, dex_compilation_unit));
return jni_compiler->Compile();
}
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
index 4f6fa0a2df..b206a25f25 100644
--- a/compiler/llvm/gbc_expander.cc
+++ b/compiler/llvm/gbc_expander.cc
@@ -846,10 +846,10 @@ llvm::Value* GBCExpanderPass::EmitInvoke(llvm::CallInst& call_inst) {
uintptr_t direct_code = 0;
uintptr_t direct_method = 0;
bool is_fast_path = driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc,
- invoke_type, target_method,
- vtable_idx,
- direct_code, direct_method,
- true);
+ true, true,
+ &invoke_type, &target_method,
+ &vtable_idx,
+ &direct_code, &direct_method);
// Load the method object
llvm::Value* callee_method_object_addr = NULL;
@@ -1630,7 +1630,7 @@ llvm::Value* GBCExpanderPass::Expand_HLIGet(llvm::CallInst& call_inst,
int field_offset;
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
- field_idx, dex_compilation_unit_, field_offset, is_volatile, false);
+ field_idx, dex_compilation_unit_, false, &field_offset, &is_volatile);
if (!is_fast_path) {
llvm::Function* runtime_func;
@@ -1692,7 +1692,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
int field_offset;
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
- field_idx, dex_compilation_unit_, field_offset, is_volatile, true);
+ field_idx, dex_compilation_unit_, true, &field_offset, &is_volatile);
if (!is_fast_path) {
llvm::Function* runtime_func;
@@ -1897,8 +1897,8 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
bool is_volatile;
bool is_fast_path = driver_->ComputeStaticFieldInfo(
- field_idx, dex_compilation_unit_, field_offset, ssb_index,
- is_referrers_class, is_volatile, false);
+ field_idx, dex_compilation_unit_, false,
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
llvm::Value* static_field_value;
@@ -1981,8 +1981,8 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
bool is_volatile;
bool is_fast_path = driver_->ComputeStaticFieldInfo(
- field_idx, dex_compilation_unit_, field_offset, ssb_index,
- is_referrers_class, is_volatile, true);
+ field_idx, dex_compilation_unit_, true,
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
if (!is_fast_path) {
llvm::Function* runtime_func;
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index 139100bee9..aa439ccbae 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -214,6 +214,7 @@ bool LlvmCompilationUnit::MaterializeToRawOStream(::llvm::raw_ostream& out_strea
::llvm::TargetOptions target_options;
target_options.FloatABIType = ::llvm::FloatABI::Soft;
target_options.NoFramePointerElim = true;
+ target_options.NoFramePointerElimNonLeaf = true;
target_options.UseSoftFloat = false;
target_options.EnableFastISel = false;
@@ -257,7 +258,7 @@ bool LlvmCompilationUnit::MaterializeToRawOStream(::llvm::raw_ostream& out_strea
::llvm::OwningPtr< ::llvm::tool_output_file> out_file(
new ::llvm::tool_output_file(bitcode_filename_.c_str(), errmsg,
- ::llvm::sys::fs::F_Binary));
+ ::llvm::raw_fd_ostream::F_Binary));
if (!errmsg.empty()) {
@@ -277,6 +278,7 @@ bool LlvmCompilationUnit::MaterializeToRawOStream(::llvm::raw_ostream& out_strea
// pm_builder.Inliner = ::llvm::createAlwaysInlinerPass();
// pm_builder.Inliner = ::llvm::createPartialInliningPass();
pm_builder.OptLevel = 3;
+ pm_builder.DisableSimplifyLibCalls = 1;
pm_builder.DisableUnitAtATime = 1;
pm_builder.populateFunctionPassManager(fpm);
pm_builder.populateModulePassManager(pm);
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 74b5da9eff..9ed264288b 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -104,7 +104,7 @@ TEST_F(OatTest, WriteRead) {
ASSERT_TRUE(oat_file.get() != NULL);
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
- ASSERT_EQ(2U, oat_header.GetDexFileCount()); // core and conscrypt
+ ASSERT_EQ(1U, oat_header.GetDexFileCount()); // core
ASSERT_EQ(42U, oat_header.GetImageFileLocationOatChecksum());
ASSERT_EQ(4096U, oat_header.GetImageFileLocationOatDataBegin());
ASSERT_EQ("lue.art", oat_header.GetImageFileLocation());
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index f3d35d728c..53c1afa698 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -18,62 +18,65 @@
#define ART_COMPILER_UTILS_DEDUPE_SET_H_
#include <set>
+#include <string>
#include "base/mutex.h"
#include "base/stl_util.h"
namespace art {
-// A simple data structure to handle hashed deduplication. Add is thread safe.
-template <typename Key, typename HashType, typename HashFunc>
+// A set of Keys that supports a HashFunc returning HashType. Used to find duplicates of Key
+// in the Add method. The data structure is thread-safe through the use of internal locks,
+// and the lock can be sharded to reduce contention.
+template <typename Key, typename HashType, typename HashFunc, HashType kShard = 1>
class DedupeSet {
typedef std::pair<HashType, Key*> HashedKey;
class Comparator {
public:
bool operator()(const HashedKey& a, const HashedKey& b) const {
- if (a.first < b.first) return true;
- if (a.first > b.first) return true;
- return *a.second < *b.second;
+ if (a.first != b.first) {
+ return a.first < b.first;
+ } else {
+ return *a.second < *b.second;
+ }
}
};
- typedef std::set<HashedKey, Comparator> Keys;
-
public:
- typedef typename Keys::iterator iterator;
- typedef typename Keys::const_iterator const_iterator;
- typedef typename Keys::size_type size_type;
- typedef typename Keys::value_type value_type;
-
- iterator begin() { return keys_.begin(); }
- const_iterator begin() const { return keys_.begin(); }
- iterator end() { return keys_.end(); }
- const_iterator end() const { return keys_.end(); }
-
Key* Add(Thread* self, const Key& key) {
- HashType hash = HashFunc()(key);
- HashedKey hashed_key(hash, const_cast<Key*>(&key));
- MutexLock lock(self, lock_);
- auto it = keys_.find(hashed_key);
- if (it != keys_.end()) {
+ HashType raw_hash = HashFunc()(key);
+ HashType shard_hash = raw_hash / kShard;
+ HashType shard_bin = raw_hash % kShard;
+ HashedKey hashed_key(shard_hash, const_cast<Key*>(&key));
+ MutexLock lock(self, *lock_[shard_bin]);
+ auto it = keys_[shard_bin].find(hashed_key);
+ if (it != keys_[shard_bin].end()) {
return it->second;
}
hashed_key.second = new Key(key);
- keys_.insert(hashed_key);
+ keys_[shard_bin].insert(hashed_key);
return hashed_key.second;
}
- DedupeSet() : lock_("dedupe lock") {
+ explicit DedupeSet(const char* set_name) {
+ for (HashType i = 0; i < kShard; ++i) {
+ lock_name_[i] = StringPrintf("%s lock %d", set_name, i);
+ lock_[i].reset(new Mutex(lock_name_[i].c_str()));
+ }
}
~DedupeSet() {
- STLDeleteValues(&keys_);
+ for (HashType i = 0; i < kShard; ++i) {
+ STLDeleteValues(&keys_[i]);
+ }
}
private:
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- Keys keys_;
+ std::string lock_name_[kShard];
+ UniquePtr<Mutex> lock_[kShard];
+ std::set<HashedKey, Comparator> keys_[kShard];
+
DISALLOW_COPY_AND_ASSIGN(DedupeSet);
};
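The Add path above splits the raw hash into a shard selector and a within-shard key, so each shard owns an independent lock and set while no hash bits are discarded. The arithmetic in isolation:

#include <cstddef>
#include <cstdio>

const size_t kShard = 4;  // matches the template argument CompilerDriver now uses

void Split(size_t raw_hash, size_t* shard_bin, size_t* shard_hash) {
  *shard_bin = raw_hash % kShard;   // which lock/set pair to use
  *shard_hash = raw_hash / kShard;  // the key stored inside that set
}

int main() {
  size_t bin, hash;
  Split(0x12345678u, &bin, &hash);
  std::printf("bin=%zu hash=%zx\n", bin, hash);
  return 0;
}

With kShard = 4, as the four dedupe members earlier in this change use, up to four threads can deduplicate concurrently whenever their hashes land in different bins.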
diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc
index 9f5e292f53..03d8b961fa 100644
--- a/compiler/utils/dedupe_set_test.cc
+++ b/compiler/utils/dedupe_set_test.cc
@@ -38,7 +38,7 @@ class DedupeHashFunc {
TEST_F(DedupeSetTest, Test) {
Thread* self = Thread::Current();
typedef std::vector<uint8_t> ByteArray;
- DedupeSet<ByteArray, size_t, DedupeHashFunc> deduplicator;
+ DedupeSet<ByteArray, size_t, DedupeHashFunc> deduplicator("test");
ByteArray* array1;
{
ByteArray test1;
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
new file mode 100644
index 0000000000..f8001a4524
--- /dev/null
+++ b/disassembler/Android.mk
@@ -0,0 +1,120 @@
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include art/build/Android.common.mk
+
+LIBART_DISASSEMBLER_SRC_FILES := \
+ disassembler.cc \
+ disassembler_arm.cc \
+ disassembler_mips.cc \
+ disassembler_x86.cc
+
+# $(1): target or host
+# $(2): ndebug or debug
+define build-libart-disassembler
+ ifneq ($(1),target)
+ ifneq ($(1),host)
+ $$(error expected target or host for argument 1, received $(1))
+ endif
+ endif
+ ifneq ($(2),ndebug)
+ ifneq ($(2),debug)
+ $$(error expected ndebug or debug for argument 2, received $(2))
+ endif
+ endif
+
+ art_target_or_host := $(1)
+ art_ndebug_or_debug := $(2)
+
+ include $(CLEAR_VARS)
+ ifeq ($$(art_target_or_host),target)
+ include external/stlport/libstlport.mk
+ else
+ LOCAL_IS_HOST_MODULE := true
+ endif
+ LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+ ifeq ($$(art_ndebug_or_debug),ndebug)
+ LOCAL_MODULE := libart-disassembler
+ else # debug
+ LOCAL_MODULE := libartd-disassembler
+ endif
+
+ LOCAL_MODULE_TAGS := optional
+ LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+
+ LOCAL_SRC_FILES := $$(LIBART_DISASSEMBLER_SRC_FILES)
+
+ GENERATED_SRC_DIR := $$(call intermediates-dir-for,$$(LOCAL_MODULE_CLASS),$$(LOCAL_MODULE),$$(LOCAL_IS_HOST_MODULE),)
+
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_CLANG := $(ART_TARGET_CLANG)
+ LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
+ else # host
+ LOCAL_CLANG := $(ART_HOST_CLANG)
+ LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+ endif
+
+ LOCAL_SHARED_LIBRARIES += liblog
+ ifeq ($$(art_ndebug_or_debug),debug)
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_CFLAGS += $(ART_TARGET_DEBUG_CFLAGS)
+ else # host
+ LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
+ endif
+ LOCAL_SHARED_LIBRARIES += libartd
+ else
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_CFLAGS += $(ART_TARGET_NON_DEBUG_CFLAGS)
+ else # host
+ LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
+ endif
+ LOCAL_SHARED_LIBRARIES += libart
+ endif
+
+ LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
+
+ LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
+ LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_SHARED_LIBRARIES += libcutils
+ include $(LLVM_GEN_INTRINSICS_MK)
+ include $(LLVM_DEVICE_BUILD_MK)
+ include $(BUILD_SHARED_LIBRARY)
+ else # host
+ LOCAL_STATIC_LIBRARIES += libcutils
+ include $(LLVM_GEN_INTRINSICS_MK)
+ include $(LLVM_HOST_BUILD_MK)
+ include $(BUILD_HOST_SHARED_LIBRARY)
+ endif
+endef
+
+ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
+ $(eval $(call build-libart-disassembler,target,ndebug))
+endif
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+ $(eval $(call build-libart-disassembler,target,debug))
+endif
+ifeq ($(WITH_HOST_DALVIK),true)
+  # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross-compile for the target.
+ ifeq ($(ART_BUILD_NDEBUG),true)
+ $(eval $(call build-libart-disassembler,host,ndebug))
+ endif
+ ifeq ($(ART_BUILD_DEBUG),true)
+ $(eval $(call build-libart-disassembler,host,debug))
+ endif
+endif
diff --git a/runtime/disassembler.cc b/disassembler/disassembler.cc
index 067083510b..067083510b 100644
--- a/runtime/disassembler.cc
+++ b/disassembler/disassembler.cc
diff --git a/runtime/disassembler.h b/disassembler/disassembler.h
index 805ff4d079..7547ab722b 100644
--- a/runtime/disassembler.h
+++ b/disassembler/disassembler.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DISASSEMBLER_H_
-#define ART_RUNTIME_DISASSEMBLER_H_
+#ifndef ART_DISASSEMBLER_DISASSEMBLER_H_
+#define ART_DISASSEMBLER_DISASSEMBLER_H_
#include <stdint.h>
@@ -45,4 +45,4 @@ class Disassembler {
} // namespace art
-#endif // ART_RUNTIME_DISASSEMBLER_H_
+#endif // ART_DISASSEMBLER_DISASSEMBLER_H_
diff --git a/runtime/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 879d3ac71c..879d3ac71c 100644
--- a/runtime/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
diff --git a/runtime/disassembler_arm.h b/disassembler/disassembler_arm.h
index cab9150108..2e699ffe88 100644
--- a/runtime/disassembler_arm.h
+++ b/disassembler/disassembler_arm.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DISASSEMBLER_ARM_H_
-#define ART_RUNTIME_DISASSEMBLER_ARM_H_
+#ifndef ART_DISASSEMBLER_DISASSEMBLER_ARM_H_
+#define ART_DISASSEMBLER_DISASSEMBLER_ARM_H_
#include <vector>
@@ -48,4 +48,4 @@ class DisassemblerArm : public Disassembler {
} // namespace arm
} // namespace art
-#endif // ART_RUNTIME_DISASSEMBLER_ARM_H_
+#endif // ART_DISASSEMBLER_DISASSEMBLER_ARM_H_
diff --git a/runtime/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 25bbae68ef..25bbae68ef 100644
--- a/runtime/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
diff --git a/runtime/disassembler_mips.h b/disassembler/disassembler_mips.h
index e248503963..d3862676a0 100644
--- a/runtime/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DISASSEMBLER_MIPS_H_
-#define ART_RUNTIME_DISASSEMBLER_MIPS_H_
+#ifndef ART_DISASSEMBLER_DISASSEMBLER_MIPS_H_
+#define ART_DISASSEMBLER_DISASSEMBLER_MIPS_H_
#include <vector>
@@ -37,4 +37,4 @@ class DisassemblerMips : public Disassembler {
} // namespace mips
} // namespace art
-#endif // ART_RUNTIME_DISASSEMBLER_MIPS_H_
+#endif // ART_DISASSEMBLER_DISASSEMBLER_MIPS_H_
diff --git a/runtime/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index e5cdb7b297..e5cdb7b297 100644
--- a/runtime/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
diff --git a/runtime/disassembler_x86.h b/disassembler/disassembler_x86.h
index ff4322c8b8..9adaff7048 100644
--- a/runtime/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DISASSEMBLER_X86_H_
-#define ART_RUNTIME_DISASSEMBLER_X86_H_
+#ifndef ART_DISASSEMBLER_DISASSEMBLER_X86_H_
+#define ART_DISASSEMBLER_DISASSEMBLER_X86_H_
#include "disassembler.h"
@@ -35,4 +35,4 @@ class DisassemblerX86 : public Disassembler {
} // namespace x86
} // namespace art
-#endif // ART_RUNTIME_DISASSEMBLER_X86_H_
+#endif // ART_DISASSEMBLER_DISASSEMBLER_X86_H_
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index a63b229846..7cee00e182 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -22,17 +22,17 @@ OATDUMP_SRC_FILES := \
include art/build/Android.executable.mk
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils,,target,ndebug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libart-disassembler,art/disassembler,target,ndebug))
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils,,target,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler,art/disassembler,target,debug))
endif
ifeq ($(WITH_HOST_DALVIK),true)
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),,,host,ndebug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libart-disassembler,art/disassembler,host,ndebug))
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),,,host,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler,art/disassembler,host,debug))
endif
endif
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index fc9e00c2cb..cc6b5d7866 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1119,7 +1119,7 @@ class ImageDumper {
typedef SafeMap<std::string, SizeAndCount> SizeAndCountTable;
SizeAndCountTable sizes_and_counts;
- void Update(const std::string& descriptor, size_t object_bytes) {
+ void Update(const char* descriptor, size_t object_bytes) {
SizeAndCountTable::iterator it = sizes_and_counts.find(descriptor);
if (it != sizes_and_counts.end()) {
it->second.bytes += object_bytes;
diff --git a/runtime/Android.mk b/runtime/Android.mk
index a0ae4bffbc..5edf7592d9 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -38,10 +38,6 @@ LIBART_COMMON_SRC_FILES := \
dex_file.cc \
dex_file_verifier.cc \
dex_instruction.cc \
- disassembler.cc \
- disassembler_arm.cc \
- disassembler_mips.cc \
- disassembler_x86.cc \
elf_file.cc \
gc/allocator/dlmalloc.cc \
gc/accounting/card_table.cc \
@@ -64,6 +60,9 @@ LIBART_COMMON_SRC_FILES := \
instrumentation.cc \
intern_table.cc \
interpreter/interpreter.cc \
+ interpreter/interpreter_common.cc \
+ interpreter/interpreter_goto_table_impl.cc \
+ interpreter/interpreter_switch_impl.cc \
jdwp/jdwp_event.cc \
jdwp/jdwp_expand_buf.cc \
jdwp/jdwp_handler.cc \
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 9e6902de3f..e6e13be0b2 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -42,6 +42,13 @@ extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, voi
extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_instrumented(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object_instrumented(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_with_access_check_instrumented(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array_instrumented(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
+
// Cast entrypoints.
extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
const mirror::Class* ref_class);
@@ -133,6 +140,30 @@ extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_throw_stack_overflow(void*);
+static bool quick_alloc_entry_points_instrumented = false;
+
+void SetQuickAllocEntryPointsInstrumented(bool instrumented) {
+ quick_alloc_entry_points_instrumented = instrumented;
+}
+
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
+ if (quick_alloc_entry_points_instrumented) {
+ qpoints->pAllocArray = art_quick_alloc_array_instrumented;
+ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check_instrumented;
+ qpoints->pAllocObject = art_quick_alloc_object_instrumented;
+ qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check_instrumented;
+ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array_instrumented;
+ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check_instrumented;
+ } else {
+ qpoints->pAllocArray = art_quick_alloc_array;
+ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
+ qpoints->pAllocObject = art_quick_alloc_object;
+ qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
+ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
+ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
+ }
+}
+
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
// Interpreter
@@ -147,12 +178,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
// Alloc
- qpoints->pAllocArray = art_quick_alloc_array;
- qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
- qpoints->pAllocObject = art_quick_alloc_object;
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
- qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
- qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
+ ResetQuickAllocEntryPoints(qpoints);
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
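The same flag-and-reset scheme is repeated for mips and x86 below: a static flag records whether instrumentation is active, and ResetQuickAllocEntryPoints republishes the allocation entrypoint table to point at either the fast or the instrumented stubs. A self-contained sketch of the mechanism, with illustrative names standing in for the real stubs:

#include <cstdio>

// Stand-ins for the fast and instrumented allocation stubs.
void* AllocFast(unsigned type_idx) { (void)type_idx; return nullptr; }
void* AllocInstrumented(unsigned type_idx) {
  std::printf("allocation observed: type_idx=%u\n", type_idx);  // e.g. a profiling hook
  return nullptr;
}

struct EntryPoints {
  void* (*pAllocObject)(unsigned);
};

static bool instrumented = false;  // mirrors quick_alloc_entry_points_instrumented

void SetInstrumented(bool on) { instrumented = on; }

void ResetEntryPoints(EntryPoints* qpoints) {
  qpoints->pAllocObject = instrumented ? AllocInstrumented : AllocFast;
}

int main() {
  EntryPoints ep;
  SetInstrumented(true);  // flip the mode...
  ResetEntryPoints(&ep);  // ...then republish the table (per thread, in ART's case)
  ep.pAllocObject(42);
  return 0;
}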
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index a77ce01562..5b2dd6c733 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -706,6 +706,17 @@ ENTRY art_quick_alloc_object
DELIVER_PENDING_EXCEPTION
END art_quick_alloc_object
+ .extern artAllocObjectFromCodeInstrumented
+ENTRY art_quick_alloc_object_instrumented
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artAllocObjectFromCodeInstrumented @ (uint32_t type_idx, Method* method, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO
+ DELIVER_PENDING_EXCEPTION
+END art_quick_alloc_object_instrumented
+
/*
* Called by managed code to allocate an object when the caller doesn't know whether it has
* access to the created type.
@@ -721,6 +732,17 @@ ENTRY art_quick_alloc_object_with_access_check
DELIVER_PENDING_EXCEPTION
END art_quick_alloc_object_with_access_check
+ .extern artAllocObjectFromCodeWithAccessCheckInstrumented
+ENTRY art_quick_alloc_object_with_access_check_instrumented
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artAllocObjectFromCodeWithAccessCheckInstrumented @ (uint32_t type_idx, Method* method, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO
+ DELIVER_PENDING_EXCEPTION
+END art_quick_alloc_object_with_access_check_instrumented
+
/*
* Called by managed code to allocate an array.
*/
@@ -741,6 +763,23 @@ ENTRY art_quick_alloc_array
DELIVER_PENDING_EXCEPTION
END art_quick_alloc_array
+ .extern artAllocArrayFromCodeInstrumented
+ENTRY art_quick_alloc_array_instrumented
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ @ artAllocArrayFromCodeInstrumented(uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP)
+ bl artAllocArrayFromCodeInstrumented
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO
+ DELIVER_PENDING_EXCEPTION
+END art_quick_alloc_array_instrumented
+
/*
* Called by managed code to allocate an array when the caller doesn't know whether it has
* access to the created type.
@@ -762,6 +801,23 @@ ENTRY art_quick_alloc_array_with_access_check
DELIVER_PENDING_EXCEPTION
END art_quick_alloc_array_with_access_check
+ .extern artAllocArrayFromCodeWithAccessCheckInstrumented
+ENTRY art_quick_alloc_array_with_access_check_instrumented
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ @ artAllocArrayFromCodeWithAccessCheckInstrumented(type_idx, method, component_count, Thread*, SP)
+ bl artAllocArrayFromCodeWithAccessCheckInstrumented
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO
+ DELIVER_PENDING_EXCEPTION
+END art_quick_alloc_array_with_access_check_instrumented
+
/*
* Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
*/
@@ -782,6 +838,23 @@ ENTRY art_quick_check_and_alloc_array
DELIVER_PENDING_EXCEPTION
END art_quick_check_and_alloc_array
+ .extern artCheckAndAllocArrayFromCodeInstrumented
+ENTRY art_quick_check_and_alloc_array_instrumented
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+    @ artCheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, Method* method, int32_t count, Thread*, SP)
+ bl artCheckAndAllocArrayFromCodeInstrumented
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO
+ DELIVER_PENDING_EXCEPTION
+END art_quick_check_and_alloc_array_instrumented
+
/*
* Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
*/
@@ -802,6 +875,23 @@ ENTRY art_quick_check_and_alloc_array_with_access_check
DELIVER_PENDING_EXCEPTION
END art_quick_check_and_alloc_array_with_access_check
+ .extern artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented
+ENTRY art_quick_check_and_alloc_array_with_access_check_instrumented
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+    @ artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented(type_idx, method, count, Thread*, SP)
+ bl artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO
+ DELIVER_PENDING_EXCEPTION
+END art_quick_check_and_alloc_array_with_access_check_instrumented
+
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
*/
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 40d7cd913c..3d08298151 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -41,6 +41,13 @@ extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, voi
extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_instrumented(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object_instrumented(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_with_access_check_instrumented(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array_instrumented(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
+
// Cast entrypoints.
extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
const mirror::Class* ref_class);
@@ -134,6 +141,30 @@ extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_throw_stack_overflow(void*);
+static bool quick_alloc_entry_points_instrumented = false;
+
+void SetQuickAllocEntryPointsInstrumented(bool instrumented) {
+ quick_alloc_entry_points_instrumented = instrumented;
+}
+
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
+ if (quick_alloc_entry_points_instrumented) {
+ qpoints->pAllocArray = art_quick_alloc_array_instrumented;
+ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check_instrumented;
+ qpoints->pAllocObject = art_quick_alloc_object_instrumented;
+ qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check_instrumented;
+ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array_instrumented;
+ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check_instrumented;
+ } else {
+ qpoints->pAllocArray = art_quick_alloc_array;
+ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
+ qpoints->pAllocObject = art_quick_alloc_object;
+ qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
+ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
+ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
+ }
+}
+
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
// Interpreter
@@ -148,12 +179,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
// Alloc
- qpoints->pAllocArray = art_quick_alloc_array;
- qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
- qpoints->pAllocObject = art_quick_alloc_object;
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
- qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
- qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
+ ResetQuickAllocEntryPoints(qpoints);
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 3d63ccc612..f9b703f113 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -770,6 +770,16 @@ ENTRY art_quick_alloc_object
RETURN_IF_NONZERO
END art_quick_alloc_object
+ .extern artAllocObjectFromCodeInstrumented
+ENTRY art_quick_alloc_object_instrumented
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, rSELF # pass Thread::Current
+ jal artAllocObjectFromCodeInstrumented # (uint32_t type_idx, Method* method, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_alloc_object_instrumented
+
/*
* Called by managed code to allocate an object when the caller doesn't know whether it has
* access to the created type.
@@ -784,6 +794,16 @@ ENTRY art_quick_alloc_object_with_access_check
RETURN_IF_NONZERO
END art_quick_alloc_object_with_access_check
+ .extern artAllocObjectFromCodeWithAccessCheckInstrumented
+ENTRY art_quick_alloc_object_with_access_check_instrumented
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, rSELF # pass Thread::Current
+ jal artAllocObjectFromCodeWithAccessCheckInstrumented # (uint32_t type_idx, Method* method, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_alloc_object_with_access_check_instrumented
+
/*
* Called by managed code to allocate an array.
*/
@@ -798,6 +818,17 @@ ENTRY art_quick_alloc_array
RETURN_IF_NONZERO
END art_quick_alloc_array
+ .extern artAllocArrayFromCodeInstrumented
+ENTRY art_quick_alloc_array_instrumented
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a3, rSELF # pass Thread::Current
+ # artAllocArrayFromCodeInstrumented(uint32_t type_idx, Method* method, int32_t component_count, Thread*, $sp)
+ jal artAllocArrayFromCodeInstrumented
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_alloc_array_instrumented
+
/*
* Called by managed code to allocate an array when the caller doesn't know whether it has
* access to the created type.
@@ -813,6 +844,17 @@ ENTRY art_quick_alloc_array_with_access_check
RETURN_IF_NONZERO
END art_quick_alloc_array_with_access_check
+ .extern artAllocArrayFromCodeWithAccessCheckInstrumented
+ENTRY art_quick_alloc_array_with_access_check_instrumented
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a3, rSELF # pass Thread::Current
+ # artAllocArrayFromCodeWithAccessCheckInstrumented(type_idx, method, component_count, Thread*, $sp)
+ jal artAllocArrayFromCodeWithAccessCheckInstrumented
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_alloc_array_with_access_check_instrumented
+
/*
* Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
*/
@@ -827,6 +869,17 @@ ENTRY art_quick_check_and_alloc_array
RETURN_IF_NONZERO
END art_quick_check_and_alloc_array
+ .extern artCheckAndAllocArrayFromCodeInstrumented
+ENTRY art_quick_check_and_alloc_array_instrumented
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a3, rSELF # pass Thread::Current
+    # artCheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, Method* method, int32_t count, Thread*, $sp)
+ jal artCheckAndAllocArrayFromCodeInstrumented
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_check_and_alloc_array_instrumented
+
/*
* Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
*/
@@ -841,6 +894,17 @@ ENTRY art_quick_check_and_alloc_array_with_access_check
RETURN_IF_NONZERO
END art_quick_check_and_alloc_array_with_access_check
+ .extern artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented
+ENTRY art_quick_check_and_alloc_array_with_access_check_instrumented
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a3, rSELF # pass Thread::Current
+    # artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented(type_idx, method, count, Thread*, $sp)
+ jal artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_check_and_alloc_array_with_access_check_instrumented
+
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
*/
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index abc2990cc0..4c87e07608 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -40,6 +40,13 @@ extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, voi
extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_instrumented(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object_instrumented(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_with_access_check_instrumented(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array_instrumented(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_with_access_check_instrumented(uint32_t, void*, int32_t);
+
// Cast entrypoints.
extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
const mirror::Class* ref_class);
@@ -116,6 +123,30 @@ extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_throw_stack_overflow(void*);
+static bool quick_alloc_entry_points_instrumented = false;
+
+void SetQuickAllocEntryPointsInstrumented(bool instrumented) {
+ quick_alloc_entry_points_instrumented = instrumented;
+}
+
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
+ if (quick_alloc_entry_points_instrumented) {
+ qpoints->pAllocArray = art_quick_alloc_array_instrumented;
+ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check_instrumented;
+ qpoints->pAllocObject = art_quick_alloc_object_instrumented;
+ qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check_instrumented;
+ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array_instrumented;
+ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check_instrumented;
+ } else {
+ qpoints->pAllocArray = art_quick_alloc_array;
+ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
+ qpoints->pAllocObject = art_quick_alloc_object;
+ qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
+ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
+ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
+ }
+}
+
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
// Interpreter
@@ -130,12 +161,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
// Alloc
- qpoints->pAllocArray = art_quick_alloc_array;
- qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
- qpoints->pAllocObject = art_quick_alloc_object;
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
- qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
- qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
+ ResetQuickAllocEntryPoints(qpoints);
// Cast
qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index dbf552faaf..06b220391f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -389,6 +389,13 @@ THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check, artAllocArrayFromCod
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_alloc_object_instrumented, artAllocObjectFromCodeInstrumented, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check_instrumented, artAllocObjectFromCodeWithAccessCheckInstrumented, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array_instrumented, artAllocArrayFromCodeInstrumented, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check_instrumented, artAllocArrayFromCodeWithAccessCheckInstrumented, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_instrumented, artCheckAndAllocArrayFromCodeInstrumented, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check_instrumented, artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented, RETURN_IF_EAX_NOT_ZERO
+
TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO
TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO
TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 7e8365eaba..c0cfee2463 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -41,6 +41,54 @@ static inline int futex(volatile int *uaddr, int op, int val, const struct times
}
#endif // ART_USE_FUTEXES
+#if defined(__APPLE__)
+
+// This works on Mac OS 10.6 but hasn't been tested on older releases.
+struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
+ long padding0; // NOLINT(runtime/int) exact match to darwin type
+ int padding1;
+ uint32_t padding2;
+ int16_t padding3;
+ int16_t padding4;
+ uint32_t padding5;
+ pthread_t darwin_pthread_mutex_owner;
+ // ...other stuff we don't care about.
+};
+
+struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
+ long padding0; // NOLINT(runtime/int) exact match to darwin type
+ pthread_mutex_t padding1;
+ int padding2;
+ pthread_cond_t padding3;
+ pthread_cond_t padding4;
+ int padding5;
+ int padding6;
+ pthread_t darwin_pthread_rwlock_owner;
+ // ...other stuff we don't care about.
+};
+
+#endif // __APPLE__
+
+#if defined(__GLIBC__)
+
+struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
+ int32_t padding0[2];
+ int owner;
+ // ...other stuff we don't care about.
+};
+
+struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
+#ifdef __LP64__
+ int32_t padding0[6];
+#else
+ int32_t padding0[7];
+#endif
+ int writer;
+ // ...other stuff we don't care about.
+};
+
+#endif // __GLIBC__
+
class ScopedContentionRecorder {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
@@ -185,6 +233,84 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
#endif
}
+inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
+ DCHECK(self == NULL || self == Thread::Current());
+ bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
+ if (kDebugLocking) {
+    // Sanity debug check that if we think the mutex is locked, we have it in our held mutexes.
+ if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
+ CHECK_EQ(self->GetHeldMutex(level_), this);
+ }
+ }
+ return result;
+}
+
+inline uint64_t Mutex::GetExclusiveOwnerTid() const {
+#if ART_USE_FUTEXES
+ return exclusive_owner_;
+#elif defined(__BIONIC__)
+ return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
+#elif defined(__GLIBC__)
+ return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
+#elif defined(__APPLE__)
+ const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
+ pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
+ // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
+ // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
+ if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
+ return 0;
+ }
+ uint64_t tid;
+ CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
+ return tid;
+#else
+#error unsupported C library
+#endif
+}
+
+inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
+ DCHECK(self == NULL || self == Thread::Current());
+ bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
+ if (kDebugLocking) {
+    // Sanity check that if the pthread thinks we own the lock, the Thread agrees.
+ if (self != NULL && result) {
+ CHECK_EQ(self->GetHeldMutex(level_), this);
+ }
+ }
+ return result;
+}
+
+inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
+#if ART_USE_FUTEXES
+ int32_t state = state_;
+ if (state == 0) {
+ return 0; // No owner.
+ } else if (state > 0) {
+ return -1; // Shared.
+ } else {
+ return exclusive_owner_;
+ }
+#else
+#if defined(__BIONIC__)
+ return rwlock_.writerThreadId;
+#elif defined(__GLIBC__)
+ return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
+#elif defined(__APPLE__)
+ const darwin_pthread_rwlock_t*
+ dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
+ pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
+ if (owner == (pthread_t)0) {
+ return 0;
+ }
+ uint64_t tid;
+ CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
+ return tid;
+#else
+#error unsupported C library
+#endif
+#endif
+}
+
} // namespace art
#endif // ART_RUNTIME_BASE_MUTEX_INL_H_
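The owner-tid readers moved into this header rely on the may_alias overlays above to peek at private C-library state. Reduced to the glibc mutex case, the technique looks as follows; it depends on glibc's internal layout (exactly as the padding0[2] offset in the struct above does) and is shown only to illustrate the idea, not as portable code:

#include <pthread.h>

#include <cstdint>
#include <cstdio>

// Overlay matching the private layout of glibc's pthread_mutex_t.
struct __attribute__((__may_alias__)) glibc_mutex_view {
  int32_t padding0[2];  // __lock, __count
  int owner;            // __owner: kernel tid of the holder, 0 when unowned
};

int OwnerTid(const pthread_mutex_t* mu) {
  return reinterpret_cast<const glibc_mutex_view*>(mu)->owner;
}

int main() {
  pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
  std::printf("before lock: %d\n", OwnerTid(&mu));  // 0: unowned
  pthread_mutex_lock(&mu);
  std::printf("while held:  %d\n", OwnerTid(&mu));  // this thread's tid on glibc
  pthread_mutex_unlock(&mu);
  return 0;
}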
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index b99e7c9281..b048bbb1ec 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -31,54 +31,6 @@
namespace art {
-#if defined(__APPLE__)
-
-// This works on Mac OS 10.6 but hasn't been tested on older releases.
-struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
- long padding0; // NOLINT(runtime/int) exact match to darwin type
- int padding1;
- uint32_t padding2;
- int16_t padding3;
- int16_t padding4;
- uint32_t padding5;
- pthread_t darwin_pthread_mutex_owner;
- // ...other stuff we don't care about.
-};
-
-struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
- long padding0; // NOLINT(runtime/int) exact match to darwin type
- pthread_mutex_t padding1;
- int padding2;
- pthread_cond_t padding3;
- pthread_cond_t padding4;
- int padding5;
- int padding6;
- pthread_t darwin_pthread_rwlock_owner;
- // ...other stuff we don't care about.
-};
-
-#endif // __APPLE__
-
-#if defined(__GLIBC__)
-
-struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
- int32_t padding0[2];
- int owner;
- // ...other stuff we don't care about.
-};
-
-struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
-#ifdef __LP64__
- int32_t padding0[6];
-#else
- int32_t padding0[7];
-#endif
- int writer;
- // ...other stuff we don't care about.
-};
-
-#endif // __GLIBC__
-
#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
const int32_t one_sec = 1000 * 1000 * 1000; // one second in nanoseconds.
@@ -346,7 +298,7 @@ void Mutex::ExclusiveLock(Thread* self) {
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == 0) {
+ if (LIKELY(cur_state == 0)) {
// Change state from 0 to 1.
done = android_atomic_acquire_cas(0, 1, &state_) == 0;
} else {
@@ -432,14 +384,14 @@ void Mutex::ExclusiveUnlock(Thread* self) {
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == 1) {
+ if (LIKELY(cur_state == 1)) {
// We're no longer the owner.
exclusive_owner_ = 0;
// Change state to 0.
done = android_atomic_release_cas(cur_state, 0, &state_) == 0;
- if (done) { // Spurious fail?
+ if (LIKELY(done)) { // The CAS succeeded; it can fail spuriously, in which case we retry.
// Wake a contender
- if (num_contenders_ > 0) {
+ if (UNLIKELY(num_contenders_ > 0)) {
futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
}
}
@@ -461,41 +413,6 @@ void Mutex::ExclusiveUnlock(Thread* self) {
}
}
-bool Mutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
- bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
- if (kDebugLocking) {
- // Sanity debug check that if we think it is locked we have it in our held mutexes.
- if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
- CHECK_EQ(self->GetHeldMutex(level_), this);
- }
- }
- return result;
-}
-
-uint64_t Mutex::GetExclusiveOwnerTid() const {
-#if ART_USE_FUTEXES
- return exclusive_owner_;
-#elif defined(__BIONIC__)
- return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
-#elif defined(__GLIBC__)
- return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
-#elif defined(__APPLE__)
- const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
- pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
- // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
- // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
- if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
- return 0;
- }
- uint64_t tid;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
- return tid;
-#else
-#error unsupported C library
-#endif
-}
-
void Mutex::Dump(std::ostream& os) const {
os << (recursive_ ? "recursive " : "non-recursive ")
<< name_
@@ -549,7 +466,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == 0) {
+ if (LIKELY(cur_state == 0)) {
// Change state from 0 to -1.
done = android_atomic_acquire_cas(0, -1, &state_) == 0;
} else {
@@ -583,14 +500,14 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == -1) {
+ if (LIKELY(cur_state == -1)) {
// We're no longer the owner.
exclusive_owner_ = 0;
// Change state from -1 to 0.
done = android_atomic_release_cas(-1, 0, &state_) == 0;
- if (done) { // cmpxchg may fail due to noise?
+ if (LIKELY(done)) { // The cmpxchg may fail spuriously, in which case we retry.
// Wake any waiters.
- if (num_pending_readers_ > 0 || num_pending_writers_ > 0) {
+ if (UNLIKELY(num_pending_readers_ > 0 || num_pending_writers_ > 0)) {
futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
}
}
@@ -687,18 +604,6 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) {
return true;
}
-bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
- bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
- if (kDebugLocking) {
- // Sanity that if the pthread thinks we own the lock the Thread agrees.
- if (self != NULL && result) {
- CHECK_EQ(self->GetHeldMutex(level_), this);
- }
- }
- return result;
-}
-
bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
DCHECK(self == NULL || self == Thread::Current());
bool result;
@@ -710,37 +615,6 @@ bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
return result;
}
-uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
-#if ART_USE_FUTEXES
- int32_t state = state_;
- if (state == 0) {
- return 0; // No owner.
- } else if (state > 0) {
- return -1; // Shared.
- } else {
- return exclusive_owner_;
- }
-#else
-#if defined(__BIONIC__)
- return rwlock_.writerThreadId;
-#elif defined(__GLIBC__)
- return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
-#elif defined(__APPLE__)
- const darwin_pthread_rwlock_t*
- dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
- pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
- if (owner == (pthread_t)0) {
- return 0;
- }
- uint64_t tid;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
- return tid;
-#else
-#error unsupported C library
-#endif
-#endif
-}
-
void ReaderWriterMutex::Dump(std::ostream& os) const {
os << name_
<< " level=" << static_cast<int>(level_)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 210386aebb..17a179f2d9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1083,13 +1083,14 @@ void ClassLinker::InitFromImage() {
// reinit references to when reinitializing a ClassLinker from a
// mapped image.
void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty) {
- visitor(class_roots_, arg);
+ class_roots_ = down_cast<mirror::ObjectArray<mirror::Class>*>(visitor(class_roots_, arg));
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
if (!only_dirty || dex_caches_dirty_) {
- for (mirror::DexCache* dex_cache : dex_caches_) {
- visitor(dex_cache, arg);
+ for (mirror::DexCache*& dex_cache : dex_caches_) {
+ dex_cache = down_cast<mirror::DexCache*>(visitor(dex_cache, arg));
+ DCHECK(dex_cache != nullptr);
}
if (clean_dirty) {
dex_caches_dirty_ = false;
@@ -1100,8 +1101,9 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, b
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
if (!only_dirty || class_table_dirty_) {
- for (const std::pair<size_t, mirror::Class*>& it : class_table_) {
- visitor(it.second, arg);
+ for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
+ it.second = down_cast<mirror::Class*>(visitor(it.second, arg));
+ DCHECK(it.second != nullptr);
}
if (clean_dirty) {
class_table_dirty_ = false;
@@ -1112,7 +1114,8 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, b
// handle image roots by using the MS/CMS rescanning of dirty cards.
}
- visitor(array_iftable_, arg);
+ array_iftable_ = reinterpret_cast<mirror::IfTable*>(visitor(array_iftable_, arg));
+ DCHECK(array_iftable_ != nullptr);
}
void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) {
@@ -1837,7 +1840,7 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
SirtRef<mirror::Class>& klass) {
uint32_t dex_method_idx = it.GetMemberIndex();
const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
- StringPiece method_name(dex_file.GetMethodName(method_id));
+ StringPiece method_name(dex_file.StringDataAsStringPieceByIdx(method_id.name_idx_));
mirror::ArtMethod* dst = AllocArtMethod(self);
if (UNLIKELY(dst == NULL)) {
@@ -1866,7 +1869,7 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
klass->SetFinalizable();
} else {
ClassHelper kh(klass.get());
- StringPiece klass_descriptor(kh.GetDescriptor());
+ StringPiece klass_descriptor(kh.GetDescriptorAsStringPiece());
// The Enum class declares a "final" finalize() method to prevent subclasses from
// introducing a finalizer. We don't want to set the finalizable flag for Enum or its
// subclasses, so we exclude it here.
@@ -2216,7 +2219,7 @@ bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader*
++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
- if (strcmp(kh.GetDescriptor(), descriptor) == 0 && klass->GetClassLoader() == class_loader) {
+ if (kh.GetDescriptorAsStringPiece() == descriptor && klass->GetClassLoader() == class_loader) {
class_table_.erase(it);
return true;
}
@@ -2262,15 +2265,16 @@ mirror::Class* ClassLinker::LookupClassFromTableLocked(const char* descriptor,
for (auto it = class_table_.lower_bound(hash); it != end && it->first == hash; ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
- if (klass->GetClassLoader() == class_loader && strcmp(descriptor, kh.GetDescriptor()) == 0) {
+ if (klass->GetClassLoader() == class_loader && kh.GetDescriptorAsStringPiece() == descriptor) {
if (kIsDebugBuild) {
// Check for duplicates in the table.
for (++it; it != end && it->first == hash; ++it) {
mirror::Class* klass2 = it->second;
kh.ChangeClass(klass2);
- CHECK(!(strcmp(descriptor, kh.GetDescriptor()) == 0 && klass2->GetClassLoader() == class_loader))
- << PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " "
- << PrettyClass(klass2) << " " << klass2 << " " << klass2->GetClassLoader();
+ CHECK(!(kh.GetDescriptorAsStringPiece() == descriptor &&
+ klass2->GetClassLoader() == class_loader))
+ << PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " "
+ << PrettyClass(klass2) << " " << klass2 << " " << klass2->GetClassLoader();
}
}
return klass;
@@ -2376,7 +2380,7 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Clas
it != end && it->first == hash; ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
- if (strcmp(descriptor, kh.GetDescriptor()) == 0) {
+ if (kh.GetDescriptorAsStringPiece() == descriptor) {
result.push_back(klass);
}
}
@@ -2535,11 +2539,11 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
CHECK(oat_file != NULL) << dex_file.GetLocation() << " " << PrettyClass(klass);
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation());
CHECK(oat_dex_file != NULL) << dex_file.GetLocation() << " " << PrettyClass(klass);
- const char* descriptor = ClassHelper(klass).GetDescriptor();
uint16_t class_def_index = klass->GetDexClassDefIndex();
UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file->GetOatClass(class_def_index));
CHECK(oat_class.get() != NULL)
- << dex_file.GetLocation() << " " << PrettyClass(klass) << " " << descriptor;
+ << dex_file.GetLocation() << " " << PrettyClass(klass) << " "
+ << ClassHelper(klass).GetDescriptor();
oat_file_class_status = oat_class->GetStatus();
if (oat_file_class_status == mirror::Class::kStatusVerified ||
oat_file_class_status == mirror::Class::kStatusInitialized) {
@@ -2578,7 +2582,8 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
return false;
}
LOG(FATAL) << "Unexpected class status: " << oat_file_class_status
- << " " << dex_file.GetLocation() << " " << PrettyClass(klass) << " " << descriptor;
+ << " " << dex_file.GetLocation() << " " << PrettyClass(klass) << " "
+ << ClassHelper(klass).GetDescriptor();
return false;
}
@@ -2818,12 +2823,12 @@ static void CheckProxyConstructor(mirror::ArtMethod* constructor)
CHECK(constructor->IsConstructor());
MethodHelper mh(constructor);
CHECK_STREQ(mh.GetName(), "<init>");
- CHECK_EQ(mh.GetSignature(), std::string("(Ljava/lang/reflect/InvocationHandler;)V"));
+ CHECK_STREQ(mh.GetSignature().ToString().c_str(), "(Ljava/lang/reflect/InvocationHandler;)V");
DCHECK(constructor->IsPublic());
}
mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self, SirtRef<mirror::Class>& klass,
- SirtRef<mirror::ArtMethod>& prototype) {
+ SirtRef<mirror::ArtMethod>& prototype) {
// Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
// prototype method
prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(),
@@ -2887,7 +2892,7 @@ static bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics,
}
if (!can_init_statics) {
// Check if there's a class initializer.
- mirror::ArtMethod* clinit = klass->FindDeclaredDirectMethod("<clinit>", "()V");
+ mirror::ArtMethod* clinit = klass->FindClassInitializer();
if (clinit != NULL) {
return false;
}
@@ -3034,7 +3039,7 @@ bool ClassLinker::InitializeClass(mirror::Class* klass, bool can_init_statics,
}
}
- mirror::ArtMethod* clinit = klass->FindDeclaredDirectMethod("<clinit>", "()V");
+ mirror::ArtMethod* clinit = klass->FindClassInitializer();
if (clinit != NULL) {
CHECK(can_init_statics);
if (LIKELY(Runtime::Current()->IsStarted())) {
@@ -3724,10 +3729,10 @@ struct LinkFieldsComparator {
// same basic group? then sort by string.
fh_->ChangeField(field1);
- StringPiece name1(fh_->GetName());
+ const char* name1 = fh_->GetName();
fh_->ChangeField(field2);
- StringPiece name2(fh_->GetName());
- return name1 < name2;
+ const char* name2 = fh_->GetName();
+ return strcmp(name1, name2) < 0;
}
FieldHelper* fh_;
@@ -3761,7 +3766,9 @@ bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
// minimizes disruption of C++ version such as Class and Method.
std::deque<mirror::ArtField*> grouped_and_sorted_fields;
for (size_t i = 0; i < num_fields; i++) {
- grouped_and_sorted_fields.push_back(fields->Get(i));
+ mirror::ArtField* f = fields->Get(i);
+ CHECK(f != NULL);
+ grouped_and_sorted_fields.push_back(f);
}
FieldHelper fh(NULL, this);
std::sort(grouped_and_sorted_fields.begin(),
@@ -3828,7 +3835,7 @@ bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
// We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it.
if (!is_static &&
- StringPiece(ClassHelper(klass.get(), this).GetDescriptor()) == "Ljava/lang/ref/Reference;") {
+ (ClassHelper(klass.get(), this).GetDescriptorAsStringPiece() == "Ljava/lang/ref/Reference;")) {
// We know there are no non-reference fields in the Reference classes, and we know
// that 'referent' is alphabetically last, so this is easy...
CHECK_EQ(num_reference_fields, num_fields);
@@ -3837,39 +3844,39 @@ bool ClassLinker::LinkFields(SirtRef<mirror::Class>& klass, bool is_static) {
--num_reference_fields;
}
-#ifndef NDEBUG
- // Make sure that all reference fields appear before
- // non-reference fields, and all double-wide fields are aligned.
- bool seen_non_ref = false;
- for (size_t i = 0; i < num_fields; i++) {
- mirror::ArtField* field = fields->Get(i);
- if (false) { // enable to debug field layout
- LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
- << " class=" << PrettyClass(klass.get())
- << " field=" << PrettyField(field)
- << " offset=" << field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset()),
- false);
- }
- fh.ChangeField(field);
- Primitive::Type type = fh.GetTypeAsPrimitiveType();
- bool is_primitive = type != Primitive::kPrimNot;
- if (StringPiece(ClassHelper(klass.get(), this).GetDescriptor()) == "Ljava/lang/ref/Reference;" &&
- StringPiece(fh.GetName()) == "referent") {
- is_primitive = true; // We lied above, so we have to expect a lie here.
- }
- if (is_primitive) {
- if (!seen_non_ref) {
- seen_non_ref = true;
- DCHECK_EQ(num_reference_fields, i);
+ if (kIsDebugBuild) {
+ // Make sure that all reference fields appear before
+ // non-reference fields, and all double-wide fields are aligned.
+ bool seen_non_ref = false;
+ for (size_t i = 0; i < num_fields; i++) {
+ mirror::ArtField* field = fields->Get(i);
+ if (false) { // Set to true to debug field layout.
+ LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
+ << " class=" << PrettyClass(klass.get())
+ << " field=" << PrettyField(field)
+ << " offset=" << field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset()),
+ false);
+ }
+ fh.ChangeField(field);
+ Primitive::Type type = fh.GetTypeAsPrimitiveType();
+ bool is_primitive = type != Primitive::kPrimNot;
+ if (ClassHelper(klass.get(), this).GetDescriptorAsStringPiece() == "Ljava/lang/ref/Reference;" &&
+ fh.GetNameAsStringPiece() == "referent") {
+ is_primitive = true; // We lied above, so we have to expect a lie here.
+ }
+ if (is_primitive) {
+ if (!seen_non_ref) {
+ seen_non_ref = true;
+ DCHECK_EQ(num_reference_fields, i);
+ }
+ } else {
+ DCHECK(!seen_non_ref);
}
- } else {
- DCHECK(!seen_non_ref);
+ }
+ if (!seen_non_ref) {
+ DCHECK_EQ(num_fields, num_reference_fields);
}
}
- if (!seen_non_ref) {
- DCHECK_EQ(num_fields, num_reference_fields);
- }
-#endif
size = field_offset.Uint32Value();
// Update klass
if (is_static) {
@@ -3985,11 +3992,11 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
}
mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
- uint32_t method_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader,
- const mirror::ArtMethod* referrer,
- InvokeType type) {
+ uint32_t method_idx,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader,
+ const mirror::ArtMethod* referrer,
+ InvokeType type) {
DCHECK(dex_cache != NULL);
// Check for hit in the dex cache.
mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
@@ -4024,7 +4031,7 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
if (resolved == NULL) {
// Search by name, which works across dex files.
const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
- std::string signature(dex_file.CreateMethodSignature(method_id.proto_idx_, NULL));
+ const Signature signature = dex_file.GetMethodSignature(method_id);
switch (type) {
case kDirect: // Fall-through.
case kStatic:
@@ -4054,7 +4061,7 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
// We failed to find the method which means either an access error, an incompatible class
// change, or no such method. First try to find the method among direct and virtual methods.
const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
- std::string signature(dex_file.CreateMethodSignature(method_id.proto_idx_, NULL));
+ const Signature signature = dex_file.GetMethodSignature(method_id);
switch (type) {
case kDirect:
case kStatic:
@@ -4172,9 +4179,9 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file,
}
mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
- uint32_t field_idx,
- mirror::DexCache* dex_cache,
- mirror::ClassLoader* class_loader) {
+ uint32_t field_idx,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader) {
DCHECK(dex_cache != NULL);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
if (resolved != NULL) {
@@ -4187,8 +4194,9 @@ mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
return NULL;
}
- const char* name = dex_file.GetFieldName(field_id);
- const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+ StringPiece name(dex_file.StringDataAsStringPieceByIdx(field_id.name_idx_));
+ StringPiece type(dex_file.StringDataAsStringPieceByIdx(
+ dex_file.GetTypeId(field_id.type_idx_).descriptor_idx_));
resolved = klass->FindField(name, type);
if (resolved != NULL) {
dex_cache->SetResolvedField(field_idx, resolved);
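
Note: a recurring change in class_linker.cc above is replacing strcmp over NUL-terminated descriptors with StringPiece comparison. A (pointer, length) view can reject a mismatch on length alone before touching any bytes; a minimal sketch of the idea (hypothetical Piece type, not ART's StringPiece):

    #include <cstring>

    struct Piece {
      const char* data;
      size_t len;
    };

    inline bool operator==(const Piece& a, const Piece& b) {
      // Compare lengths first; memcmp runs only on equal-length views.
      return a.len == b.len && memcmp(a.data, b.data, a.len) == 0;
    }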
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index bbc2877b73..ad9347fee1 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -152,7 +152,7 @@ class ClassLinkerTest : public CommonTest {
EXPECT_TRUE(method != NULL);
EXPECT_TRUE(method->GetClass() != NULL);
EXPECT_TRUE(mh.GetName() != NULL);
- EXPECT_TRUE(mh.GetSignature() != NULL);
+ EXPECT_TRUE(mh.GetSignature() != Signature::NoSignature());
EXPECT_TRUE(method->GetDexCacheStrings() != NULL);
EXPECT_TRUE(method->GetDexCacheResolvedMethods() != NULL);
@@ -340,8 +340,9 @@ class ClassLinkerTest : public CommonTest {
}
}
- static void TestRootVisitor(const mirror::Object* root, void*) {
+ static mirror::Object* TestRootVisitor(mirror::Object* root, void*) {
EXPECT_TRUE(root != NULL);
+ return root;
}
};
@@ -941,15 +942,16 @@ TEST_F(ClassLinkerTest, Interfaces) {
EXPECT_TRUE(K->IsAssignableFrom(B));
EXPECT_TRUE(J->IsAssignableFrom(B));
- mirror::ArtMethod* Ii = I->FindVirtualMethod("i", "()V");
- mirror::ArtMethod* Jj1 = J->FindVirtualMethod("j1", "()V");
- mirror::ArtMethod* Jj2 = J->FindVirtualMethod("j2", "()V");
- mirror::ArtMethod* Kj1 = K->FindInterfaceMethod("j1", "()V");
- mirror::ArtMethod* Kj2 = K->FindInterfaceMethod("j2", "()V");
- mirror::ArtMethod* Kk = K->FindInterfaceMethod("k", "()V");
- mirror::ArtMethod* Ai = A->FindVirtualMethod("i", "()V");
- mirror::ArtMethod* Aj1 = A->FindVirtualMethod("j1", "()V");
- mirror::ArtMethod* Aj2 = A->FindVirtualMethod("j2", "()V");
+ const Signature void_sig = I->GetDexCache()->GetDexFile()->CreateSignature("()V");
+ mirror::ArtMethod* Ii = I->FindVirtualMethod("i", void_sig);
+ mirror::ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig);
+ mirror::ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig);
+ mirror::ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig);
+ mirror::ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig);
+ mirror::ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig);
+ mirror::ArtMethod* Ai = A->FindVirtualMethod("i", void_sig);
+ mirror::ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig);
+ mirror::ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig);
ASSERT_TRUE(Ii != NULL);
ASSERT_TRUE(Jj1 != NULL);
ASSERT_TRUE(Jj2 != NULL);
diff --git a/runtime/common_test.h b/runtime/common_test.h
index dc1f5922d9..fe54d0341d 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -286,12 +286,7 @@ class CommonTest : public testing::Test {
if (java_lang_dex_file_ == NULL) {
LOG(FATAL) << "Could not open .dex file '" << GetLibCoreDexFileName() << "'\n";
}
- conscrypt_file_ = DexFile::Open(GetConscryptFileName(), GetConscryptFileName());
- if (conscrypt_file_ == NULL) {
- LOG(FATAL) << "Could not open .dex file '" << GetConscryptFileName() << "'\n";
- }
boot_class_path_.push_back(java_lang_dex_file_);
- boot_class_path_.push_back(conscrypt_file_);
std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
@@ -398,10 +393,6 @@ class CommonTest : public testing::Test {
return GetDexFileName("core-libart");
}
- std::string GetConscryptFileName() {
- return GetDexFileName("conscrypt");
- }
-
std::string GetDexFileName(const std::string& jar_prefix) {
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
@@ -520,7 +511,6 @@ class CommonTest : public testing::Test {
std::string android_data_;
std::string dalvik_cache_;
const DexFile* java_lang_dex_file_; // owned by runtime_
- const DexFile* conscrypt_file_; // owned by runtime_
std::vector<const DexFile*> boot_class_path_;
UniquePtr<Runtime> runtime_;
// Owned by the runtime
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 26ce5be1ec..189e3edc0f 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -265,7 +265,7 @@ void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
// NoSuchMethodError
void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
- const StringPiece& signature) {
+ const Signature& signature) {
std::ostringstream msg;
ClassHelper kh(c);
msg << "No " << type << " method " << name << signature
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 99c6343cdd..1d77e2d625 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -27,6 +27,7 @@ class ArtMethod;
class Class;
class Object;
} // namespace mirror
+class Signature;
class StringPiece;
class ThrowLocation;
@@ -140,7 +141,7 @@ void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
// NoSuchMethodError
void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
- const StringPiece& signature)
+ const Signature& signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ThrowNoSuchMethodError(uint32_t method_idx)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 88269e5578..ae57aa34ec 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -891,7 +891,7 @@ JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* p
}
if (pDescriptor != NULL) {
- *pDescriptor = ClassHelper(c).GetDescriptor();
+ *pDescriptor = ClassHelper(c).GetDescriptorAsStringPiece().as_string();
}
return JDWP::ERR_NONE;
}
@@ -928,13 +928,13 @@ JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf*
return JDWP::ERR_NONE;
}
-JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string& signature) {
+JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
JDWP::JdwpError status;
mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
return status;
}
- signature = ClassHelper(c).GetDescriptor();
+ *signature = ClassHelper(c).GetDescriptorAsStringPiece().as_string();
return JDWP::ERR_NONE;
}
@@ -1065,8 +1065,8 @@ JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int c
LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
return JDWP::ERR_INVALID_LENGTH;
}
- std::string descriptor(ClassHelper(dst->GetClass()).GetDescriptor());
- JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);
+ const char* descriptor = ClassHelper(dst->GetClass()).GetDescriptor();
+ JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor + 1);
if (IsPrimitiveTag(tag)) {
size_t width = GetTagWidth(tag);
@@ -1287,7 +1287,7 @@ JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_g
MethodHelper mh(m);
expandBufAddMethodId(pReply, ToMethodId(m));
expandBufAddUtf8String(pReply, mh.GetName());
- expandBufAddUtf8String(pReply, mh.GetSignature());
+ expandBufAddUtf8String(pReply, mh.GetSignature().ToString());
if (with_generic) {
static const char genericSignature[1] = "";
expandBufAddUtf8String(pReply, genericSignature);
@@ -2287,7 +2287,8 @@ void Dbg::PostClassPrepare(mirror::Class* c) {
// since the class may not yet be verified.
int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
JDWP::JdwpTypeTag tag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
- gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), ClassHelper(c).GetDescriptor(), state);
+ gJdwpState->PostClassPrepare(tag, gRegistry->Add(c),
+ ClassHelper(c).GetDescriptorAsStringPiece().as_string(), state);
}
void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
@@ -3486,7 +3487,9 @@ void Dbg::SetAllocTrackingEnabled(bool enabled) {
recent_allocation_records_ = new AllocRecord[gAllocRecordMax];
CHECK(recent_allocation_records_ != NULL);
}
+ Runtime::Current()->InstrumentQuickAllocEntryPoints();
} else {
+ Runtime::Current()->UninstrumentQuickAllocEntryPoints();
delete[] recent_allocation_records_;
recent_allocation_records_ = NULL;
}
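
Note: the SetAllocTrackingEnabled change above switches the runtime's allocation entry points together with the record buffer, and the ordering matters: the buffer is allocated before instrumentation starts, and instrumentation stops before the buffer is freed. A sketch of the toggle shape, with hypothetical names:

    #include <cstddef>

    struct Record {};
    constexpr size_t kMax = 64 * 1024;  // assumed capacity, for illustration
    static Record* records = nullptr;
    void InstrumentAllocEntryPoints();
    void UninstrumentAllocEntryPoints();

    void SetTrackingEnabled(bool enabled) {
      if (enabled) {
        records = new Record[kMax];      // Buffer must exist before events arrive.
        InstrumentAllocEntryPoints();
      } else {
        UninstrumentAllocEntryPoints();  // Stop events before tearing the buffer down.
        delete[] records;
        records = nullptr;
      }
    }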
diff --git a/runtime/debugger.h b/runtime/debugger.h
index d0fe445df1..8574a3308f 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -149,7 +149,7 @@ class Dbg {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static JDWP::JdwpError GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string& signature)
+ static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string& source_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index dee80269d6..c57a1e7582 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_DEX_FILE_INL_H_
#include "base/logging.h"
+#include "base/stringpiece.h"
#include "dex_file.h"
#include "leb128.h"
#include "utils.h"
@@ -36,6 +37,20 @@ inline const char* DexFile::GetStringDataAndLength(const StringId& string_id, ui
return reinterpret_cast<const char*>(ptr);
}
+inline StringPiece DexFile::StringDataAsStringPieceByIdx(uint32_t idx) const {
+ if (idx == kDexNoIndex) {
+ return StringPiece();
+ }
+ const StringId& string_id = GetStringId(idx);
+ uint32_t length;
+ const char* data = GetStringDataAndLength(string_id, &length);
+ return StringPiece(data, static_cast<int>(length));
+}
+
+inline const Signature DexFile::GetMethodSignature(const MethodId& method_id) const {
+ return Signature(this, GetProtoId(method_id.proto_idx_));
+}
+
inline const DexFile::TryItem* DexFile::GetTryItems(const CodeItem& code_item, uint32_t offset) {
const uint16_t* insns_end_ = &code_item.insns_[code_item.insns_size_in_code_units_];
return reinterpret_cast<const TryItem*>
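
Note: the two inline helpers added above are the building blocks for this diff's wider StringPiece/Signature migration. A hedged usage sketch (dex_file and method_idx are assumed to be in scope; error handling omitted):

    // Resolve a method's name and signature with the new accessors.
    const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
    StringPiece name = dex_file->StringDataAsStringPieceByIdx(method_id.name_idx_);
    Signature sig = dex_file->GetMethodSignature(method_id);
    if (name == "finalize" && sig == "()V") {
      // The name comparison avoids std::string; the signature comparison still
      // builds a temporary (see the TODO on Signature::operator==(StringPiece)).
    }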
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index e81c456ccf..275dcc5a03 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -38,7 +38,7 @@
#include "safe_map.h"
#include "thread.h"
#include "UniquePtr.h"
-#include "utf.h"
+#include "utf-inl.h"
#include "utils.h"
#include "well_known_classes.h"
#include "zip_archive.h"
@@ -503,8 +503,8 @@ const DexFile::ProtoId* DexFile::FindProtoId(uint16_t return_type_idx,
}
// Given a signature place the type ids into the given vector
-bool DexFile::CreateTypeList(uint16_t* return_type_idx, std::vector<uint16_t>* param_type_idxs,
- const std::string& signature) const {
+bool DexFile::CreateTypeList(const StringPiece& signature, uint16_t* return_type_idx,
+ std::vector<uint16_t>* param_type_idxs) const {
if (signature[0] != '(') {
return false;
}
@@ -518,6 +518,7 @@ bool DexFile::CreateTypeList(uint16_t* return_type_idx, std::vector<uint16_t>* p
process_return = true;
continue;
}
+ // TODO: avoid building a string.
std::string descriptor;
descriptor += c;
while (c == '[') { // process array prefix
@@ -557,35 +558,18 @@ bool DexFile::CreateTypeList(uint16_t* return_type_idx, std::vector<uint16_t>* p
return false; // failed to correctly parse return type
}
-// Materializes the method descriptor for a method prototype. Method
-// descriptors are not stored directly in the dex file. Instead, one
-// must assemble the descriptor from references in the prototype.
-std::string DexFile::CreateMethodSignature(uint32_t proto_idx, int32_t* unicode_length) const {
- const ProtoId& proto_id = GetProtoId(proto_idx);
- std::string descriptor;
- descriptor.push_back('(');
- const TypeList* type_list = GetProtoParameters(proto_id);
- size_t parameter_length = 0;
- if (type_list != NULL) {
- // A non-zero number of arguments. Append the type names.
- for (size_t i = 0; i < type_list->Size(); ++i) {
- const TypeItem& type_item = type_list->GetTypeItem(i);
- uint32_t type_idx = type_item.type_idx_;
- uint32_t type_length;
- const char* name = StringByTypeIdx(type_idx, &type_length);
- parameter_length += type_length;
- descriptor.append(name);
- }
+const Signature DexFile::CreateSignature(const StringPiece& signature) const {
+ uint16_t return_type_idx;
+ std::vector<uint16_t> param_type_indices;
+ bool success = CreateTypeList(signature, &return_type_idx, &param_type_indices);
+ if (!success) {
+ return Signature::NoSignature();
}
- descriptor.push_back(')');
- uint32_t return_type_idx = proto_id.return_type_idx_;
- uint32_t return_type_length;
- const char* name = StringByTypeIdx(return_type_idx, &return_type_length);
- descriptor.append(name);
- if (unicode_length != NULL) {
- *unicode_length = parameter_length + return_type_length + 2; // 2 for ( and )
+ const ProtoId* proto_id = FindProtoId(return_type_idx, param_type_indices);
+ if (proto_id == NULL) {
+ return Signature::NoSignature();
}
- return descriptor;
+ return Signature(this, *proto_id);
}
int32_t DexFile::GetLineNumFromPC(const mirror::ArtMethod* method, uint32_t rel_pc) const {
@@ -831,6 +815,30 @@ bool DexFile::LineNumForPcCb(void* raw_context, uint32_t address, uint32_t line_
}
}
+std::string Signature::ToString() const {
+ if (dex_file_ == nullptr) {
+ CHECK(proto_id_ == nullptr);
+ return "<no signature>";
+ }
+ const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+ std::string result;
+ if (params == nullptr) {
+ result += "()";
+ } else {
+ result += "(";
+ for (uint32_t i = 0; i < params->Size(); ++i) {
+ result += dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_);
+ }
+ result += ")";
+ }
+ result += dex_file_->StringByTypeIdx(proto_id_->return_type_idx_);
+ return result;
+}
+
+std::ostream& operator<<(std::ostream& os, const Signature& sig) {
+ return os << sig.ToString();
+}
+
// Decodes the header section from the class data bytes.
void ClassDataItemIterator::ReadClassDataHeader() {
CHECK(ptr_pos_ != NULL);
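
Note: Signature::ToString() above rebuilds the textual descriptor on demand as "(" followed by each parameter descriptor, then ")" and the return descriptor. For example, per the test expectations later in this diff, a method taking (int, double, long, Object) and returning Float stringifies as:

    (IDJLjava/lang/Object;)Ljava/lang/Float;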
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 7be5cb848f..40e4c72772 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -40,6 +40,8 @@ namespace mirror {
class DexCache;
} // namespace mirror
class ClassLinker;
+class Signature;
+class StringPiece;
class ZipArchive;
// TODO: move all of the macro functionality into the DexCache class.
@@ -432,6 +434,8 @@ class DexFile {
return GetStringDataAndLength(string_id, unicode_length);
}
+ StringPiece StringDataAsStringPieceByIdx(uint32_t idx) const;
+
const char* StringDataByIdx(uint32_t idx) const {
uint32_t unicode_length;
return StringDataAndLengthByIdx(idx, &unicode_length);
@@ -556,10 +560,8 @@ class DexFile {
return GetProtoId(method_id.proto_idx_);
}
- // Returns the signature of a method id.
- const std::string GetMethodSignature(const MethodId& method_id) const {
- return CreateMethodSignature(method_id.proto_idx_, NULL);
- }
+ // Returns a representation of the signature of a method id.
+ const Signature GetMethodSignature(const MethodId& method_id) const;
// Returns the name of a method id.
const char* GetMethodName(const MethodId& method_id) const {
@@ -653,15 +655,16 @@ class DexFile {
}
// Looks up a proto id for a given return type and signature type list
- const ProtoId* FindProtoId(uint16_t return_type_id,
+ const ProtoId* FindProtoId(uint16_t return_type_idx,
const std::vector<uint16_t>& signature_type_idxs_) const;
// Given a signature place the type ids into the given vector, returns true on success
- bool CreateTypeList(uint16_t* return_type_idx, std::vector<uint16_t>* param_type_idxs,
- const std::string& signature) const;
+ bool CreateTypeList(const StringPiece& signature, uint16_t* return_type_idx,
+ std::vector<uint16_t>* param_type_idxs) const;
- // Given a proto_idx decode the type list and return type into a method signature
- std::string CreateMethodSignature(uint32_t proto_idx, int32_t* unicode_length) const;
+ // Creates a Signature from the given string signature, or returns Signature::NoSignature() if
+ // that is not possible.
+ const Signature CreateSignature(const StringPiece& signature) const;
// Returns the short form method descriptor for the given prototype.
const char* GetShorty(uint32_t proto_idx) const {
@@ -939,6 +942,83 @@ class DexFileParameterIterator {
DISALLOW_IMPLICIT_CONSTRUCTORS(DexFileParameterIterator);
};
+// Abstracts the signature of a method.
+class Signature {
+ public:
+ std::string ToString() const;
+
+ static Signature NoSignature() {
+ return Signature();
+ }
+
+ bool operator==(const Signature& rhs) const {
+ if (dex_file_ == nullptr) {
+ return rhs.dex_file_ == nullptr;
+ }
+ if (rhs.dex_file_ == nullptr) {
+ return false;
+ }
+ if (dex_file_ == rhs.dex_file_) {
+ return proto_id_ == rhs.proto_id_;
+ }
+ StringPiece shorty(dex_file_->StringDataAsStringPieceByIdx(proto_id_->shorty_idx_));
+ if (shorty != rhs.dex_file_->StringDataAsStringPieceByIdx(rhs.proto_id_->shorty_idx_)) {
+ return false; // Shorty mismatch.
+ }
+ if (shorty[0] == 'L') {
+ const DexFile::TypeId& return_type_id = dex_file_->GetTypeId(proto_id_->return_type_idx_);
+ const DexFile::TypeId& rhs_return_type_id =
+ rhs.dex_file_->GetTypeId(rhs.proto_id_->return_type_idx_);
+ if (dex_file_->StringDataAsStringPieceByIdx(return_type_id.descriptor_idx_) !=
+ rhs.dex_file_->StringDataAsStringPieceByIdx(rhs_return_type_id.descriptor_idx_)) {
+ return false; // Return type mismatch.
+ }
+ }
+ if (shorty.find('L', 1) != StringPiece::npos) {
+ const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
+ const DexFile::TypeList* rhs_params = rhs.dex_file_->GetProtoParameters(*rhs.proto_id_);
+ // Matching shortys imply both lists are null or both are non-null.
+ DCHECK_EQ(params == nullptr, rhs_params == nullptr);
+ if (params != nullptr) {
+ uint32_t params_size = params->Size();
+ DCHECK_EQ(params_size, rhs_params->Size()); // Parameter list size must match.
+ for (uint32_t i = 0; i < params_size; ++i) {
+ const DexFile::TypeId& param_id = dex_file_->GetTypeId(params->GetTypeItem(i).type_idx_);
+ const DexFile::TypeId& rhs_param_id =
+ rhs.dex_file_->GetTypeId(rhs_params->GetTypeItem(i).type_idx_);
+ if (dex_file_->StringDataAsStringPieceByIdx(param_id.descriptor_idx_) !=
+ rhs.dex_file_->StringDataAsStringPieceByIdx(rhs_param_id.descriptor_idx_)) {
+ return false; // Parameter type mismatch.
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const Signature& rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator==(const StringPiece& rhs) const {
+ // TODO: Avoid temporary string allocation.
+ return ToString() == rhs;
+ }
+
+ private:
+ Signature(const DexFile* dex, const DexFile::ProtoId& proto) : dex_file_(dex), proto_id_(&proto) {
+ }
+
+ Signature() : dex_file_(nullptr), proto_id_(nullptr) {
+ }
+
+ friend class DexFile;
+
+ const DexFile* const dex_file_;
+ const DexFile::ProtoId* const proto_id_;
+};
+std::ostream& operator<<(std::ostream& os, const Signature& sig);
+
// Iterate and decode class_data_item
class ClassDataItemIterator {
public:
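
Note: the Signature::operator== defined above compares protos across dex files without materializing strings: the shortys are compared first, and full descriptors are consulted only for reference ('L') slots, since a primitive shorty character already pins the type down. A worked illustration (assumed protos, not from this diff):

    // lhs proto: (ILjava/lang/String;)V   -> shorty "VIL"
    // rhs proto: (IJ)V                    -> shorty "VIJ"
    // The shortys differ at index 2, so operator== returns false without
    // loading any descriptor. Had both shortys been "VIL", only the single
    // 'L' parameter's descriptor would be fetched and compared across files.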
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 32a8354d01..1b40529a08 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -137,14 +137,14 @@ TEST_F(DexFileTest, ClassDefs) {
EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c1));
}
-TEST_F(DexFileTest, CreateMethodSignature) {
+TEST_F(DexFileTest, GetMethodSignature) {
ScopedObjectAccess soa(Thread::Current());
- const DexFile* raw(OpenTestDexFile("CreateMethodSignature"));
+ const DexFile* raw(OpenTestDexFile("GetMethodSignature"));
ASSERT_TRUE(raw != NULL);
EXPECT_EQ(1U, raw->NumClassDefs());
const DexFile::ClassDef& class_def = raw->GetClassDef(0);
- ASSERT_STREQ("LCreateMethodSignature;", raw->GetClassDescriptor(class_def));
+ ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
const byte* class_data = raw->GetClassData(class_def);
ASSERT_TRUE(class_data != NULL);
@@ -156,11 +156,9 @@ TEST_F(DexFileTest, CreateMethodSignature) {
{
ASSERT_EQ(1U, it.NumDirectMethods());
const DexFile::MethodId& method_id = raw->GetMethodId(it.GetMemberIndex());
- uint32_t proto_idx = method_id.proto_idx_;
const char* name = raw->StringDataByIdx(method_id.name_idx_);
ASSERT_STREQ("<init>", name);
- int32_t length;
- std::string signature(raw->CreateMethodSignature(proto_idx, &length));
+ std::string signature(raw->GetMethodSignature(method_id).ToString());
ASSERT_EQ("()V", signature);
}
@@ -173,9 +171,7 @@ TEST_F(DexFileTest, CreateMethodSignature) {
const char* name = raw->StringDataByIdx(method_id.name_idx_);
ASSERT_STREQ("m1", name);
- uint32_t proto_idx = method_id.proto_idx_;
- int32_t length;
- std::string signature(raw->CreateMethodSignature(proto_idx, &length));
+ std::string signature(raw->GetMethodSignature(method_id).ToString());
ASSERT_EQ("(IDJLjava/lang/Object;)Ljava/lang/Float;", signature);
}
@@ -186,20 +182,18 @@ TEST_F(DexFileTest, CreateMethodSignature) {
const char* name = raw->StringDataByIdx(method_id.name_idx_);
ASSERT_STREQ("m2", name);
- uint32_t proto_idx = method_id.proto_idx_;
- int32_t length;
- std::string signature(raw->CreateMethodSignature(proto_idx, &length));
- ASSERT_EQ("(ZSC)LCreateMethodSignature;", signature);
+ std::string signature(raw->GetMethodSignature(method_id).ToString());
+ ASSERT_EQ("(ZSC)LGetMethodSignature;", signature);
}
}
TEST_F(DexFileTest, FindStringId) {
ScopedObjectAccess soa(Thread::Current());
- const DexFile* raw(OpenTestDexFile("CreateMethodSignature"));
+ const DexFile* raw(OpenTestDexFile("GetMethodSignature"));
ASSERT_TRUE(raw != NULL);
EXPECT_EQ(1U, raw->NumClassDefs());
- const char* strings[] = { "LCreateMethodSignature;", "Ljava/lang/Float;", "Ljava/lang/Object;",
+ const char* strings[] = { "LGetMethodSignature;", "Ljava/lang/Float;", "Ljava/lang/Object;",
"D", "I", "J", NULL };
for (size_t i = 0; strings[i] != NULL; i++) {
const char* str = strings[i];
@@ -245,11 +239,10 @@ TEST_F(DexFileTest, FindMethodId) {
const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
const DexFile::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
const DexFile::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
- int32_t length;
ASSERT_TRUE(found != NULL) << "Didn't find method " << i << ": "
<< java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
<< java_lang_dex_file_->GetStringData(name)
- << java_lang_dex_file_->CreateMethodSignature(to_find.proto_idx_, &length);
+ << java_lang_dex_file_->GetMethodSignature(to_find);
EXPECT_EQ(java_lang_dex_file_->GetIndexForMethodId(*found), i);
}
}
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 5b076e07b7..7dc2b3172e 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -21,7 +21,7 @@
#include "leb128.h"
#include "safe_map.h"
#include "UniquePtr.h"
-#include "utf.h"
+#include "utf-inl.h"
#include "utils.h"
#include "zip_archive.h"
diff --git a/runtime/dex_instruction-inl.h b/runtime/dex_instruction-inl.h
index 6e21273358..207b0b6795 100644
--- a/runtime/dex_instruction-inl.h
+++ b/runtime/dex_instruction-inl.h
@@ -24,29 +24,29 @@ namespace art {
//------------------------------------------------------------------------------
// VRegA
//------------------------------------------------------------------------------
-inline int8_t Instruction::VRegA_10t() const {
+inline int8_t Instruction::VRegA_10t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k10t);
- return static_cast<int8_t>(InstAA());
+ return static_cast<int8_t>(InstAA(inst_data));
}
-inline uint8_t Instruction::VRegA_10x() const {
+inline uint8_t Instruction::VRegA_10x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k10x);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint4_t Instruction::VRegA_11n() const {
+inline uint4_t Instruction::VRegA_11n(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k11n);
- return InstA();
+ return InstA(inst_data);
}
-inline uint8_t Instruction::VRegA_11x() const {
+inline uint8_t Instruction::VRegA_11x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k11x);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint4_t Instruction::VRegA_12x() const {
+inline uint4_t Instruction::VRegA_12x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k12x);
- return InstA();
+ return InstA(inst_data);
}
inline int16_t Instruction::VRegA_20t() const {
@@ -54,54 +54,54 @@ inline int16_t Instruction::VRegA_20t() const {
return static_cast<int16_t>(Fetch16(1));
}
-inline uint8_t Instruction::VRegA_21c() const {
+inline uint8_t Instruction::VRegA_21c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k21c);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_21h() const {
+inline uint8_t Instruction::VRegA_21h(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k21h);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_21s() const {
+inline uint8_t Instruction::VRegA_21s(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k21s);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_21t() const {
+inline uint8_t Instruction::VRegA_21t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k21t);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_22b() const {
+inline uint8_t Instruction::VRegA_22b(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22b);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint4_t Instruction::VRegA_22c() const {
+inline uint4_t Instruction::VRegA_22c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22c);
- return InstA();
+ return InstA(inst_data);
}
-inline uint4_t Instruction::VRegA_22s() const {
+inline uint4_t Instruction::VRegA_22s(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22s);
- return InstA();
+ return InstA(inst_data);
}
-inline uint4_t Instruction::VRegA_22t() const {
+inline uint4_t Instruction::VRegA_22t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22t);
- return InstA();
+ return InstA(inst_data);
}
-inline uint8_t Instruction::VRegA_22x() const {
+inline uint8_t Instruction::VRegA_22x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22x);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_23x() const {
+inline uint8_t Instruction::VRegA_23x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k23x);
- return InstAA();
+ return InstAA(inst_data);
}
inline int32_t Instruction::VRegA_30t() const {
@@ -109,19 +109,19 @@ inline int32_t Instruction::VRegA_30t() const {
return static_cast<int32_t>(Fetch32(1));
}
-inline uint8_t Instruction::VRegA_31c() const {
+inline uint8_t Instruction::VRegA_31c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k31c);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_31i() const {
+inline uint8_t Instruction::VRegA_31i(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k31i);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_31t() const {
+inline uint8_t Instruction::VRegA_31t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k31t);
- return InstAA();
+ return InstAA(inst_data);
}
inline uint16_t Instruction::VRegA_32x() const {
@@ -129,32 +129,32 @@ inline uint16_t Instruction::VRegA_32x() const {
return Fetch16(1);
}
-inline uint4_t Instruction::VRegA_35c() const {
+inline uint4_t Instruction::VRegA_35c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k35c);
- return InstB(); // This is labeled A in the spec.
+ return InstB(inst_data); // This is labeled A in the spec.
}
-inline uint8_t Instruction::VRegA_3rc() const {
+inline uint8_t Instruction::VRegA_3rc(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k3rc);
- return InstAA();
+ return InstAA(inst_data);
}
-inline uint8_t Instruction::VRegA_51l() const {
+inline uint8_t Instruction::VRegA_51l(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k51l);
- return InstAA();
+ return InstAA(inst_data);
}
//------------------------------------------------------------------------------
// VRegB
//------------------------------------------------------------------------------
-inline int4_t Instruction::VRegB_11n() const {
+inline int4_t Instruction::VRegB_11n(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k11n);
- return static_cast<int4_t>((InstB() << 28) >> 28);
+ return static_cast<int4_t>((InstB(inst_data) << 28) >> 28);
}
-inline uint4_t Instruction::VRegB_12x() const {
+inline uint4_t Instruction::VRegB_12x(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k12x);
- return InstB();
+ return InstB(inst_data);
}
inline uint16_t Instruction::VRegB_21c() const {
@@ -182,19 +182,19 @@ inline uint8_t Instruction::VRegB_22b() const {
return static_cast<uint8_t>(Fetch16(1) & 0xff);
}
-inline uint4_t Instruction::VRegB_22c() const {
+inline uint4_t Instruction::VRegB_22c(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22c);
- return InstB();
+ return InstB(inst_data);
}
-inline uint4_t Instruction::VRegB_22s() const {
+inline uint4_t Instruction::VRegB_22s(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22s);
- return InstB();
+ return InstB(inst_data);
}
-inline uint4_t Instruction::VRegB_22t() const {
+inline uint4_t Instruction::VRegB_22t(uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k22t);
- return InstB();
+ return InstB(inst_data);
}
inline uint16_t Instruction::VRegB_22x() const {
@@ -281,7 +281,7 @@ inline uint16_t Instruction::VRegC_3rc() const {
return Fetch16(2);
}
-inline void Instruction::GetArgs(uint32_t arg[5]) const {
+inline void Instruction::GetArgs(uint32_t arg[5], uint16_t inst_data) const {
DCHECK_EQ(FormatOf(Opcode()), k35c);
/*
@@ -295,7 +295,8 @@ inline void Instruction::GetArgs(uint32_t arg[5]) const {
* method constant (or equivalent) is always in vB.
*/
uint16_t regList = Fetch16(2);
- uint4_t count = InstB(); // This is labeled A in the spec.
+ uint4_t count = InstB(inst_data); // This is labeled A in the spec.
+ DCHECK_LE(count, 5U) << "Invalid arg count in 35c (" << count << ")";
/*
* Copy the argument registers into the arg[] array, and
@@ -305,15 +306,13 @@ inline void Instruction::GetArgs(uint32_t arg[5]) const {
* copies of those.) Note that cases 5..2 fall through.
*/
switch (count) {
- case 5: arg[4] = InstA();
+ case 5: arg[4] = InstA(inst_data);
case 4: arg[3] = (regList >> 12) & 0x0f;
case 3: arg[2] = (regList >> 8) & 0x0f;
case 2: arg[1] = (regList >> 4) & 0x0f;
case 1: arg[0] = regList & 0x0f; break;
- case 0: break; // Valid, but no need to do anything.
- default:
- LOG(ERROR) << "Invalid arg count in 35c (" << count << ")";
- return;
+ default: // case 0
+ break; // Valid, but no need to do anything.
}
}
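
Note: all of the new (uint16_t inst_data) overloads in this file decode operands from the instruction's first 16-bit code unit, whose layout the InstA/InstB/InstAA helpers spell out. A self-contained sketch of the decoding (example value only):

    #include <cassert>
    #include <cstdint>

    int main() {
      // First code unit: bits 0-7 opcode, bits 8-11 nibble A, bits 12-15
      // nibble B; bits 8-15 together form the AA byte.
      uint16_t inst_data = 0x3B12;                // example value only
      uint8_t opcode  = inst_data & 0xFF;         // 0x12
      uint8_t inst_a  = (inst_data >> 8) & 0x0F;  // 0xB
      uint8_t inst_b  = inst_data >> 12;          // 0x3
      uint8_t inst_aa = inst_data >> 8;           // 0x3B
      assert(opcode == 0x12 && inst_a == 0xB && inst_b == 0x3 && inst_aa == 0x3B);
      return 0;
    }

Passing inst_data explicitly lets a caller fetch the code unit once and feed it to both the opcode and operand decoders instead of re-reading memory in every accessor; the DCHECK_EQ(inst_data, Fetch16(0)) guards keep the overloads honest.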
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 13b0f1c270..c434cdd938 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -217,44 +217,122 @@ class Instruction {
// VRegA
bool HasVRegA() const;
int32_t VRegA() const;
- int8_t VRegA_10t() const;
- uint8_t VRegA_10x() const;
- uint4_t VRegA_11n() const;
- uint8_t VRegA_11x() const;
- uint4_t VRegA_12x() const;
+
+ int8_t VRegA_10t() const {
+ return VRegA_10t(Fetch16(0));
+ }
+ uint8_t VRegA_10x() const {
+ return VRegA_10x(Fetch16(0));
+ }
+ uint4_t VRegA_11n() const {
+ return VRegA_11n(Fetch16(0));
+ }
+ uint8_t VRegA_11x() const {
+ return VRegA_11x(Fetch16(0));
+ }
+ uint4_t VRegA_12x() const {
+ return VRegA_12x(Fetch16(0));
+ }
int16_t VRegA_20t() const;
- uint8_t VRegA_21c() const;
- uint8_t VRegA_21h() const;
- uint8_t VRegA_21s() const;
- uint8_t VRegA_21t() const;
- uint8_t VRegA_22b() const;
- uint4_t VRegA_22c() const;
- uint4_t VRegA_22s() const;
- uint4_t VRegA_22t() const;
- uint8_t VRegA_22x() const;
- uint8_t VRegA_23x() const;
+ uint8_t VRegA_21c() const {
+ return VRegA_21c(Fetch16(0));
+ }
+ uint8_t VRegA_21h() const {
+ return VRegA_21h(Fetch16(0));
+ }
+ uint8_t VRegA_21s() const {
+ return VRegA_21s(Fetch16(0));
+ }
+ uint8_t VRegA_21t() const {
+ return VRegA_21t(Fetch16(0));
+ }
+ uint8_t VRegA_22b() const {
+ return VRegA_22b(Fetch16(0));
+ }
+ uint4_t VRegA_22c() const {
+ return VRegA_22c(Fetch16(0));
+ }
+ uint4_t VRegA_22s() const {
+ return VRegA_22s(Fetch16(0));
+ }
+ uint4_t VRegA_22t() const {
+ return VRegA_22t(Fetch16(0));
+ }
+ uint8_t VRegA_22x() const {
+ return VRegA_22x(Fetch16(0));
+ }
+ uint8_t VRegA_23x() const {
+ return VRegA_23x(Fetch16(0));
+ }
int32_t VRegA_30t() const;
- uint8_t VRegA_31c() const;
- uint8_t VRegA_31i() const;
- uint8_t VRegA_31t() const;
+ uint8_t VRegA_31c() const {
+ return VRegA_31c(Fetch16(0));
+ }
+ uint8_t VRegA_31i() const {
+ return VRegA_31i(Fetch16(0));
+ }
+ uint8_t VRegA_31t() const {
+ return VRegA_31t(Fetch16(0));
+ }
uint16_t VRegA_32x() const;
- uint4_t VRegA_35c() const;
- uint8_t VRegA_3rc() const;
- uint8_t VRegA_51l() const;
+ uint4_t VRegA_35c() const {
+ return VRegA_35c(Fetch16(0));
+ }
+ uint8_t VRegA_3rc() const {
+ return VRegA_3rc(Fetch16(0));
+ }
+ uint8_t VRegA_51l() const {
+ return VRegA_51l(Fetch16(0));
+ }
+
+ // The following methods return the vA operand for various instruction formats. The "inst_data"
+ // parameter holds the first 16 bits of the instruction, from which the returned value is decoded.
+ int8_t VRegA_10t(uint16_t inst_data) const;
+ uint8_t VRegA_10x(uint16_t inst_data) const;
+ uint4_t VRegA_11n(uint16_t inst_data) const;
+ uint8_t VRegA_11x(uint16_t inst_data) const;
+ uint4_t VRegA_12x(uint16_t inst_data) const;
+ uint8_t VRegA_21c(uint16_t inst_data) const;
+ uint8_t VRegA_21h(uint16_t inst_data) const;
+ uint8_t VRegA_21s(uint16_t inst_data) const;
+ uint8_t VRegA_21t(uint16_t inst_data) const;
+ uint8_t VRegA_22b(uint16_t inst_data) const;
+ uint4_t VRegA_22c(uint16_t inst_data) const;
+ uint4_t VRegA_22s(uint16_t inst_data) const;
+ uint4_t VRegA_22t(uint16_t inst_data) const;
+ uint8_t VRegA_22x(uint16_t inst_data) const;
+ uint8_t VRegA_23x(uint16_t inst_data) const;
+ uint8_t VRegA_31c(uint16_t inst_data) const;
+ uint8_t VRegA_31i(uint16_t inst_data) const;
+ uint8_t VRegA_31t(uint16_t inst_data) const;
+ uint4_t VRegA_35c(uint16_t inst_data) const;
+ uint8_t VRegA_3rc(uint16_t inst_data) const;
+ uint8_t VRegA_51l(uint16_t inst_data) const;
// VRegB
bool HasVRegB() const;
int32_t VRegB() const;
- int4_t VRegB_11n() const;
- uint4_t VRegB_12x() const;
+
+ int4_t VRegB_11n() const {
+ return VRegB_11n(Fetch16(0));
+ }
+ uint4_t VRegB_12x() const {
+ return VRegB_12x(Fetch16(0));
+ }
uint16_t VRegB_21c() const;
uint16_t VRegB_21h() const;
int16_t VRegB_21s() const;
int16_t VRegB_21t() const;
uint8_t VRegB_22b() const;
- uint4_t VRegB_22c() const;
- uint4_t VRegB_22s() const;
- uint4_t VRegB_22t() const;
+ uint4_t VRegB_22c() const {
+ return VRegB_22c(Fetch16(0));
+ }
+ uint4_t VRegB_22s() const {
+ return VRegB_22s(Fetch16(0));
+ }
+ uint4_t VRegB_22t() const {
+ return VRegB_22t(Fetch16(0));
+ }
uint16_t VRegB_22x() const;
uint8_t VRegB_23x() const;
uint32_t VRegB_31c() const;
@@ -265,9 +343,19 @@ class Instruction {
uint16_t VRegB_3rc() const;
uint64_t VRegB_51l() const; // vB_wide
+ // The following methods return the vB operand for all instruction formats where it is encoded in
+ // the first 16 bits of the instruction. The "inst_data" parameter holds these 16 bits, and the
+ // returned value is decoded from them.
+ int4_t VRegB_11n(uint16_t inst_data) const;
+ uint4_t VRegB_12x(uint16_t inst_data) const;
+ uint4_t VRegB_22c(uint16_t inst_data) const;
+ uint4_t VRegB_22s(uint16_t inst_data) const;
+ uint4_t VRegB_22t(uint16_t inst_data) const;
+
// VRegC
bool HasVRegC() const;
int32_t VRegC() const;
+
int8_t VRegC_22b() const;
uint16_t VRegC_22c() const;
int16_t VRegC_22s() const;
@@ -277,11 +365,21 @@ class Instruction {
uint16_t VRegC_3rc() const;
// Fills the given array with the 'arg' array of the instruction.
- void GetArgs(uint32_t args[5]) const;
+ void GetArgs(uint32_t args[5], uint16_t inst_data) const;
+ void GetArgs(uint32_t args[5]) const {
+ return GetArgs(args, Fetch16(0));
+ }
+
+ // Returns the opcode field of the instruction. The given "inst_data" parameter must be the first
+ // 16 bits of the instruction.
+ Code Opcode(uint16_t inst_data) const {
+ DCHECK_EQ(inst_data, Fetch16(0));
+ return static_cast<Code>(inst_data & 0xFF);
+ }
- // Returns the opcode field of the instruction.
+ // Returns the opcode field of the instruction from its first 16 bits.
Code Opcode() const {
- return static_cast<Code>(Fetch16(0) & 0xFF);
+ return Opcode(Fetch16(0));
}
void SetOpcode(Code opcode) {
@@ -395,28 +493,43 @@ class Instruction {
// Dump code_units worth of this instruction, padding to code_units for shorter instructions
std::string DumpHex(size_t code_units) const;
- private:
- size_t SizeInCodeUnitsComplexOpcode() const;
-
uint16_t Fetch16(size_t offset) const {
const uint16_t* insns = reinterpret_cast<const uint16_t*>(this);
return insns[offset];
}
+ private:
+ size_t SizeInCodeUnitsComplexOpcode() const;
+
uint32_t Fetch32(size_t offset) const {
return (Fetch16(offset) | ((uint32_t) Fetch16(offset + 1) << 16));
}
uint4_t InstA() const {
- return static_cast<uint4_t>((Fetch16(0) >> 8) & 0x0f);
+ return InstA(Fetch16(0));
}
uint4_t InstB() const {
- return static_cast<uint4_t>(Fetch16(0) >> 12);
+ return InstB(Fetch16(0));
}
uint8_t InstAA() const {
- return static_cast<uint8_t>(Fetch16(0) >> 8);
+ return InstAA(Fetch16(0));
+ }
+
+ uint4_t InstA(uint16_t inst_data) const {
+ DCHECK_EQ(inst_data, Fetch16(0));
+ return static_cast<uint4_t>((inst_data >> 8) & 0x0f);
+ }
+
+ uint4_t InstB(uint16_t inst_data) const {
+ DCHECK_EQ(inst_data, Fetch16(0));
+ return static_cast<uint4_t>(inst_data >> 12);
+ }
+
+ uint8_t InstAA(uint16_t inst_data) const {
+ DCHECK_EQ(inst_data, Fetch16(0));
+ return static_cast<uint8_t>(inst_data >> 8);
}
static const char* const kInstructionNames[];
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 52f8c81ab6..d9c9e3141a 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -33,20 +33,20 @@
namespace art {
-// Helper function to allocate array for FILLED_NEW_ARRAY.
-mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* referrer,
- int32_t component_count, Thread* self,
- bool access_check) {
+static inline bool CheckFilledNewArrayAlloc(uint32_t type_idx, mirror::ArtMethod* referrer,
+ int32_t component_count, Thread* self,
+ bool access_check, mirror::Class** klass_ptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
- return NULL; // Failure
+ return false; // Failure
}
- mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->Get(type_idx);
+ mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer);
if (klass == NULL) { // Error
DCHECK(self->IsExceptionPending());
- return NULL; // Failure
+ return false; // Failure
}
}
if (UNLIKELY(klass->IsPrimitive() && !klass->IsPrimitiveInt())) {
@@ -60,18 +60,40 @@ mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod*
"Found type %s; filled-new-array not implemented for anything but \'int\'",
PrettyDescriptor(klass).c_str());
}
- return NULL; // Failure
- } else {
- if (access_check) {
- mirror::Class* referrer_klass = referrer->GetDeclaringClass();
- if (UNLIKELY(!referrer_klass->CanAccess(klass))) {
- ThrowIllegalAccessErrorClass(referrer_klass, klass);
- return NULL; // Failure
- }
+ return false; // Failure
+ }
+ if (access_check) {
+ mirror::Class* referrer_klass = referrer->GetDeclaringClass();
+ if (UNLIKELY(!referrer_klass->CanAccess(klass))) {
+ ThrowIllegalAccessErrorClass(referrer_klass, klass);
+ return false; // Failure
}
- DCHECK(klass->IsArrayClass()) << PrettyClass(klass);
- return mirror::Array::Alloc(self, klass, component_count);
}
+ DCHECK(klass->IsArrayClass()) << PrettyClass(klass);
+ *klass_ptr = klass;
+ return true;
+}
+
+// Helper function to allocate array for FILLED_NEW_ARRAY.
+mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* referrer,
+ int32_t component_count, Thread* self,
+ bool access_check) {
+ mirror::Class* klass;
+ if (UNLIKELY(!CheckFilledNewArrayAlloc(type_idx, referrer, component_count, self, access_check, &klass))) {
+ return NULL;
+ }
+ return mirror::Array::AllocUninstrumented(self, klass, component_count);
+}
+
+// Helper function to allocate array for FILLED_NEW_ARRAY.
+mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* referrer,
+ int32_t component_count, Thread* self,
+ bool access_check) {
+ mirror::Class* klass;
+ if (UNLIKELY(!CheckFilledNewArrayAlloc(type_idx, referrer, component_count, self, access_check, &klass))) {
+ return NULL;
+ }
+ return mirror::Array::AllocInstrumented(self, klass, component_count);
}
mirror::ArtField* FindFieldFromCode(uint32_t field_idx, const mirror::ArtMethod* referrer,
@@ -405,5 +427,4 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char
return zero;
}
}
-
} // namespace art
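The CheckFilledNewArrayAlloc split above lets the instrumented and uninstrumented allocators share every check and diverge only at the final Alloc call. A compilable toy of that shape, with placeholder types standing in for the runtime's (nothing below is ART API):

// Sketch of the check/alloc split; Klass, Array, and the allocators are
// placeholders, not the runtime's real types.
struct Klass {};
struct Array {};
static Klass g_klass;
static Array g_array;

static Array* AllocUninstrumented(Klass*, int) { return &g_array; }
static Array* AllocInstrumented(Klass*, int) { return &g_array; }

// Shared validation: on success the resolved class is returned via klass_out.
static bool CheckArrayAllocSketch(int component_count, Klass** klass_out) {
  if (component_count < 0) {
    return false;  // Failure: the real code throws before returning.
  }
  *klass_out = &g_klass;  // Stands in for dex-cache lookup plus access checks.
  return true;
}

static Array* AllocArraySketch(int component_count, bool instrumented) {
  Klass* klass = nullptr;
  if (!CheckArrayAllocSketch(component_count, &klass)) {
    return nullptr;
  }
  // Only the final allocation call differs between the two entrypoints.
  return instrumented ? AllocInstrumented(klass, component_count)
                      : AllocUninstrumented(klass, component_count);
}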
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 8b58cb332c..e87dc96c97 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -16,7 +16,8 @@
#ifndef ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
#define ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
-#include "object_utils.h"
+
+#include "base/macros.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file.h"
@@ -27,6 +28,7 @@
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/throwable.h"
+#include "object_utils.h"
#include "thread.h"
@@ -38,21 +40,18 @@ namespace mirror {
class Object;
} // namespace mirror
-// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
-// cannot be resolved, throw an error. If it can, use it to create an instance.
-// When verification/compiler hasn't been able to verify access, optionally perform an access
-// check.
-static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::ArtMethod* method,
- Thread* self,
- bool access_check)
+static inline bool CheckObjectAlloc(uint32_t type_idx, mirror::ArtMethod* method,
+ Thread* self,
+ bool access_check,
+ mirror::Class** klass_ptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx);
+ mirror::Class* klass = method->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
Runtime* runtime = Runtime::Current();
if (UNLIKELY(klass == NULL)) {
klass = runtime->GetClassLinker()->ResolveType(type_idx, method);
if (klass == NULL) {
DCHECK(self->IsExceptionPending());
- return NULL; // Failure
+ return false; // Failure
}
}
if (access_check) {
@@ -60,40 +59,63 @@ static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::Art
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;",
PrettyDescriptor(klass).c_str());
- return NULL; // Failure
+ return false; // Failure
}
mirror::Class* referrer = method->GetDeclaringClass();
if (UNLIKELY(!referrer->CanAccess(klass))) {
ThrowIllegalAccessErrorClass(referrer, klass);
- return NULL; // Failure
+ return false; // Failure
}
}
if (!klass->IsInitialized() &&
!runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) {
DCHECK(self->IsExceptionPending());
- return NULL; // Failure
+ return false; // Failure
}
- return klass->AllocObject(self);
+ *klass_ptr = klass;
+ return true;
}
-// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
-// it cannot be resolved, throw an error. If it can, use it to create an array.
+// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
+// cannot be resolved, throw an error. If it can, use it to create an instance.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
- int32_t component_count,
- Thread* self, bool access_check)
+static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::ArtMethod* method,
+ Thread* self,
+ bool access_check)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* klass;
+ if (UNLIKELY(!CheckObjectAlloc(type_idx, method, self, access_check, &klass))) {
+ return NULL;
+ }
+ return klass->AllocObjectUninstrumented(self);
+}
+
+static inline mirror::Object* AllocObjectFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
+ Thread* self,
+ bool access_check)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* klass;
+ if (UNLIKELY(!CheckObjectAlloc(type_idx, method, self, access_check, &klass))) {
+ return NULL;
+ }
+ return klass->AllocObjectInstrumented(self);
+}
+
+static inline bool CheckArrayAlloc(uint32_t type_idx, mirror::ArtMethod* method,
+ int32_t component_count,
+ bool access_check, mirror::Class** klass_ptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
- return NULL; // Failure
+ return false; // Failure
}
- mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx);
+ mirror::Class* klass = method->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
if (klass == NULL) { // Error
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL; // Failure
+ return false; // Failure
}
CHECK(klass->IsArrayClass()) << PrettyClass(klass);
}
@@ -101,10 +123,37 @@ static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::ArtMe
mirror::Class* referrer = method->GetDeclaringClass();
if (UNLIKELY(!referrer->CanAccess(klass))) {
ThrowIllegalAccessErrorClass(referrer, klass);
- return NULL; // Failure
+ return false; // Failure
}
}
- return mirror::Array::Alloc(self, klass, component_count);
+ *klass_ptr = klass;
+ return true;
+}
+
+// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
+// it cannot be resolved, throw an error. If it can, use it to create an array.
+// When verification/compiler hasn't been able to verify access, optionally perform an access
+// check.
+static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self, bool access_check)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* klass;
+ if (UNLIKELY(!CheckArrayAlloc(type_idx, method, component_count, access_check, &klass))) {
+ return NULL;
+ }
+ return mirror::Array::AllocUninstrumented(self, klass, component_count);
+}
+
+static inline mirror::Array* AllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self, bool access_check)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* klass;
+ if (UNLIKELY(!CheckArrayAlloc(type_idx, method, component_count, access_check, &klass))) {
+ return NULL;
+ }
+ return mirror::Array::AllocInstrumented(self, klass, component_count);
}
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
@@ -112,6 +161,11 @@ extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtM
Thread* self, bool access_check)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self, bool access_check)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Type of find field operation for fast and slow case.
enum FindFieldType {
InstanceObjectRead,
@@ -416,6 +470,23 @@ static inline void* GetJniDlsymLookupStub() {
return reinterpret_cast<void*>(art_jni_dlsym_lookup_stub);
}
+template <typename INT_TYPE, typename FLOAT_TYPE>
+static inline INT_TYPE art_float_to_integral(FLOAT_TYPE f) {
+ const INT_TYPE kMaxInt = static_cast<INT_TYPE>(std::numeric_limits<INT_TYPE>::max());
+ const INT_TYPE kMinInt = static_cast<INT_TYPE>(std::numeric_limits<INT_TYPE>::min());
+ const FLOAT_TYPE kMaxIntAsFloat = static_cast<FLOAT_TYPE>(kMaxInt);
+ const FLOAT_TYPE kMinIntAsFloat = static_cast<FLOAT_TYPE>(kMinInt);
+ if (LIKELY(f > kMinIntAsFloat)) {
+ if (LIKELY(f < kMaxIntAsFloat)) {
+ return static_cast<INT_TYPE>(f);
+ } else {
+ return kMaxInt;
+ }
+ } else {
+ return (f != f) ? 0 : kMinInt; // f != f implies NaN
+ }
+}
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
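art_float_to_integral encodes Java's saturating narrowing rules: values past either bound clamp to the integer limits, and NaN (the only value for which f != f holds) becomes 0. A standalone restatement with asserts, to make those rules concrete:

#include <cassert>
#include <cstdint>
#include <limits>

// Restatement of the template above so its saturating, NaN-safe semantics
// can be checked in isolation.
template <typename I, typename F>
static I FloatToIntegral(F f) {
  const I kMax = std::numeric_limits<I>::max();
  const I kMin = std::numeric_limits<I>::min();
  if (f > static_cast<F>(kMin)) {
    if (f < static_cast<F>(kMax)) {
      return static_cast<I>(f);      // In range: plain truncation.
    }
    return kMax;                     // Saturate toward +infinity.
  }
  return (f != f) ? 0 : kMin;        // NaN maps to 0, -infinity to the minimum.
}

int main() {
  assert((FloatToIntegral<int32_t, double>(1e300)) == std::numeric_limits<int32_t>::max());
  assert((FloatToIntegral<int32_t, double>(-1e300)) == std::numeric_limits<int32_t>::min());
  assert((FloatToIntegral<int64_t, float>(42.5f)) == 42);
  assert((FloatToIntegral<int32_t, double>(std::numeric_limits<double>::quiet_NaN())) == 0);
  return 0;
}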
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index ecf98bc722..05c02f22fa 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -31,7 +31,15 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& m
mirror::ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
if (method->IsStatic()) {
- Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), true, true);
+ mirror::Class* declaringClass = method->GetDeclaringClass();
+ if (UNLIKELY(!declaringClass->IsInitializing())) {
+ if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaringClass,
+ true, true))) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ return;
+ }
+ CHECK(declaringClass->IsInitializing());
+ }
}
uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
#if defined(ART_USE_PORTABLE_COMPILER)
@@ -40,7 +48,7 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& m
method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]);
#else
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
- (shadow_frame->NumberOfVRegs() - arg_offset) * 4,
+ (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
result, mh.GetShorty()[0]);
#endif
}
diff --git a/runtime/entrypoints/math_entrypoints.cc b/runtime/entrypoints/math_entrypoints.cc
index 31d13c8cd5..b839b6317d 100644
--- a/runtime/entrypoints/math_entrypoints.cc
+++ b/runtime/entrypoints/math_entrypoints.cc
@@ -16,6 +16,8 @@
#include "math_entrypoints.h"
+#include "entrypoint_utils.h"
+
namespace art {
extern "C" double art_l2d(int64_t l) {
@@ -31,59 +33,19 @@ extern "C" float art_l2f(int64_t l) {
* target doesn't support this normally, use these.
*/
extern "C" int64_t art_d2l(double d) {
- static const double kMaxLong = static_cast<double>(static_cast<int64_t>(0x7fffffffffffffffULL));
- static const double kMinLong = static_cast<double>(static_cast<int64_t>(0x8000000000000000ULL));
- if (d >= kMaxLong) {
- return static_cast<int64_t>(0x7fffffffffffffffULL);
- } else if (d <= kMinLong) {
- return static_cast<int64_t>(0x8000000000000000ULL);
- } else if (d != d) { // NaN case
- return 0;
- } else {
- return static_cast<int64_t>(d);
- }
+ return art_float_to_integral<int64_t, double>(d);
}
extern "C" int64_t art_f2l(float f) {
- static const float kMaxLong = static_cast<float>(static_cast<int64_t>(0x7fffffffffffffffULL));
- static const float kMinLong = static_cast<float>(static_cast<int64_t>(0x8000000000000000ULL));
- if (f >= kMaxLong) {
- return static_cast<int64_t>(0x7fffffffffffffffULL);
- } else if (f <= kMinLong) {
- return static_cast<int64_t>(0x8000000000000000ULL);
- } else if (f != f) { // NaN case
- return 0;
- } else {
- return static_cast<int64_t>(f);
- }
+ return art_float_to_integral<int64_t, float>(f);
}
extern "C" int32_t art_d2i(double d) {
- static const double kMaxInt = static_cast<double>(static_cast<int32_t>(0x7fffffffUL));
- static const double kMinInt = static_cast<double>(static_cast<int32_t>(0x80000000UL));
- if (d >= kMaxInt) {
- return static_cast<int32_t>(0x7fffffffUL);
- } else if (d <= kMinInt) {
- return static_cast<int32_t>(0x80000000UL);
- } else if (d != d) { // NaN case
- return 0;
- } else {
- return static_cast<int32_t>(d);
- }
+ return art_float_to_integral<int32_t, double>(d);
}
extern "C" int32_t art_f2i(float f) {
- static const float kMaxInt = static_cast<float>(static_cast<int32_t>(0x7fffffffUL));
- static const float kMinInt = static_cast<float>(static_cast<int32_t>(0x80000000UL));
- if (f >= kMaxInt) {
- return static_cast<int32_t>(0x7fffffffUL);
- } else if (f <= kMinInt) {
- return static_cast<int32_t>(0x80000000UL);
- } else if (f != f) { // NaN case
- return 0;
- } else {
- return static_cast<int32_t>(f);
- }
+ return art_float_to_integral<int32_t, float>(f);
}
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 420e63a1bb..6f7b1ab19b 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -76,4 +76,57 @@ extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t
return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true);
}
+extern "C" mirror::Object* artAllocObjectFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
+ Thread* self, mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return AllocObjectFromCodeInstrumented(type_idx, method, self, false);
+}
+
+extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheckInstrumented(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return AllocObjectFromCodeInstrumented(type_idx, method, self, true);
+}
+
+extern "C" mirror::Array* artAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* method,
+ int32_t component_count, Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return AllocArrayFromCodeInstrumented(type_idx, method, component_count, self, false);
+}
+
+extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheckInstrumented(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return AllocArrayFromCodeInstrumented(type_idx, method, component_count, self, true);
+}
+
+extern "C" mirror::Array* artCheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count, Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return CheckAndAllocArrayFromCodeInstrumented(type_idx, method, component_count, self, false);
+}
+
+extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheckInstrumented(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return CheckAndAllocArrayFromCodeInstrumented(type_idx, method, component_count, self, true);
+}
+
} // namespace art
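Each quick allocation entrypoint now has an Instrumented twin so the runtime can retarget compiled code when allocation tracking is enabled. A toy sketch of the table-swap idea; the EntryPoints struct and SetInstrumented helper below are illustrative only, not the mechanism shown in this patch:

#include <cstdio>

struct Obj {};
static Obj g_obj;

static Obj* AllocFast(int) { return &g_obj; }   // Uninstrumented path.
static Obj* AllocTracked(int) {
  std::puts("alloc recorded");                  // Extra bookkeeping.
  return &g_obj;
}

struct EntryPoints {
  Obj* (*alloc_array)(int);
};
static EntryPoints g_entrypoints = { AllocFast };

// Flipping one table slot retargets every future call site at once.
static void SetInstrumented(bool on) {
  g_entrypoints.alloc_array = on ? AllocTracked : AllocFast;
}

int main() {
  g_entrypoints.alloc_array(4);   // Fast path.
  SetInstrumented(true);
  g_entrypoints.alloc_array(4);   // Now routes to the tracked variant.
}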
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index cb486d5827..12291c39a9 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -423,13 +423,23 @@ class RememberFoGcArgumentVisitor : public QuickArgumentVisitor {
virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsParamAReference()) {
- soa_->AddLocalReference<jobject>(*reinterpret_cast<mirror::Object**>(GetParamAddress()));
+ mirror::Object** param_address = reinterpret_cast<mirror::Object**>(GetParamAddress());
+ jobject reference = soa_->AddLocalReference<jobject>(*param_address);
+ references_.push_back(std::make_pair(reference, param_address));
+ }
+ }
+
+ void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Fixup any references which may have changed.
+ for (std::pair<jobject, mirror::Object**>& it : references_) {
+ *it.second = soa_->Decode<mirror::Object*>(it.first);
}
}
private:
ScopedObjectAccessUnchecked* soa_;
-
+ std::vector<std::pair<jobject, mirror::Object**> > references_;
DISALLOW_COPY_AND_ASSIGN(RememberFoGcArgumentVisitor);
};
@@ -556,11 +566,8 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
}
}
CHECK_EQ(code == NULL, thread->IsExceptionPending());
-#ifdef MOVING_GARBAGE_COLLECTOR
- // TODO: locally saved objects may have moved during a GC during resolution. Need to update the
- // registers so that the stale objects aren't passed to the method we've resolved.
- UNIMPLEMENTED(WARNING);
-#endif
+ // Fix up any locally saved objects that may have moved during a GC.
+ visitor.FixupReferences();
// Place called method in callee-save frame to be placed as first argument to quick method.
*sp = called;
return code;
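The visitor now records each reference argument twice: as a jobject root, so a GC during resolution keeps the object alive and may move it, and as the raw stack slot it was read from, which FixupReferences rewrites afterwards. A compilable miniature of that record-then-fixup pattern; Pin and Decode are stand-ins for the local-reference API, and this toy root table never actually moves anything:

#include <utility>
#include <vector>

struct Obj {};
using Handle = size_t;                 // Stand-in for jobject.

static std::vector<Obj*> g_roots;      // Stand-in root table; a real moving GC
                                       // would update these entries in place.
static Handle Pin(Obj* o) {            // Models AddLocalReference.
  g_roots.push_back(o);
  return g_roots.size() - 1;
}
static Obj* Decode(Handle h) {         // Models Decode: the current address.
  return g_roots[h];
}

class ArgFixup {
 public:
  void Record(Obj** slot) {
    // Remember both the root handle and the stack slot it was read from.
    refs_.push_back(std::make_pair(Pin(*slot), slot));
  }
  void Fixup() {
    // After a GC may have run, write the possibly-moved objects back.
    for (auto& p : refs_) {
      *p.second = Decode(p.first);
    }
  }
 private:
  std::vector<std::pair<Handle, Obj**>> refs_;
};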
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index 29450c1d34..fb425df78a 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -28,9 +28,11 @@ namespace accounting {
// A mod-union table to record image references to the Zygote and alloc space.
class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
public:
- explicit ModUnionTableToZygoteAllocspace(Heap* heap) : ModUnionTableReferenceCache(heap) {}
+ explicit ModUnionTableToZygoteAllocspace(const std::string& name, Heap* heap,
+ space::ContinuousSpace* space)
+ : ModUnionTableReferenceCache(name, heap, space) {}
- bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+ bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) ALWAYS_INLINE {
const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
typedef std::vector<space::ContinuousSpace*>::const_iterator It;
for (It it = spaces.begin(); it != spaces.end(); ++it) {
@@ -47,16 +49,18 @@ class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
// A mod-union table to record Zygote references to the alloc space.
class ModUnionTableToAllocspace : public ModUnionTableReferenceCache {
public:
- explicit ModUnionTableToAllocspace(Heap* heap) : ModUnionTableReferenceCache(heap) {}
+ explicit ModUnionTableToAllocspace(const std::string& name, Heap* heap,
+ space::ContinuousSpace* space)
+ : ModUnionTableReferenceCache(name, heap, space) {}
- bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+ bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) ALWAYS_INLINE {
const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
typedef std::vector<space::ContinuousSpace*>::const_iterator It;
for (It it = spaces.begin(); it != spaces.end(); ++it) {
space::ContinuousSpace* space = *it;
if (space->Contains(ref)) {
// The allocation space is always considered for collection whereas the Zygote space is
- //
+ // only considered for full GC.
return space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
}
}
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 486521951a..7cbe94d3d2 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -19,6 +19,7 @@
#include "base/stl_util.h"
#include "card_table-inl.h"
#include "heap_bitmap.h"
+#include "gc/collector/mark_sweep.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/heap.h"
#include "gc/space/space.h"
@@ -67,60 +68,87 @@ class ModUnionClearCardVisitor {
std::vector<byte*>* const cleared_cards_;
};
+class ModUnionUpdateObjectReferencesVisitor {
+ public:
+ ModUnionUpdateObjectReferencesVisitor(RootVisitor visitor, void* arg)
+ : visitor_(visitor),
+ arg_(arg) {
+ }
+
+ // Extra parameters are required since we use this same visitor signature for checking objects.
+ void operator()(Object* obj, Object* ref, const MemberOffset& offset,
+ bool /* is_static */) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Only update the reference if it is non null.
+ if (ref != nullptr) {
+ Object* new_ref = visitor_(ref, arg_);
+ if (new_ref != ref) {
+ obj->SetFieldObject(offset, new_ref, false, true);
+ }
+ }
+ }
+
+ private:
+ RootVisitor* visitor_;
+ void* arg_;
+};
+
class ModUnionScanImageRootVisitor {
public:
- explicit ModUnionScanImageRootVisitor(collector::MarkSweep* const mark_sweep)
- : mark_sweep_(mark_sweep) {}
+ ModUnionScanImageRootVisitor(RootVisitor visitor, void* arg)
+ : visitor_(visitor), arg_(arg) {}
- void operator()(const Object* root) const
+ void operator()(Object* root) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(root != NULL);
- mark_sweep_->ScanRoot(root);
+ ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_, arg_);
+ collector::MarkSweep::VisitObjectReferences(root, ref_visitor, true);
}
private:
- collector::MarkSweep* const mark_sweep_;
+ RootVisitor* visitor_;
+ void* arg_;
};
-void ModUnionTableReferenceCache::ClearCards(space::ContinuousSpace* space) {
+void ModUnionTableReferenceCache::ClearCards() {
CardTable* card_table = GetHeap()->GetCardTable();
ModUnionClearCardSetVisitor visitor(&cleared_cards_);
// Clear dirty cards in this space and update the corresponding mod-union bits.
- card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
+ card_table->ModifyCardsAtomic(space_->Begin(), space_->End(), AgeCardVisitor(), visitor);
}
class AddToReferenceArrayVisitor {
public:
explicit AddToReferenceArrayVisitor(ModUnionTableReferenceCache* mod_union_table,
- std::vector<const Object*>* references)
+ std::vector<Object**>* references)
: mod_union_table_(mod_union_table),
references_(references) {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+ void operator()(Object* obj, Object* ref, const MemberOffset& offset,
bool /* is_static */) const {
// Only add the reference if it is non null and fits our criteria.
- if (ref != NULL && mod_union_table_->AddReference(obj, ref)) {
- references_->push_back(ref);
+ if (ref != nullptr && mod_union_table_->AddReference(obj, ref)) {
+ // Push the address of the reference.
+ references_->push_back(obj->GetFieldObjectAddr(offset));
}
}
private:
ModUnionTableReferenceCache* const mod_union_table_;
- std::vector<const Object*>* const references_;
+ std::vector<Object**>* const references_;
};
class ModUnionReferenceVisitor {
public:
explicit ModUnionReferenceVisitor(ModUnionTableReferenceCache* const mod_union_table,
- std::vector<const Object*>* references)
+ std::vector<Object**>* references)
: mod_union_table_(mod_union_table),
references_(references) {
}
- void operator()(const Object* obj) const
+ void operator()(Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
DCHECK(obj != NULL);
// We don't have an early exit since we use the visitor pattern, an early
@@ -130,7 +158,7 @@ class ModUnionReferenceVisitor {
}
private:
ModUnionTableReferenceCache* const mod_union_table_;
- std::vector<const Object*>* const references_;
+ std::vector<Object**>* const references_;
};
class CheckReferenceVisitor {
@@ -143,8 +171,8 @@ class CheckReferenceVisitor {
// Extra parameters are required since we use this same visitor signature for checking objects.
// TODO: Fixme when annotalysis works with visitors.
- void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const
+ void operator()(const Object* obj, const Object* ref,
+ const MemberOffset& /* offset */, bool /* is_static */) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
Heap* heap = mod_union_table_->GetHeap();
if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
@@ -174,7 +202,7 @@ class ModUnionCheckReferences {
: mod_union_table_(mod_union_table), references_(references) {
}
- void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
DCHECK(obj != NULL);
CheckReferenceVisitor visitor(mod_union_table_, references_);
@@ -188,26 +216,25 @@ class ModUnionCheckReferences {
void ModUnionTableReferenceCache::Verify() {
// Start by checking that everything in the mod union table is marked.
- Heap* heap = GetHeap();
- for (const std::pair<const byte*, std::vector<const Object*> >& it : references_) {
- for (const Object* ref : it.second) {
- CHECK(heap->IsLiveObjectLocked(ref));
+ for (const auto& ref_pair : references_) {
+ for (Object** ref : ref_pair.second) {
+ CHECK(heap_->IsLiveObjectLocked(*ref));
}
}
// Check the references of each clean card which is also in the mod union table.
- CardTable* card_table = heap->GetCardTable();
- for (const std::pair<const byte*, std::vector<const Object*> > & it : references_) {
- const byte* card = it.first;
+ CardTable* card_table = heap_->GetCardTable();
+ SpaceBitmap* live_bitmap = space_->GetLiveBitmap();
+ for (const auto& ref_pair : references_) {
+ const byte* card = ref_pair.first;
if (*card == CardTable::kCardClean) {
- std::set<const Object*> reference_set(it.second.begin(), it.second.end());
+ std::set<const Object*> reference_set;
+ for (Object** obj_ptr : ref_pair.second) {
+ reference_set.insert(*obj_ptr);
+ }
ModUnionCheckReferences visitor(this, reference_set);
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + CardTable::kCardSize;
- auto* space = heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
- DCHECK(space != nullptr);
- SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- live_bitmap->VisitMarkedRange(start, end, visitor);
+ live_bitmap->VisitMarkedRange(start, start + CardTable::kCardSize, visitor);
}
}
}
@@ -221,24 +248,24 @@ void ModUnionTableReferenceCache::Dump(std::ostream& os) {
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
}
os << "]\nModUnionTable references: [";
- for (const std::pair<const byte*, std::vector<const Object*> >& it : references_) {
- const byte* card_addr = it.first;
+ for (const auto& ref_pair : references_) {
+ const byte* card_addr = ref_pair.first;
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
uintptr_t end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
- for (const mirror::Object* ref : it.second) {
- os << reinterpret_cast<const void*>(ref) << ",";
+ for (Object** ref : ref_pair.second) {
+ os << reinterpret_cast<const void*>(*ref) << ",";
}
os << "},";
}
}
-void ModUnionTableReferenceCache::Update() {
+void ModUnionTableReferenceCache::UpdateAndMarkReferences(RootVisitor visitor, void* arg) {
Heap* heap = GetHeap();
CardTable* card_table = heap->GetCardTable();
- std::vector<const Object*> cards_references;
- ModUnionReferenceVisitor visitor(this, &cards_references);
+ std::vector<Object**> cards_references;
+ ModUnionReferenceVisitor add_visitor(this, &cards_references);
for (const auto& card : cleared_cards_) {
// Clear and re-compute alloc space references associated with this card.
@@ -248,7 +275,7 @@ void ModUnionTableReferenceCache::Update() {
auto* space = heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
DCHECK(space != nullptr);
SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- live_bitmap->VisitMarkedRange(start, end, visitor);
+ live_bitmap->VisitMarkedRange(start, end, add_visitor);
// Update the corresponding references for the card.
auto found = references_.find(card);
@@ -263,46 +290,41 @@ void ModUnionTableReferenceCache::Update() {
}
}
cleared_cards_.clear();
-}
-
-void ModUnionTableReferenceCache::MarkReferences(collector::MarkSweep* mark_sweep) {
size_t count = 0;
-
for (const auto& ref : references_) {
- for (const auto& obj : ref.second) {
- mark_sweep->MarkRoot(obj);
- ++count;
+ for (const auto& obj_ptr : ref.second) {
+ Object* obj = *obj_ptr;
+ if (obj != nullptr) {
+ Object* new_obj = visitor(obj, arg);
+ // Avoid dirtying pages in the image unless necessary.
+ if (new_obj != obj) {
+ *obj_ptr = new_obj;
+ }
+ }
}
+ count += ref.second.size();
}
if (VLOG_IS_ON(heap)) {
VLOG(gc) << "Marked " << count << " references in mod union table";
}
}
-void ModUnionTableCardCache::ClearCards(space::ContinuousSpace* space) {
+void ModUnionTableCardCache::ClearCards() {
CardTable* card_table = GetHeap()->GetCardTable();
ModUnionClearCardSetVisitor visitor(&cleared_cards_);
// Clear dirty cards in this space and update the corresponding mod-union bits.
- card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
+ card_table->ModifyCardsAtomic(space_->Begin(), space_->End(), AgeCardVisitor(), visitor);
}
// Mark all references to the alloc space(s).
-void ModUnionTableCardCache::MarkReferences(collector::MarkSweep* mark_sweep) {
+void ModUnionTableCardCache::UpdateAndMarkReferences(RootVisitor visitor, void* arg) {
CardTable* card_table = heap_->GetCardTable();
- ModUnionScanImageRootVisitor visitor(mark_sweep);
- space::ContinuousSpace* space = nullptr;
- SpaceBitmap* bitmap = nullptr;
+ ModUnionScanImageRootVisitor scan_visitor(visitor, arg);
+ SpaceBitmap* bitmap = space_->GetLiveBitmap();
for (const byte* card_addr : cleared_cards_) {
- auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
- auto end = start + CardTable::kCardSize;
- auto obj_start = reinterpret_cast<Object*>(start);
- if (UNLIKELY(space == nullptr || !space->Contains(obj_start))) {
- space = heap_->FindContinuousSpaceFromObject(obj_start, false);
- DCHECK(space != nullptr);
- bitmap = space->GetLiveBitmap();
- DCHECK(bitmap != nullptr);
- }
- bitmap->VisitMarkedRange(start, end, visitor);
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
+ DCHECK(space_->HasAddress(reinterpret_cast<Object*>(start)));
+ bitmap->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
}
}
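With these changes a mod-union table is bound to one space at construction, and UpdateAndMarkReferences folds the old Update and MarkReferences steps into a single pass driven by a RootVisitor, writing back any slot whose object the visitor relocated. A toy model of that flow under simplified types (no card table, rescan elided):

#include <set>
#include <vector>

struct Obj {};
using RootVisitor = Obj* (*)(Obj* root, void* arg);

class ToyModUnionTable {
 public:
  // Pause phase: only record which cards were dirtied; the expensive
  // reference scan is deferred (mirrors ClearCards above).
  void NoteClearedCard(const void* card) {
    cleared_cards_.insert(card);
  }
  // Marking phase: refresh cached slots for cleared cards, then visit every
  // cached slot, writing back objects the collector moved.
  void UpdateAndMarkReferences(RootVisitor visitor, void* arg) {
    // ... rescan cleared_cards_ to refresh refs_ (omitted) ...
    for (Obj** slot : refs_) {
      Obj* obj = *slot;
      if (obj != nullptr) {
        Obj* new_obj = visitor(obj, arg);
        if (new_obj != obj) {        // Avoid dirtying image pages needlessly.
          *slot = new_obj;
        }
      }
    }
    cleared_cards_.clear();
  }
 private:
  std::set<const void*> cleared_cards_;
  std::vector<Obj**> refs_;
};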
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index eb7a754d24..d874c6080c 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -19,6 +19,7 @@
#include "gc_allocator.h"
#include "globals.h"
+#include "root_visitor.h"
#include "safe_map.h"
#include <set>
@@ -52,21 +53,23 @@ class ModUnionTable {
public:
typedef std::set<byte*, std::less<byte*>, GCAllocator<byte*> > CardSet;
- explicit ModUnionTable(Heap* heap) : heap_(heap) {}
+ explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
+ : name_(name),
+ heap_(heap),
+ space_(space) {
+ }
virtual ~ModUnionTable() {}
// Clear cards which map to a memory range of a space. This doesn't immediately update the
// mod-union table, as updating the mod-union table may have an associated cost, such as
// determining references to track.
- virtual void ClearCards(space::ContinuousSpace* space) = 0;
+ virtual void ClearCards() = 0;
// Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
- // before a call to update, for example, back-to-back sticky GCs.
- virtual void Update() = 0;
-
- // Mark the bitmaps for all references which are stored in the mod-union table.
- virtual void MarkReferences(collector::MarkSweep* mark_sweep) = 0;
+ // before a call to update, for example, back-to-back sticky GCs. Also mark references to other
+ // spaces which are stored in the mod-union table.
+ virtual void UpdateAndMarkReferences(RootVisitor visitor, void* arg) = 0;
// Verification, sanity checks that we don't have clean cards which conflict with our cached data
// for said cards. Exclusive lock is required since verify sometimes uses
@@ -75,31 +78,35 @@ class ModUnionTable {
virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
virtual void Dump(std::ostream& os) = 0;
-
+ space::ContinuousSpace* GetSpace() {
+ return space_;
+ }
Heap* GetHeap() const {
return heap_;
}
+ const std::string& GetName() const {
+ return name_;
+ }
protected:
+ const std::string name_;
Heap* const heap_;
+ space::ContinuousSpace* const space_;
};
// Reference caching implementation. Caches references pointing to alloc space(s) for each card.
class ModUnionTableReferenceCache : public ModUnionTable {
public:
- explicit ModUnionTableReferenceCache(Heap* heap) : ModUnionTable(heap) {}
+ explicit ModUnionTableReferenceCache(const std::string& name, Heap* heap,
+ space::ContinuousSpace* space)
+ : ModUnionTable(name, heap, space) {}
virtual ~ModUnionTableReferenceCache() {}
// Clear and store cards for a space.
- void ClearCards(space::ContinuousSpace* space);
+ void ClearCards();
- // Update table based on cleared cards.
- void Update()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Mark all references to the alloc space(s).
- void MarkReferences(collector::MarkSweep* mark_sweep)
+ // Update table based on cleared cards and mark all references to the other spaces.
+ void UpdateAndMarkReferences(RootVisitor visitor, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -117,24 +124,22 @@ class ModUnionTableReferenceCache : public ModUnionTable {
ModUnionTable::CardSet cleared_cards_;
// Maps from dirty cards to their corresponding alloc space references.
- SafeMap<const byte*, std::vector<const mirror::Object*>, std::less<const byte*>,
- GCAllocator<std::pair<const byte*, std::vector<const mirror::Object*> > > > references_;
+ SafeMap<const byte*, std::vector<mirror::Object**>, std::less<const byte*>,
+ GCAllocator<std::pair<const byte*, std::vector<mirror::Object**> > > > references_;
};
// Card caching implementation. Keeps track of which cards we cleared and only this information.
class ModUnionTableCardCache : public ModUnionTable {
public:
- explicit ModUnionTableCardCache(Heap* heap) : ModUnionTable(heap) {}
+ explicit ModUnionTableCardCache(const std::string& name, Heap* heap, space::ContinuousSpace* space)
+ : ModUnionTable(name, heap, space) {}
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
- void ClearCards(space::ContinuousSpace* space);
-
- // Nothing to update as all dirty cards were placed into cleared cards during clearing.
- void Update() {}
+ void ClearCards();
// Mark all references to the alloc space(s).
- void MarkReferences(collector::MarkSweep* mark_sweep)
+ void UpdateAndMarkReferences(RootVisitor visitor, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index f975692a3f..4cf88724b9 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -247,8 +247,8 @@ class SpaceSetMap {
template <typename Visitor>
void Visit(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS {
- for (Objects::iterator it = contained_.begin(); it != contained_.end(); ++it) {
- visitor(*it);
+ for (const mirror::Object* obj : contained_) {
+ visitor(const_cast<mirror::Object*>(obj));
}
}
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index 07ebd1c0e3..19159b1353 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -18,6 +18,8 @@
#define ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
// Configure dlmalloc for mspaces.
+// Avoid a collision with the HAVE_MMAP macro defined in LLVM's headers.
+#undef HAVE_MMAP
#define HAVE_MMAP 0
#define HAVE_MREMAP 0
#define HAVE_MORECORE 1
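The #undef guards against HAVE_MMAP already being defined (per the comment, by LLVM's configuration headers): redefining a macro to a different value triggers a redefinition diagnostic, while undef-then-define is always well-formed. A minimal illustration:

#define HAVE_MMAP 1   // Imagine a previously included header did this.
#undef HAVE_MMAP      // Always legal, whether or not the macro was defined.
#define HAVE_MMAP 0   // A fresh definition, so no redefinition diagnostic.

#if !HAVE_MMAP
// dlmalloc's mspace configuration proceeds with mmap support disabled.
#endif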
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index d0b0b5c930..270c9efde9 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -29,7 +29,7 @@ namespace gc {
namespace collector {
template <typename MarkVisitor>
-inline void MarkSweep::ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor) {
+inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor) {
DCHECK(obj != NULL);
if (kIsDebugBuild && !IsMarked(obj)) {
heap_->DumpSpaces();
@@ -62,7 +62,8 @@ inline void MarkSweep::ScanObjectVisit(const mirror::Object* obj, const MarkVisi
}
template <typename Visitor>
-inline void MarkSweep::VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
+inline void MarkSweep::VisitObjectReferences(mirror::Object* obj, const Visitor& visitor,
+ bool visit_class)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_) {
DCHECK(obj != NULL);
@@ -70,6 +71,9 @@ inline void MarkSweep::VisitObjectReferences(const mirror::Object* obj, const Vi
mirror::Class* klass = obj->GetClass();
DCHECK(klass != NULL);
+ if (visit_class) {
+ visitor(obj, klass, MemberOffset(0), false);
+ }
if (klass == mirror::Class::GetJavaLangClass()) {
DCHECK_EQ(klass->GetClass(), mirror::Class::GetJavaLangClass());
VisitClassReferences(klass, obj, visitor);
@@ -86,8 +90,8 @@ inline void MarkSweep::VisitObjectReferences(const mirror::Object* obj, const Vi
}
template <typename Visitor>
-inline void MarkSweep::VisitInstanceFieldsReferences(const mirror::Class* klass,
- const mirror::Object* obj,
+inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass,
+ mirror::Object* obj,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
DCHECK(obj != NULL);
@@ -96,7 +100,7 @@ inline void MarkSweep::VisitInstanceFieldsReferences(const mirror::Class* klass,
}
template <typename Visitor>
-inline void MarkSweep::VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+inline void MarkSweep::VisitClassReferences(mirror::Class* klass, mirror::Object* obj,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
VisitInstanceFieldsReferences(klass, obj, visitor);
@@ -104,15 +108,14 @@ inline void MarkSweep::VisitClassReferences(const mirror::Class* klass, const mi
}
template <typename Visitor>
-inline void MarkSweep::VisitStaticFieldsReferences(const mirror::Class* klass,
- const Visitor& visitor)
+inline void MarkSweep::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
DCHECK(klass != NULL);
VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor);
}
template <typename Visitor>
-inline void MarkSweep::VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets,
+inline void MarkSweep::VisitFieldsReferences(mirror::Object* obj, uint32_t ref_offsets,
bool is_static, const Visitor& visitor) {
if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
// Found a reference offset bitmap. Mark the specified offsets.
@@ -124,7 +127,7 @@ inline void MarkSweep::VisitFieldsReferences(const mirror::Object* obj, uint32_t
while (ref_offsets != 0) {
size_t right_shift = CLZ(ref_offsets);
MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
- const mirror::Object* ref = obj->GetFieldObject<const mirror::Object*>(field_offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object*>(field_offset, false);
visitor(obj, ref, field_offset, is_static);
ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
}
@@ -143,7 +146,7 @@ inline void MarkSweep::VisitFieldsReferences(const mirror::Object* obj, uint32_t
mirror::ArtField* field = (is_static ? klass->GetStaticField(i)
: klass->GetInstanceField(i));
MemberOffset field_offset = field->GetOffset();
- const mirror::Object* ref = obj->GetFieldObject<const mirror::Object*>(field_offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object*>(field_offset, false);
visitor(obj, ref, field_offset, is_static);
}
}
@@ -151,11 +154,11 @@ inline void MarkSweep::VisitFieldsReferences(const mirror::Object* obj, uint32_t
}
template <typename Visitor>
-inline void MarkSweep::VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+inline void MarkSweep::VisitObjectArrayReferences(mirror::ObjectArray<mirror::Object>* array,
const Visitor& visitor) {
const size_t length = static_cast<size_t>(array->GetLength());
for (size_t i = 0; i < length; ++i) {
- const mirror::Object* element = array->GetWithoutChecks(static_cast<int32_t>(i));
+ mirror::Object* element = array->GetWithoutChecks(static_cast<int32_t>(i));
const size_t width = sizeof(mirror::Object*);
MemberOffset offset(i * width + mirror::Array::DataOffset(width).Int32Value());
visitor(array, element, offset, false);
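These Visit* templates accept any functor callable as (obj, ref, offset, is_static), and dropping const from the pointers means visitors may now mutate what they find. A self-contained sketch of a conforming functor, with MemberOffset reduced to a bare wrapper for the example:

#include <cstddef>

struct Object {};
struct MemberOffset { size_t value; };

class CountNonNullRefsVisitor {
 public:
  explicit CountNonNullRefsVisitor(size_t* count) : count_(count) {}
  // Matches (obj, ref, offset, is_static); unused parameters are kept so the
  // same functor type slots into VisitObjectReferences-style templates.
  void operator()(Object* /* obj */, Object* ref,
                  const MemberOffset& /* offset */, bool /* is_static */) const {
    if (ref != nullptr) {
      ++*count_;
    }
  }
 private:
  size_t* const count_;
};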
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 6790144603..a5e66d20df 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -28,6 +28,7 @@
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
@@ -99,7 +100,7 @@ void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
} else {
const space::ContinuousSpace* prev_space = nullptr;
// Find out if the previous space is immune.
- for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
+ for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
if (cur_space == space) {
break;
}
@@ -107,15 +108,19 @@ void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
}
// If previous space was immune, then extend the immune region. Relies on continuous spaces
// being sorted by Heap::AddContinuousSpace.
- if (prev_space != NULL &&
- immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
- immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
+ if (prev_space != NULL && IsImmuneSpace(prev_space)) {
immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
}
}
}
+bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) {
+ return immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
+ immune_end_ >= reinterpret_cast<Object*>(space->End());
+}
+
void MarkSweep::BindBitmaps() {
timings_.StartSplit("BindBitmaps");
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -263,11 +268,23 @@ void MarkSweep::MarkingPhase() {
}
live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
MarkConcurrentRoots();
-
- heap_->UpdateAndMarkModUnion(this, timings_, GetGcType());
+ UpdateAndMarkModUnion();
MarkReachableObjects();
}
+void MarkSweep::UpdateAndMarkModUnion() {
+ for (const auto& space : heap_->GetContinuousSpaces()) {
+ if (IsImmuneSpace(space)) {
+ const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+ "UpdateAndMarkImageModUnionTable";
+ base::TimingLogger::ScopedSplit split(name, &timings_);
+ accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
+ CHECK(mod_union_table != nullptr);
+ mod_union_table->UpdateAndMarkReferences(MarkRootCallback, this);
+ }
+ }
+}
+
void MarkSweep::MarkThreadRoots(Thread* self) {
MarkRootsCheckpoint(self);
}
@@ -519,24 +536,18 @@ void MarkSweep::MarkRoot(const Object* obj) {
}
}
-void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
+Object* MarkSweep::MarkRootParallelCallback(Object* root, void* arg) {
DCHECK(root != NULL);
DCHECK(arg != NULL);
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
+ return root;
}
-void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
- DCHECK(root != NULL);
- DCHECK(arg != NULL);
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- mark_sweep->MarkObjectNonNull(root);
-}
-
-void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
- DCHECK(root != NULL);
- DCHECK(arg != NULL);
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- mark_sweep->MarkObjectNonNull(root);
+Object* MarkSweep::MarkRootCallback(Object* root, void* arg) {
+ DCHECK(root != nullptr);
+ DCHECK(arg != nullptr);
+ reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(root);
+ return root;
}
void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
@@ -564,30 +575,30 @@ void MarkSweep::VerifyRoots() {
// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
timings_.StartSplit("MarkRoots");
- Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
+ Runtime::Current()->VisitNonConcurrentRoots(MarkRootCallback, this);
timings_.EndSplit();
}
void MarkSweep::MarkNonThreadRoots() {
timings_.StartSplit("MarkNonThreadRoots");
- Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
+ Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
timings_.EndSplit();
}
void MarkSweep::MarkConcurrentRoots() {
timings_.StartSplit("MarkConcurrentRoots");
// Visit all runtime roots and clear dirty flags.
- Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
+ Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, false, true);
timings_.EndSplit();
}
void MarkSweep::CheckObject(const Object* obj) {
DCHECK(obj != NULL);
- VisitObjectReferences(obj, [this](const Object* obj, const Object* ref, MemberOffset offset,
- bool is_static) NO_THREAD_SAFETY_ANALYSIS {
+ VisitObjectReferences(const_cast<Object*>(obj), [this](const Object* obj, const Object* ref,
+ MemberOffset offset, bool is_static) NO_THREAD_SAFETY_ANALYSIS {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
CheckReference(obj, ref, offset, is_static);
- });
+ }, true);
}
void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
@@ -653,11 +664,11 @@ class MarkStackTask : public Task {
explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
: chunk_task_(chunk_task) {}
- void operator()(const Object* obj) const {
+ void operator()(Object* obj) const {
MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
mark_sweep->ScanObjectVisit(obj,
- [mark_sweep, this](const Object* /* obj */, const Object* ref,
- const MemberOffset& /* offset */, bool /* is_static */) ALWAYS_INLINE {
+ [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
+ bool /* is_static */) ALWAYS_INLINE {
if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
if (kUseFinger) {
android_memory_barrier();
@@ -714,11 +725,11 @@ class MarkStackTask : public Task {
static const size_t kFifoSize = 4;
BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
for (;;) {
- const Object* obj = NULL;
+ const Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
const Object* obj = mark_stack_[--mark_stack_pos_];
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
__builtin_prefetch(obj);
prefetch_fifo.push_back(obj);
}
@@ -733,8 +744,8 @@ class MarkStackTask : public Task {
}
obj = mark_stack_[--mark_stack_pos_];
}
- DCHECK(obj != NULL);
- visitor(obj);
+ DCHECK(obj != nullptr);
+ visitor(const_cast<mirror::Object*>(obj));
}
}
};
@@ -990,8 +1001,11 @@ void MarkSweep::RecursiveMark() {
ProcessMarkStack(false);
}
-bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
- return reinterpret_cast<MarkSweep*>(arg)->IsMarked(object);
+mirror::Object* MarkSweep::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+ if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
+ return object;
+ }
+ return nullptr;
}
void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
@@ -1001,45 +1015,21 @@ void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
void MarkSweep::ReMarkRoots() {
timings_.StartSplit("ReMarkRoots");
- Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
+ Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true);
timings_.EndSplit();
}
-void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
- Runtime::Current()->GetJavaVM()->SweepWeakGlobals(is_marked, arg);
-}
-
-struct ArrayMarkedCheck {
- accounting::ObjectStack* live_stack;
- MarkSweep* mark_sweep;
-};
-
-// Either marked or not live.
-bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
- ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
- if (array_check->mark_sweep->IsMarked(object)) {
- return true;
- }
- accounting::ObjectStack* live_stack = array_check->live_stack;
- if (std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End()) {
- return true;
- }
- return false;
-}
-
void MarkSweep::SweepSystemWeaks() {
Runtime* runtime = Runtime::Current();
timings_.StartSplit("SweepSystemWeaks");
- runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
- runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
- SweepJniWeakGlobals(IsMarkedCallback, this);
+ runtime->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
timings_.EndSplit();
}
-bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
+mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
// We don't actually want to sweep the object, so let's return "marked".
- return true;
+ return obj;
}
void MarkSweep::VerifyIsLive(const Object* obj) {
@@ -1058,11 +1048,8 @@ void MarkSweep::VerifyIsLive(const Object* obj) {
}
void MarkSweep::VerifySystemWeaks() {
- Runtime* runtime = Runtime::Current();
- // Verify system weaks, uses a special IsMarked callback which always returns true.
- runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
- runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);
- runtime->GetJavaVM()->SweepWeakGlobals(VerifyIsLiveCallback, this);
+ // Verify system weaks, uses a special object visitor which returns the input object.
+ Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}
struct SweepCallbackContext {
@@ -1396,7 +1383,7 @@ class MarkObjectVisitor {
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
MarkObjectVisitor visitor(this);
- ScanObjectVisit(obj, visitor);
+ ScanObjectVisit(const_cast<Object*>(obj), visitor);
}
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
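The callback rewrite is the key enabling change for a moving collector: root callbacks now return the object's current address instead of void, so visiting roots can double as updating them. MarkSweep's callbacks simply return their input; a sketch of both sides of that contract (the forwarding map below is a stand-in, not ART code):

#include <unordered_map>

struct Object {};
using RootVisitor = Object* (*)(Object* root, void* arg);

// Non-moving side: mark in place, the address never changes (the shape of
// MarkRootCallback above, minus the actual marking).
static Object* MarkingCallback(Object* root, void* /* arg */) {
  return root;
}

// Moving side: consult a forwarding table and hand back the new address.
static Object* ForwardingCallback(Object* root, void* arg) {
  auto* forward = static_cast<std::unordered_map<Object*, Object*>*>(arg);
  auto it = forward->find(root);
  return it != forward->end() ? it->second : root;
}

// Callers write the result back, so visiting a root also updates it.
static void VisitRoot(Object** slot, RootVisitor visitor, void* arg) {
  *slot = visitor(*slot, arg);
}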
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index feef992c83..19df2da0b6 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -114,6 +114,9 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImmuneSpace(const space::ContinuousSpace* space)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -137,6 +140,9 @@ class MarkSweep : public GarbageCollector {
void ProcessReferences(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void UpdateAndMarkModUnion()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Sweeps unmarked objects to complete the garbage collection.
virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -163,7 +169,7 @@ class MarkSweep : public GarbageCollector {
// TODO: enable thread safety analysis when in use by multiple worker threads.
template <typename MarkVisitor>
- void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
+ void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor)
NO_THREAD_SAFETY_ANALYSIS;
size_t GetFreedBytes() const {
@@ -204,7 +210,7 @@ class MarkSweep : public GarbageCollector {
void SweepSystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
+ static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void VerifySystemWeaks()
@@ -215,15 +221,16 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
template <typename Visitor>
- static void VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
+ static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor,
+ bool visit_class = false)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
- static void MarkObjectCallback(const mirror::Object* root, void* arg)
+ static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void MarkRootParallelCallback(const mirror::Object* root, void* arg);
+ static mirror::Object* MarkRootParallelCallback(mirror::Object* root, void* arg);
// Marks an object.
void MarkObject(const mirror::Object* obj)
@@ -242,16 +249,12 @@ class MarkSweep : public GarbageCollector {
// Returns true if the object has its bit set in the mark bitmap.
bool IsMarked(const mirror::Object* object) const;
- static bool IsMarkedCallback(const mirror::Object* object, void* arg)
+ static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static bool IsMarkedArrayCallback(const mirror::Object* object, void* arg)
+ static mirror::Object* SystemWeakIsMarkedArrayCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void ReMarkObjectVisitor(const mirror::Object* root, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
@@ -310,7 +313,7 @@ class MarkSweep : public GarbageCollector {
size_t GetThreadCount(bool paused) const;
// Returns true if an object is inside of the immune region (assumed to be marked).
- bool IsImmune(const mirror::Object* obj) const {
+ bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
return obj >= immune_begin_ && obj < immune_end_;
}
@@ -321,34 +324,34 @@ class MarkSweep : public GarbageCollector {
NO_THREAD_SAFETY_ANALYSIS;
template <typename Visitor>
- static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
+ static void VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Visit the header, static field references, and interface pointers of a class object.
template <typename Visitor>
- static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+ static void VisitClassReferences(mirror::Class* klass, mirror::Object* obj,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
template <typename Visitor>
- static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
+ static void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
template <typename Visitor>
- static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
+ static void VisitFieldsReferences(mirror::Object* obj, uint32_t ref_offsets, bool is_static,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Visit all of the references in an object array.
template <typename Visitor>
- static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+ static void VisitObjectArrayReferences(mirror::ObjectArray<mirror::Object>* array,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Visits the header and field references of a data object.
template <typename Visitor>
- static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
+ static void VisitOtherReferences(mirror::Class* klass, mirror::Object* obj,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
return VisitInstanceFieldsReferences(klass, obj, visitor);
@@ -390,9 +393,6 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
// Whether or not we count how many of each type of object were scanned.
static const bool kCountScannedTypes = false;
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 79c4359901..8bee00f0b8 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -31,6 +31,10 @@ class StickyMarkSweep : public PartialMarkSweep {
return kGcTypeSticky;
}
+ // Don't need to do anything special here since we scan all the cards which may have references
+ // to the newly allocated objects.
+ virtual void UpdateAndMarkModUnion() { }
+
explicit StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
~StickyMarkSweep() {}
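The new UpdateAndMarkModUnion() hook added to MarkSweep above is a template-method seam: the base collector walks the registered mod-union tables before marking, while StickyMarkSweep overrides it with a no-op because a sticky collection scans every dirty card anyway. A minimal sketch of the shape, with hypothetical stand-in names rather than the actual ART classes:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-ins for the ART types; illustrative only.
    struct ModUnionTableSketch {
      void UpdateAndMark() { std::cout << "update and mark one table\n"; }
    };

    class MarkSweepSketch {
     public:
      virtual ~MarkSweepSketch() {}
      // Full/partial collections pull references out of every registered
      // mod-union table before marking.
      virtual void UpdateAndMarkModUnion() {
        for (size_t i = 0; i < tables_.size(); ++i) {
          tables_[i]->UpdateAndMark();
        }
      }
      std::vector<ModUnionTableSketch*> tables_;
    };

    class StickyMarkSweepSketch : public MarkSweepSketch {
     public:
      // Sticky GC already scans all dirty cards, which cover any references
      // to newly allocated objects, so the tables add nothing here.
      virtual void UpdateAndMarkModUnion() {}
    };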
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
new file mode 100644
index 0000000000..b7ef77c35a
--- /dev/null
+++ b/runtime/gc/heap-inl.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_HEAP_INL_H_
+#define ART_RUNTIME_GC_HEAP_INL_H_
+
+#include "heap.h"
+
+#include "debugger.h"
+#include "gc/space/dlmalloc_space-inl.h"
+#include "gc/space/large_object_space.h"
+#include "object_utils.h"
+#include "runtime.h"
+#include "thread.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace gc {
+
+inline mirror::Object* Heap::AllocObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count) {
+ DebugCheckPreconditionsForAllocObject(c, byte_count);
+ mirror::Object* obj;
+ size_t bytes_allocated;
+ AllocationTimer alloc_timer(this, &obj);
+ bool large_object_allocation = TryAllocLargeObjectUninstrumented(self, c, byte_count,
+ &obj, &bytes_allocated);
+ if (LIKELY(!large_object_allocation)) {
+ // Non-large object allocation.
+ obj = AllocateUninstrumented(self, alloc_space_, byte_count, &bytes_allocated);
+ // Ensure that we did not allocate into a zygote space.
+ DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
+ }
+ if (LIKELY(obj != NULL)) {
+ obj->SetClass(c);
+ // Record allocation after since we want to use the atomic add for the atomic fence to guard
+ // the SetClass since we do not want the class to appear NULL in another thread.
+ size_t new_num_bytes_allocated = RecordAllocationUninstrumented(bytes_allocated, obj);
+ DCHECK(!Dbg::IsAllocTrackingEnabled());
+ CheckConcurrentGC(self, new_num_bytes_allocated, obj);
+ if (kDesiredHeapVerification > kNoHeapVerification) {
+ VerifyObject(obj);
+ }
+ return obj;
+ }
+ ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
+ return NULL;
+}
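The ordering comment in AllocObjectUninstrumented is doing real work: SetClass() runs first, and the atomic add inside RecordAllocationUninstrumented doubles as the fence that publishes the fully initialized object, so another thread that finds the object through the heap never sees a null class. A hedged sketch of the same publication idiom in portable C++ (std::atomic standing in for ART's AtomicInteger):

    #include <atomic>
    #include <cstddef>

    struct ObjectSketch {
      void* klass;  // stand-in for the object header's class pointer
    };

    std::atomic<size_t> num_bytes_allocated(0);

    // Initialize the object first, then bump the counter: the sequentially
    // consistent read-modify-write acts as the release fence that publishes
    // the fully constructed object, so no other thread should observe it
    // with a null class pointer.
    size_t PublishAllocation(ObjectSketch* obj, void* klass, size_t size) {
      obj->klass = klass;                                      // plain store
      size_t old_total = num_bytes_allocated.fetch_add(size);  // full fence
      return old_total + size;                                 // new running total
    }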
+
+inline size_t Heap::RecordAllocationUninstrumented(size_t size, mirror::Object* obj) {
+ DCHECK(obj != NULL);
+ DCHECK_GT(size, 0u);
+ size_t old_num_bytes_allocated = static_cast<size_t>(num_bytes_allocated_.fetch_add(size));
+
+ DCHECK(!Runtime::Current()->HasStatsEnabled());
+
+ // This is safe to do since the GC will never free objects which are neither in the allocation
+ // stack nor the live bitmap.
+ while (!allocation_stack_->AtomicPushBack(obj)) {
+ CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+ }
+
+ return old_num_bytes_allocated + size;
+}
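The push-retry loop above works because the allocation stack is a fixed-capacity structure with a lock-free push: a failed push means the stack is full, and a sticky GC drains it (live objects move to bitmaps, dead ones are freed), after which the push is retried. A rough sketch of such a stack, assuming a CAS-advanced back index; this is illustrative, not ART's actual ObjectStack code:

    #include <atomic>
    #include <cstddef>

    // Fixed-capacity stack with a lock-free push, in the spirit of the
    // allocation stack (assumed implementation, for illustration).
    template <typename T>
    class BoundedAtomicStack {
     public:
      explicit BoundedAtomicStack(size_t capacity)
          : data_(new T[capacity]), capacity_(capacity), back_(0) {}
      ~BoundedAtomicStack() { delete[] data_; }

      // Returns false when full; the caller's recourse is to drain the
      // stack (here: by running a sticky GC) and retry.
      bool AtomicPushBack(T value) {
        size_t index = back_.load();
        do {
          if (index >= capacity_) {
            return false;  // full: caller must free up space and retry
          }
        } while (!back_.compare_exchange_weak(index, index + 1));
        data_[index] = value;
        return true;
      }

     private:
      T* data_;
      const size_t capacity_;
      std::atomic<size_t> back_;
    };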
+
+inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
+ bool grow, size_t* bytes_allocated) {
+ if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
+ return NULL;
+ }
+ DCHECK(!running_on_valgrind_);
+ return space->Alloc(self, alloc_size, bytes_allocated);
+}
+
+// DlMallocSpace-specific version.
+inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
+ bool grow, size_t* bytes_allocated) {
+ if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
+ return NULL;
+ }
+ DCHECK(!running_on_valgrind_);
+ return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
+}
+
+template <class T>
+inline mirror::Object* Heap::AllocateUninstrumented(Thread* self, T* space, size_t alloc_size,
+ size_t* bytes_allocated) {
+ // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
+ // done in the runnable state where suspension is expected.
+ DCHECK_EQ(self->GetState(), kRunnable);
+ self->AssertThreadSuspensionIsAllowable();
+
+ mirror::Object* ptr = TryToAllocateUninstrumented(self, space, alloc_size, false, bytes_allocated);
+ if (LIKELY(ptr != NULL)) {
+ return ptr;
+ }
+ return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
+}
+
+inline bool Heap::TryAllocLargeObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count,
+ mirror::Object** obj_ptr, size_t* bytes_allocated) {
+ bool large_object_allocation = ShouldAllocLargeObject(c, byte_count);
+ if (UNLIKELY(large_object_allocation)) {
+ mirror::Object* obj = AllocateUninstrumented(self, large_object_space_, byte_count, bytes_allocated);
+ // Make sure that our large object didn't get placed anywhere within the space interval or else
+ // it breaks the immune range.
+ DCHECK(obj == NULL ||
+ reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
+ reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
+ *obj_ptr = obj;
+ }
+ return large_object_allocation;
+}
+
+inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
+ DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
+ (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
+ ClassHelper(c).GetDescriptorAsStringPiece().length() == 0);
+ DCHECK_GE(byte_count, sizeof(mirror::Object));
+}
+
+inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
+ : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
+ if (kMeasureAllocationTime) {
+ allocation_start_time_ = NanoTime() / kTimeAdjust;
+ }
+}
+
+inline Heap::AllocationTimer::~AllocationTimer() {
+ if (kMeasureAllocationTime) {
+ mirror::Object* allocated_obj = *allocated_obj_ptr_;
+ // Only record the time if the allocation succeeded.
+ if (allocated_obj != NULL) {
+ uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
+ heap_->total_allocation_time_.fetch_add(allocation_end_time - allocation_start_time_);
+ }
+ }
+}
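AllocationTimer is a small RAII helper: the constructor captures the start time and the destructor charges the elapsed time to the heap's total, but only when the pointed-to result is non-null, i.e. only for allocations that succeeded. A standalone sketch of the same idea using std::chrono (assumed names, not the ART class):

    #include <chrono>
    #include <cstdint>

    // Standalone sketch of the same RAII idea (not the ART class itself).
    class ScopedAllocTimer {
     public:
      ScopedAllocTimer(uint64_t* total_ns, void** result)
          : total_ns_(total_ns), result_(result),
            start_(std::chrono::steady_clock::now()) {}
      ~ScopedAllocTimer() {
        if (*result_ != nullptr) {  // charge only successful allocations
          *total_ns_ += std::chrono::duration_cast<std::chrono::nanoseconds>(
              std::chrono::steady_clock::now() - start_).count();
        }
      }
     private:
      uint64_t* total_ns_;
      void** result_;
      std::chrono::steady_clock::time_point start_;
    };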
+
+inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) {
+ // We need to have a zygote space or else our newly allocated large object can end up in the
+ // Zygote resulting in it being prematurely freed.
+ // We can only do this for primitive arrays since large objects will not be within the card table
+ // range. This also means that we rely on SetClass not dirtying the object's card.
+ return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
+}
+
+inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
+ size_t new_footprint = num_bytes_allocated_ + alloc_size;
+ if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
+ if (UNLIKELY(new_footprint > growth_limit_)) {
+ return true;
+ }
+ if (!concurrent_gc_) {
+ if (!grow) {
+ return true;
+ } else {
+ max_allowed_footprint_ = new_footprint;
+ }
+ }
+ }
+ return false;
+}
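Concrete numbers make the footprint policy easier to read: with a 32 MB soft limit (max_allowed_footprint_), a 64 MB hard limit (growth_limit_) and 31 MB allocated, a 2 MB request overshoots the soft limit; with a non-concurrent GC the first attempt (grow == false) fails so a GC can run, and the post-GC retry (grow == true) raises the soft limit to 33 MB, while any request that would cross the hard limit is out of memory regardless. The same policy as a self-contained sketch, with the Heap members turned into parameters for illustration:

    #include <cstddef>

    // Sketch of the footprint policy; the real method reads these values
    // from Heap members rather than taking them as parameters.
    bool IsOutOfMemorySketch(size_t allocated, size_t alloc_size, bool grow,
                             bool concurrent_gc, size_t* soft_limit,
                             size_t hard_limit) {
      size_t new_footprint = allocated + alloc_size;
      if (new_footprint > *soft_limit) {
        if (new_footprint > hard_limit) {
          return true;  // hard limit exceeded: genuine OOM
        }
        if (!concurrent_gc) {
          if (!grow) {
            return true;               // first attempt: fail, let a GC run
          }
          *soft_limit = new_footprint;  // retry after GC: grow the heap
        }
      }
      return false;
    }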
+
+inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, mirror::Object* obj) {
+ if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
+ // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
+ SirtRef<mirror::Object> ref(self, obj);
+ RequestConcurrentGC(self);
+ }
+}
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_HEAP_INL_H_
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1b46257c08..c0e46ac165 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -39,6 +39,7 @@
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
+#include "heap-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/art_field-inl.h"
@@ -63,8 +64,6 @@ static constexpr size_t kGcAlotInterval = KB;
static constexpr bool kDumpGcPerformanceOnShutdown = false;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
-// If true, measure the total allocation time.
-static constexpr bool kMeasureAllocationTime = false;
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
double target_utilization, size_t capacity, const std::string& original_image_file_name,
@@ -105,7 +104,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
: std::numeric_limits<size_t>::max()),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
- large_object_threshold_(3 * kPageSize),
num_bytes_allocated_(0),
native_bytes_allocated_(0),
gc_memory_overhead_(0),
@@ -191,11 +189,11 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
CHECK(card_table_.get() != NULL) << "Failed to create card table";
- image_mod_union_table_.reset(new accounting::ModUnionTableToZygoteAllocspace(this));
- CHECK(image_mod_union_table_.get() != NULL) << "Failed to create image mod-union table";
-
- zygote_mod_union_table_.reset(new accounting::ModUnionTableCardCache(this));
- CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";
+ accounting::ModUnionTable* mod_union_table =
+ new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
+ GetImageSpace());
+ CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
+ AddModUnionTable(mod_union_table);
// TODO: Count objects in the image space here.
num_bytes_allocated_ = 0;
@@ -238,6 +236,11 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
CHECK_NE(max_allowed_footprint_, 0U);
+
+ if (running_on_valgrind_) {
+ Runtime::Current()->InstrumentQuickAllocEntryPoints();
+ }
+
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() exiting";
}
@@ -489,10 +492,7 @@ Heap::~Heap() {
live_stack_->Reset();
VLOG(heap) << "~Heap()";
- // We can't take the heap lock here because there might be a daemon thread suspended with the
- // heap lock held. We know though that no non-daemon threads are executing, and we know that
- // all daemon threads are suspended, and we also know that the threads list have been deleted, so
- // those threads can't resume. We're the only running thread, and we can do whatever we like...
+ STLDeleteValues(&mod_union_tables_);
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
@@ -554,81 +554,69 @@ static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void*
}
}
-mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_count) {
- DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
- (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
- strlen(ClassHelper(c).GetDescriptor()) == 0);
- DCHECK_GE(byte_count, sizeof(mirror::Object));
-
- mirror::Object* obj = NULL;
- size_t bytes_allocated = 0;
- uint64_t allocation_start = 0;
- if (UNLIKELY(kMeasureAllocationTime)) {
- allocation_start = NanoTime() / kTimeAdjust;
+void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
+ std::ostringstream oss;
+ int64_t total_bytes_free = GetFreeMemory();
+ oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
+ << " free bytes";
+ // If the allocation failed due to fragmentation, print out the largest contiguous allocation.
+ if (!large_object_allocation && total_bytes_free >= byte_count) {
+ size_t max_contiguous_allocation = 0;
+ for (const auto& space : continuous_spaces_) {
+ if (space->IsDlMallocSpace()) {
+ space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
+ }
+ }
+ oss << "; failed due to fragmentation (largest possible contiguous allocation "
+ << max_contiguous_allocation << " bytes)";
}
+ self->ThrowOutOfMemoryError(oss.str().c_str());
+}
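When the free total should have covered the request, the failure is fragmentation, so the message reports the largest contiguous free chunk, gathered by walking each dlmalloc space with MSpaceChunkCallback (used a few lines above). A callback of that shape only has to track the maximum free span per chunk; a hedged sketch compatible with a dlmalloc-style inspect-all walk:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Sketch of a chunk callback: for each chunk the walk reports its
    // bounds and how much of it is in use; the free remainder is a
    // candidate for the largest possible contiguous allocation.
    static void MaxContiguousChunkCallback(void* start, void* end,
                                           size_t used_bytes, void* arg) {
      size_t chunk_size = reinterpret_cast<uint8_t*>(end) -
                          reinterpret_cast<uint8_t*>(start);
      size_t chunk_free = chunk_size - used_bytes;
      size_t* max_contiguous = reinterpret_cast<size_t*>(arg);
      *max_contiguous = std::max(*max_contiguous, chunk_free);
    }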
- // We need to have a zygote space or else our newly allocated large object can end up in the
- // Zygote resulting in it being prematurely freed.
- // We can only do this for primitive objects since large objects will not be within the card table
- // range. This also means that we rely on SetClass not dirtying the object's card.
- bool large_object_allocation =
- byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray();
+inline bool Heap::TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count,
+ mirror::Object** obj_ptr, size_t* bytes_allocated) {
+ bool large_object_allocation = ShouldAllocLargeObject(c, byte_count);
if (UNLIKELY(large_object_allocation)) {
- obj = Allocate(self, large_object_space_, byte_count, &bytes_allocated);
+ mirror::Object* obj = AllocateInstrumented(self, large_object_space_, byte_count, bytes_allocated);
// Make sure that our large object didn't get placed anywhere within the space interval or else
// it breaks the immune range.
DCHECK(obj == NULL ||
reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
- } else {
- obj = Allocate(self, alloc_space_, byte_count, &bytes_allocated);
+ *obj_ptr = obj;
+ }
+ return large_object_allocation;
+}
+
+mirror::Object* Heap::AllocObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count) {
+ DebugCheckPreconditionsForAllocObject(c, byte_count);
+ mirror::Object* obj;
+ size_t bytes_allocated;
+ AllocationTimer alloc_timer(this, &obj);
+ bool large_object_allocation = TryAllocLargeObjectInstrumented(self, c, byte_count,
+ &obj, &bytes_allocated);
+ if (LIKELY(!large_object_allocation)) {
+ // Non-large object allocation.
+ obj = AllocateInstrumented(self, alloc_space_, byte_count, &bytes_allocated);
// Ensure that we did not allocate into a zygote space.
DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
}
-
if (LIKELY(obj != NULL)) {
obj->SetClass(c);
-
// Record allocation after since we want to use the atomic add for the atomic fence to guard
// the SetClass since we do not want the class to appear NULL in another thread.
- RecordAllocation(bytes_allocated, obj);
-
+ size_t new_num_bytes_allocated = RecordAllocationInstrumented(bytes_allocated, obj);
if (Dbg::IsAllocTrackingEnabled()) {
Dbg::RecordAllocation(c, byte_count);
}
- if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_)) {
- // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
- SirtRef<mirror::Object> ref(self, obj);
- RequestConcurrentGC(self);
- }
+ CheckConcurrentGC(self, new_num_bytes_allocated, obj);
if (kDesiredHeapVerification > kNoHeapVerification) {
VerifyObject(obj);
}
-
- if (UNLIKELY(kMeasureAllocationTime)) {
- total_allocation_time_.fetch_add(NanoTime() / kTimeAdjust - allocation_start);
- }
-
return obj;
- } else {
- std::ostringstream oss;
- int64_t total_bytes_free = GetFreeMemory();
- oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
- << " free bytes";
- // If the allocation failed due to fragmentation, print out the largest continuous allocation.
- if (!large_object_allocation && total_bytes_free >= byte_count) {
- size_t max_contiguous_allocation = 0;
- for (const auto& space : continuous_spaces_) {
- if (space->IsDlMallocSpace()) {
- space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
- }
- }
- oss << "; failed due to fragmentation (largest possible contiguous allocation "
- << max_contiguous_allocation << " bytes)";
- }
- self->ThrowOutOfMemoryError(oss.str().c_str());
- return NULL;
}
+ ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
+ return NULL;
}
bool Heap::IsHeapAddress(const mirror::Object* obj) {
@@ -771,10 +759,10 @@ void Heap::VerifyHeap() {
GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}
-inline void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
+inline size_t Heap::RecordAllocationInstrumented(size_t size, mirror::Object* obj) {
DCHECK(obj != NULL);
DCHECK_GT(size, 0u);
- num_bytes_allocated_.fetch_add(size);
+ size_t old_num_bytes_allocated = static_cast<size_t>(num_bytes_allocated_.fetch_add(size));
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = Thread::Current()->GetStats();
@@ -792,6 +780,8 @@ inline void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
while (!allocation_stack_->AtomicPushBack(obj)) {
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
}
+
+ return old_num_bytes_allocated + size;
}
void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
@@ -810,25 +800,8 @@ void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
}
}
-inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
- size_t new_footprint = num_bytes_allocated_ + alloc_size;
- if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
- if (UNLIKELY(new_footprint > growth_limit_)) {
- return true;
- }
- if (!concurrent_gc_) {
- if (!grow) {
- return true;
- } else {
- max_allowed_footprint_ = new_footprint;
- }
- }
- }
- return false;
-}
-
-inline mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated) {
+inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
+ bool grow, size_t* bytes_allocated) {
if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
return NULL;
}
@@ -836,8 +809,8 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* spac
}
// DlMallocSpace-specific version.
-inline mirror::Object* Heap::TryToAllocate(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated) {
+inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
+ bool grow, size_t* bytes_allocated) {
if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
return NULL;
}
@@ -849,15 +822,15 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, space::DlMallocSpace* s
}
template <class T>
-inline mirror::Object* Heap::Allocate(Thread* self, T* space, size_t alloc_size,
- size_t* bytes_allocated) {
+inline mirror::Object* Heap::AllocateInstrumented(Thread* self, T* space, size_t alloc_size,
+ size_t* bytes_allocated) {
// Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
// done in the runnable state where suspension is expected.
DCHECK_EQ(self->GetState(), kRunnable);
self->AssertThreadSuspensionIsAllowable();
- mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
- if (ptr != NULL) {
+ mirror::Object* ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
+ if (LIKELY(ptr != NULL)) {
return ptr;
}
return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
@@ -872,7 +845,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* sp
collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
if (last_gc != collector::kGcTypeNone) {
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
- ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
+ ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
if (ptr != NULL) {
return ptr;
}
@@ -907,7 +880,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* sp
i = static_cast<size_t>(gc_type_ran);
// Did we free sufficient memory for the allocation to succeed?
- ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
+ ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
if (ptr != NULL) {
return ptr;
}
@@ -916,7 +889,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* sp
// Allocations have failed after GCs; this is an exceptional state.
// Try harder, growing the heap if necessary.
- ptr = TryToAllocate(self, space, alloc_size, true, bytes_allocated);
+ ptr = TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated);
if (ptr != NULL) {
return ptr;
}
@@ -931,7 +904,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* sp
// We don't need a WaitForConcurrentGcToComplete here either.
CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
- return TryToAllocate(self, space, alloc_size, true, bytes_allocated);
+ return TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated);
}
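Two things are happening across these renames. Every allocator entry point now comes in an Instrumented and an Uninstrumented flavor: the instrumented path keeps runtime stats, Dbg allocation tracking and the valgrind hook (note the constructor now calls InstrumentQuickAllocEntryPoints() when running_on_valgrind_), while the uninstrumented path DCHECKs those features are off and skips the checks. Independently, TryToAllocate* is overloaded on the space type, so instantiating the Allocate template with DlMallocSpace binds AllocNonvirtual at compile time and removes the virtual dispatch. A sketch of that static-dispatch trick under assumed names:

    #include <cstddef>

    struct AllocSpaceSketch {
      virtual ~AllocSpaceSketch() {}
      virtual void* Alloc(size_t n) { return new char[n]; }
    };

    struct DlMallocSpaceSketch : AllocSpaceSketch {
      void* AllocNonvirtual(size_t n) { return new char[n]; }  // direct call
    };

    // Generic overload: pays for a virtual dispatch.
    inline void* TryToAllocateSketch(AllocSpaceSketch* space, size_t n) {
      return space->Alloc(n);
    }

    // Overload for the concrete space type: overload resolution happens at
    // template instantiation time, so AllocNonvirtual is bound statically.
    inline void* TryToAllocateSketch(DlMallocSpaceSketch* space, size_t n) {
      return space->AllocNonvirtual(n);
    }

    template <class T>
    void* AllocateSketch(T* space, size_t n) {
      return TryToAllocateSketch(space, n);  // chosen by T, not at runtime
    }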
void Heap::SetTargetHeapUtilization(float target) {
@@ -1084,15 +1057,15 @@ class ReferringObjectsFinder {
// For bitmap Visit.
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
- collector::MarkSweep::VisitObjectReferences(o, *this);
+ void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ collector::MarkSweep::VisitObjectReferences(obj, *this, true);
}
// For MarkSweep::VisitObjectReferences.
- void operator()(const mirror::Object* referrer, const mirror::Object* object,
+ void operator()(mirror::Object* referrer, mirror::Object* object,
const MemberOffset&, bool) const {
if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
- referring_objects_.push_back(const_cast<mirror::Object*>(referrer));
+ referring_objects_.push_back(referrer);
}
}
@@ -1157,6 +1130,12 @@ void Heap::PreZygoteFork() {
AddContinuousSpace(alloc_space_);
have_zygote_space_ = true;
+ // Create the zygote space mod union table.
+ accounting::ModUnionTable* mod_union_table =
+ new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
+ CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
+ AddModUnionTable(mod_union_table);
+
// Reset the cumulative loggers since we now have a few additional timing phases.
for (const auto& collector : mark_sweep_collectors_) {
collector->ResetCumulativeStatistics();
@@ -1313,38 +1292,12 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
return gc_type;
}
-void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
- collector::GcType gc_type) {
- if (gc_type == collector::kGcTypeSticky) {
- // Don't need to do anything for mod union table in this case since we are only scanning dirty
- // cards.
- return;
- }
-
- base::TimingLogger::ScopedSplit split("UpdateModUnionTable", &timings);
- // Update zygote mod union table.
- if (gc_type == collector::kGcTypePartial) {
- base::TimingLogger::ScopedSplit split("UpdateZygoteModUnionTable", &timings);
- zygote_mod_union_table_->Update();
-
- timings.NewSplit("ZygoteMarkReferences");
- zygote_mod_union_table_->MarkReferences(mark_sweep);
- }
-
- // Processes the cards we cleared earlier and adds their objects into the mod-union table.
- timings.NewSplit("UpdateModUnionTable");
- image_mod_union_table_->Update();
-
- // Scans all objects in the mod-union table.
- timings.NewSplit("MarkImageToAllocSpaceReferences");
- image_mod_union_table_->MarkReferences(mark_sweep);
-}
-
-static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
+static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
if (root == obj) {
LOG(INFO) << "Object " << obj << " is a root";
}
+ return root;
}
class ScanVisitor {
@@ -1459,9 +1412,10 @@ class VerifyReferenceVisitor {
return heap_->IsLiveObjectLocked(obj, true, false, true);
}
- static void VerifyRoots(const mirror::Object* root, void* arg) {
+ static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) {
VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
- (*visitor)(NULL, root, MemberOffset(0), true);
+ (*visitor)(nullptr, root, MemberOffset(0), true);
+ return root;
}
private:
@@ -1481,7 +1435,7 @@ class VerifyObjectVisitor {
VerifyReferenceVisitor visitor(heap_);
// The class doesn't count as a reference but we should verify it anyways.
visitor(obj, obj->GetClass(), MemberOffset(0), false);
- collector::MarkSweep::VisitObjectReferences(obj, visitor);
+ collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
failed_ = failed_ || visitor.Failed();
}
@@ -1514,8 +1468,10 @@ bool Heap::VerifyHeapReferences() {
// pointing to dead objects if they are not reachable.
if (visitor.Failed()) {
// Dump mod-union tables.
- image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
- zygote_mod_union_table_->Dump(LOG(ERROR) << "Zygote mod-union table: ");
+ for (const auto& table_pair : mod_union_tables_) {
+ accounting::ModUnionTable* mod_union_table = table_pair.second;
+ mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
+ }
DumpSpaces();
return false;
}
@@ -1599,10 +1555,10 @@ class VerifyLiveStackReferences {
: heap_(heap),
failed_(false) {}
- void operator()(const mirror::Object* obj) const
+ void operator()(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
- collector::MarkSweep::VisitObjectReferences(obj, visitor);
+ collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
}
bool Failed() const {
@@ -1638,15 +1594,23 @@ void Heap::SwapStacks() {
allocation_stack_.swap(live_stack_);
}
+accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
+ auto it = mod_union_tables_.find(space);
+ if (it == mod_union_tables_.end()) {
+ return nullptr;
+ }
+ return it->second;
+}
+
void Heap::ProcessCards(base::TimingLogger& timings) {
// Clear cards and keep track of cards cleared in the mod-union table.
for (const auto& space : continuous_spaces_) {
- if (space->IsImageSpace()) {
- base::TimingLogger::ScopedSplit split("ImageModUnionClearCards", &timings);
- image_mod_union_table_->ClearCards(space);
- } else if (space->IsZygoteSpace()) {
- base::TimingLogger::ScopedSplit split("ZygoteModUnionClearCards", &timings);
- zygote_mod_union_table_->ClearCards(space);
+ accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
+ if (table != nullptr) {
+ const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
+ "ImageModUnionClearCards";
+ base::TimingLogger::ScopedSplit split(name, &timings);
+ table->ClearCards();
} else {
base::TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
// No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
@@ -1656,6 +1620,10 @@ void Heap::ProcessCards(base::TimingLogger& timings) {
}
}
+static mirror::Object* IdentityCallback(mirror::Object* obj, void*) {
+ return obj;
+}
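IdentityCallback exists because the root-visitor contract changed in this patch: visitors now return a mirror::Object* instead of void, so callers can write the result back through the slot and a future moving collector can relocate roots in place (compare RootMatchesObjectVisitor and VerifyRoots above, and IndirectReferenceTable::VisitRoots below). Mod-union verification only wants the traversal's marking side effects, hence a visitor that returns its argument unchanged. The contract as a small sketch, with stand-in types:

    #include <cassert>

    struct ObjSketch { int data; };

    // New-style visitor: returns the (possibly relocated) object, or null
    // in sweeping contexts to say "this weak reference died".
    typedef ObjSketch* (*RootVisitorSketch)(ObjSketch* root, void* arg);

    // Callers write the result back through the slot, which is what lets a
    // moving collector fix up roots in place.
    void VisitRootSlot(ObjSketch** slot, RootVisitorSketch visitor, void* arg) {
      *slot = visitor(*slot, arg);
      assert(*slot != nullptr);  // strong roots must stay live
    }

    // Marking-only contexts (like mod-union verification) pass an identity
    // visitor because only the traversal's side effects matter.
    ObjSketch* IdentitySketch(ObjSketch* root, void*) { return root; }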
+
void Heap::PreGcVerification(collector::GarbageCollector* gc) {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
@@ -1689,10 +1657,11 @@ void Heap::PreGcVerification(collector::GarbageCollector* gc) {
if (verify_mod_union_table_) {
thread_list->SuspendAll();
ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
- zygote_mod_union_table_->Update();
- zygote_mod_union_table_->Verify();
- image_mod_union_table_->Update();
- image_mod_union_table_->Verify();
+ for (const auto& table_pair : mod_union_tables_) {
+ accounting::ModUnionTable* mod_union_table = table_pair.second;
+ mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr);
+ mod_union_table->Verify();
+ }
thread_list->ResumeAll();
}
}
@@ -2146,5 +2115,10 @@ int64_t Heap::GetTotalMemory() const {
return ret;
}
+void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
+ DCHECK(mod_union_table != nullptr);
+ mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
+}
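With AddModUnionTable, the two dedicated UniquePtr members are replaced by a registry: tables live in a SafeMap keyed by the space they cover, which is what lets ProcessCards and PreGcVerification iterate over however many tables exist (image, zygote, or future spaces) without per-space special cases. A hedged sketch of the registry pattern, with std::map standing in for SafeMap:

    #include <cassert>
    #include <map>
    #include <string>

    struct SpaceSketch {};

    struct ModUnionTableRegistryEntry {
      ModUnionTableRegistryEntry(const std::string& name, SpaceSketch* space)
          : name(name), space(space) {}
      std::string name;
      SpaceSketch* space;
    };

    // std::map standing in for ART's SafeMap; keyed by the covered space.
    std::map<SpaceSketch*, ModUnionTableRegistryEntry*> mod_union_tables;

    void AddTable(ModUnionTableRegistryEntry* table) {
      assert(table != nullptr);
      mod_union_tables[table->space] = table;
    }

    ModUnionTableRegistryEntry* FindTableFromSpace(SpaceSketch* space) {
      std::map<SpaceSketch*, ModUnionTableRegistryEntry*>::iterator it =
          mod_union_tables.find(space);
      return it == mod_union_tables.end() ? nullptr : it->second;
    }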
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0b64261fa1..ffd3034674 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -101,6 +101,11 @@ enum HeapVerificationMode {
};
static constexpr HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;
+// If true, measure the total allocation time.
+static constexpr bool kMeasureAllocationTime = false;
+// Primitive arrays larger than this size are put in the large object space.
+static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;
+
class Heap {
public:
static constexpr size_t kDefaultInitialSize = 2 * MB;
@@ -129,8 +134,18 @@ class Heap {
// Allocates and initializes storage for an object instance.
mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocObjectInstrumented(self, klass, num_bytes);
+ }
+ mirror::Object* AllocObjectInstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* AllocObjectUninstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ThrowOutOfMemoryError(size_t byte_count, bool large_object_allocation);
+
void RegisterNativeAllocation(int bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterNativeFree(int bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -368,11 +383,6 @@ class Heap {
accounting::ObjectStack* stack)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- // Update and mark mod union table based on gc type.
- void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
- collector::GcType gc_type)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
// Gets called when we get notified by ActivityThread that the process state has changed.
void ListenForProcessStateChange();
@@ -426,11 +436,28 @@ class Heap {
size_t GetConcGCThreadCount() const {
return conc_gc_threads_;
}
+ accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
+ void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
private:
+ bool TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count,
+ mirror::Object** obj_ptr, size_t* bytes_allocated)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool TryAllocLargeObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count,
+ mirror::Object** obj_ptr, size_t* bytes_allocated)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count);
+ void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, mirror::Object* obj);
+
// Allocates uninitialized storage. Passing in a null space tries to place the object in the
// large object space.
- template <class T> mirror::Object* Allocate(Thread* self, T* space, size_t num_bytes, size_t* bytes_allocated)
+ template <class T> mirror::Object* AllocateInstrumented(Thread* self, T* space, size_t num_bytes,
+ size_t* bytes_allocated)
+ LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <class T> mirror::Object* AllocateUninstrumented(Thread* self, T* space, size_t num_bytes,
+ size_t* bytes_allocated)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -442,17 +469,29 @@ class Heap {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Try to allocate a number of bytes, this function never does any GCs.
- mirror::Object* TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size, bool grow,
- size_t* bytes_allocated)
+ mirror::Object* TryToAllocateInstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
+ bool grow, size_t* bytes_allocated)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Try to allocate a number of bytes, this function never does any GCs. DlMallocSpace-specialized version.
- mirror::Object* TryToAllocate(Thread* self, space::DlMallocSpace* space, size_t alloc_size, bool grow,
- size_t* bytes_allocated)
+ mirror::Object* TryToAllocateInstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
+ bool grow, size_t* bytes_allocated)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* TryToAllocateUninstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
+ bool grow, size_t* bytes_allocated)
+ LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Object* TryToAllocateUninstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
+ bool grow, size_t* bytes_allocated)
+ LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow);
// Pushes a list of cleared references out to the managed heap.
@@ -462,7 +501,11 @@ class Heap {
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
bool IsGCRequestPending() const;
- void RecordAllocation(size_t size, mirror::Object* object)
+ size_t RecordAllocationInstrumented(size_t size, mirror::Object* object)
+ LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ size_t RecordAllocationUninstrumented(size_t size, mirror::Object* object)
LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -522,12 +565,8 @@ class Heap {
// The card table, dirtied by the write barrier.
UniquePtr<accounting::CardTable> card_table_;
- // The mod-union table remembers all of the references from the image space to the alloc /
- // zygote spaces to allow the card table to be cleared.
- UniquePtr<accounting::ModUnionTable> image_mod_union_table_;
-
- // This table holds all of the references from the zygote space to the alloc space.
- UniquePtr<accounting::ModUnionTable> zygote_mod_union_table_;
+ // A mod-union table remembers all of the references from its space to other spaces.
+ SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_;
// What kind of concurrency behavior is the runtime after? True for concurrent mark sweep GC,
// false for stop-the-world mark sweep.
@@ -615,9 +654,6 @@ class Heap {
// Since the heap was created, how many objects have been freed.
size_t total_objects_freed_ever_;
- // Primitive objects larger than this size are put in the large object space.
- const size_t large_object_threshold_;
-
// Number of bytes allocated. Adjusted after each allocation and free.
AtomicInteger num_bytes_allocated_;
@@ -719,6 +755,16 @@ class Heap {
friend class ScopedHeapLock;
friend class space::SpaceTest;
+ class AllocationTimer {
+ private:
+ Heap* heap_;
+ mirror::Object** allocated_obj_ptr_;
+ uint64_t allocation_start_time_;
+ public:
+ AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
+ ~AllocationTimer();
+ };
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 54811414e9..242ef6886e 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -30,7 +30,7 @@ inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_b
MutexLock mu(self, lock_);
obj = AllocWithoutGrowthLocked(num_bytes, bytes_allocated);
}
- if (obj != NULL) {
+ if (LIKELY(obj != NULL)) {
// Zero freshly allocated memory, done while not holding the space's lock.
memset(obj, 0, num_bytes);
}
@@ -39,7 +39,7 @@ inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_b
inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated) {
mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
- if (result != NULL) {
+ if (LIKELY(result != NULL)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index a9440d3c1b..1b6fca7cbb 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -23,7 +23,7 @@
#include "utils.h"
#include <valgrind.h>
-#include <../memcheck/memcheck.h>
+#include <memcheck/memcheck.h>
namespace art {
namespace gc {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a174c0a1dc..c6d028eed5 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -17,6 +17,7 @@
#include "large_object_space.h"
#include "base/logging.h"
+#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "UniquePtr.h"
#include "image.h"
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 0b2e741527..67620a09f1 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -484,11 +484,11 @@ class Hprof {
}
private:
- static void RootVisitor(const mirror::Object* obj, void* arg)
+ static mirror::Object* RootVisitor(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(arg != NULL);
- Hprof* hprof = reinterpret_cast<Hprof*>(arg);
- hprof->VisitRoot(obj);
+ DCHECK(arg != NULL);
+ reinterpret_cast<Hprof*>(arg)->VisitRoot(obj);
+ return obj;
}
static void HeapBitmapCallback(mirror::Object* obj, void* arg)
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 8af4d7eaab..2bd83538ac 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -40,7 +40,7 @@ IndirectReferenceTable::IndirectReferenceTable(size_t initialCount,
CHECK_LE(initialCount, maxCount);
CHECK_NE(desiredKind, kSirtOrInvalid);
- table_ = reinterpret_cast<const mirror::Object**>(malloc(initialCount * sizeof(const mirror::Object*)));
+ table_ = reinterpret_cast<mirror::Object**>(malloc(initialCount * sizeof(const mirror::Object*)));
CHECK(table_ != NULL);
memset(table_, 0xd1, initialCount * sizeof(const mirror::Object*));
@@ -75,7 +75,7 @@ bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int
return true;
}
-IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const mirror::Object* obj) {
+IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
IRTSegmentState prevState;
prevState.all = cookie;
size_t topIndex = segment_state_.parts.topIndex;
@@ -101,7 +101,7 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const mirror::Object* o
}
DCHECK_GT(newSize, alloc_entries_);
- table_ = reinterpret_cast<const mirror::Object**>(realloc(table_, newSize * sizeof(const mirror::Object*)));
+ table_ = reinterpret_cast<mirror::Object**>(realloc(table_, newSize * sizeof(mirror::Object*)));
slot_data_ = reinterpret_cast<IndirectRefSlot*>(realloc(slot_data_,
newSize * sizeof(IndirectRefSlot)));
if (table_ == NULL || slot_data_ == NULL) {
@@ -126,7 +126,7 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, const mirror::Object* o
if (numHoles > 0) {
DCHECK_GT(topIndex, 1U);
// Find the first hole; likely to be near the end of the list.
- const mirror::Object** pScan = &table_[topIndex - 1];
+ mirror::Object** pScan = &table_[topIndex - 1];
DCHECK(*pScan != NULL);
while (*--pScan != NULL) {
DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
@@ -194,7 +194,8 @@ bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
return true;
}
-static int Find(mirror::Object* direct_pointer, int bottomIndex, int topIndex, const mirror::Object** table) {
+static int Find(mirror::Object* direct_pointer, int bottomIndex, int topIndex,
+ mirror::Object** table) {
for (int i = bottomIndex; i < topIndex; ++i) {
if (table[i] == direct_pointer) {
return i;
@@ -310,13 +311,14 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
for (auto ref : *this) {
- visitor(*ref, arg);
+ *ref = visitor(const_cast<mirror::Object*>(*ref), arg);
+ DCHECK(*ref != nullptr);
}
}
void IndirectReferenceTable::Dump(std::ostream& os) const {
os << kind_ << " table dump:\n";
- std::vector<const mirror::Object*> entries(table_, table_ + Capacity());
+ ReferenceTable::Table entries(table_, table_ + Capacity());
// Remove NULLs.
for (int i = entries.size() - 1; i >= 0; --i) {
if (entries[i] == NULL) {
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 26f53db4ad..51b238c527 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -206,7 +206,7 @@ union IRTSegmentState {
class IrtIterator {
public:
- explicit IrtIterator(const mirror::Object** table, size_t i, size_t capacity)
+ explicit IrtIterator(mirror::Object** table, size_t i, size_t capacity)
: table_(table), i_(i), capacity_(capacity) {
SkipNullsAndTombstones();
}
@@ -217,7 +217,7 @@ class IrtIterator {
return *this;
}
- const mirror::Object** operator*() {
+ mirror::Object** operator*() {
return &table_[i_];
}
@@ -233,7 +233,7 @@ class IrtIterator {
}
}
- const mirror::Object** table_;
+ mirror::Object** table_;
size_t i_;
size_t capacity_;
};
@@ -258,7 +258,7 @@ class IndirectReferenceTable {
* Returns NULL if the table is full (max entries reached, or alloc
* failed during expansion).
*/
- IndirectRef Add(uint32_t cookie, const mirror::Object* obj)
+ IndirectRef Add(uint32_t cookie, mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -266,7 +266,7 @@ class IndirectReferenceTable {
*
* Returns kInvalidIndirectRefObject if iref is invalid.
*/
- const mirror::Object* Get(IndirectRef iref) const {
+ mirror::Object* Get(IndirectRef iref) const {
if (!GetChecked(iref)) {
return kInvalidIndirectRefObject;
}
@@ -363,7 +363,7 @@ class IndirectReferenceTable {
IRTSegmentState segment_state_;
/* bottom of the stack */
- const mirror::Object** table_;
+ mirror::Object** table_;
/* bit mask, ORed into all irefs */
IndirectRefKind kind_;
/* extended debugging info */
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index bd2890c497..b6c6cb4be3 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -17,6 +17,7 @@
#include "common_test.h"
#include "indirect_reference_table.h"
+#include "mirror/object-inl.h"
namespace art {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 6caad0110d..481cbad3b8 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -41,6 +41,11 @@
namespace art {
namespace instrumentation {
+// Do we want to deoptimize for method entry and exit listeners or just try to intercept
+// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts the
+// application's performance.
+static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = false;
+
static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
@@ -264,12 +269,14 @@ void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t ev
bool require_interpreter = false;
if ((events & kMethodEntered) != 0) {
method_entry_listeners_.push_back(listener);
- require_entry_exit_stubs = true;
+ require_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners;
+ require_entry_exit_stubs = !kDeoptimizeForAccurateMethodEntryExitListeners;
have_method_entry_listeners_ = true;
}
if ((events & kMethodExited) != 0) {
method_exit_listeners_.push_back(listener);
- require_entry_exit_stubs = true;
+ require_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners;
+ require_entry_exit_stubs = !kDeoptimizeForAccurateMethodEntryExitListeners;
have_method_exit_listeners_ = true;
}
if ((events & kMethodUnwind) != 0) {
@@ -300,7 +307,10 @@ void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t
method_entry_listeners_.remove(listener);
}
have_method_entry_listeners_ = method_entry_listeners_.size() > 0;
- require_entry_exit_stubs |= have_method_entry_listeners_;
+ require_entry_exit_stubs |= have_method_entry_listeners_ &&
+ !kDeoptimizeForAccurateMethodEntryExitListeners;
+ require_interpreter = have_method_entry_listeners_ &&
+ kDeoptimizeForAccurateMethodEntryExitListeners;
}
if ((events & kMethodExited) != 0) {
bool contains = std::find(method_exit_listeners_.begin(), method_exit_listeners_.end(),
@@ -309,7 +319,10 @@ void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t
method_exit_listeners_.remove(listener);
}
have_method_exit_listeners_ = method_exit_listeners_.size() > 0;
- require_entry_exit_stubs |= have_method_exit_listeners_;
+ require_entry_exit_stubs |= have_method_exit_listeners_ &&
+ !kDeoptimizeForAccurateMethodEntryExitListeners;
+ require_interpreter = have_method_exit_listeners_ &&
+ kDeoptimizeForAccurateMethodEntryExitListeners;
}
if ((events & kMethodUnwind) != 0) {
method_unwind_listeners_.remove(listener);
@@ -455,7 +468,7 @@ void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_o
void Instrumentation::ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method,
uint32_t catch_dex_pc,
- mirror::Throwable* exception_object) {
+ mirror::Throwable* exception_object) const {
if (have_exception_caught_listeners_) {
DCHECK_EQ(thread->GetException(NULL), exception_object);
thread->ClearException();
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 6c80b41b64..28f95553f8 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -186,7 +186,7 @@ class Instrumentation {
// Inform listeners that an exception was caught.
void ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+ mirror::Throwable* exception_object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called when an instrumented method is entered. The intended link register (lr) is saved so
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 20729790a9..8f9e072093 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -47,15 +47,16 @@ void InternTable::VisitRoots(RootVisitor* visitor, void* arg,
bool only_dirty, bool clean_dirty) {
MutexLock mu(Thread::Current(), intern_table_lock_);
if (!only_dirty || is_dirty_) {
- for (const auto& strong_intern : strong_interns_) {
- visitor(strong_intern.second, arg);
+ for (auto& strong_intern : strong_interns_) {
+ strong_intern.second = reinterpret_cast<mirror::String*>(visitor(strong_intern.second, arg));
+ DCHECK(strong_intern.second != nullptr);
}
+
if (clean_dirty) {
is_dirty_ = false;
}
}
- // Note: we deliberately don't visit the weak_interns_ table and the immutable
- // image roots.
+ // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
mirror::String* InternTable::Lookup(Table& table, mirror::String* s,
@@ -216,14 +217,16 @@ bool InternTable::ContainsWeak(mirror::String* s) {
return found == s;
}
-void InternTable::SweepInternTableWeaks(IsMarkedTester is_marked, void* arg) {
+void InternTable::SweepInternTableWeaks(RootVisitor visitor, void* arg) {
MutexLock mu(Thread::Current(), intern_table_lock_);
- // TODO: std::remove_if + lambda.
for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
mirror::Object* object = it->second;
- if (!is_marked(object, arg)) {
+ mirror::Object* new_object = visitor(object, arg);
+ if (new_object == nullptr) {
+ // TODO: use it = weak_interns_.erase(it) when we get a c++11 stl.
weak_interns_.erase(it++);
} else {
+ it->second = down_cast<mirror::String*>(new_object);
++it;
}
}
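SweepInternTableWeaks now takes the same visitor shape as the rest of this patch: the visitor returns nullptr for a dead object, in which case the weak intern is erased, or the object's current address, which is written back in case the object moved. The same erase-or-update loop sketched against a std::map (the TODO notes ART's STL predates C++11's map::erase returning an iterator; the sketch below assumes C++11):

    #include <cstdint>
    #include <map>

    struct StringSketch { bool marked; };

    // Visitor returns null for a dead object, or the object's current
    // (possibly new) address for a live one.
    typedef StringSketch* (*SweepVisitorSketch)(StringSketch* s, void* arg);

    std::map<int32_t, StringSketch*> weak_interns;

    void SweepWeaks(SweepVisitorSketch visitor, void* arg) {
      for (std::map<int32_t, StringSketch*>::iterator it = weak_interns.begin();
           it != weak_interns.end();) {
        StringSketch* new_object = visitor(it->second, arg);
        if (new_object == nullptr) {
          it = weak_interns.erase(it);  // dead: drop the weak intern
        } else {
          it->second = new_object;      // live: slot updated in case it moved
          ++it;
        }
      }
    }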
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index e68af907ea..eec63c874f 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -55,8 +55,7 @@ class InternTable {
// Interns a potentially new string in the 'weak' table. (See above.)
mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepInternTableWeaks(IsMarkedTester is_marked, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void SweepInternTableWeaks(RootVisitor visitor, void* arg);
bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index d79d2c4b31..aa2502d81f 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -81,8 +81,11 @@ class TestPredicate {
mutable std::vector<const mirror::String*> expected_;
};
-bool IsMarked(const mirror::Object* object, void* arg) {
- return reinterpret_cast<TestPredicate*>(arg)->IsMarked(object);
+mirror::Object* IsMarkedSweepingVisitor(mirror::Object* object, void* arg) {
+ if (reinterpret_cast<TestPredicate*>(arg)->IsMarked(object)) {
+ return object;
+ }
+ return nullptr;
}
TEST_F(InternTableTest, SweepInternTableWeaks) {
@@ -105,7 +108,7 @@ TEST_F(InternTableTest, SweepInternTableWeaks) {
p.Expect(s1.get());
{
ReaderMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
- t.SweepInternTableWeaks(IsMarked, &p);
+ t.SweepInternTableWeaks(IsMarkedSweepingVisitor, &p);
}
EXPECT_EQ(2U, t.Size());
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 8cd2ac8597..8aa6fa2104 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -14,153 +14,11 @@
* limitations under the License.
*/
-#include "interpreter.h"
-
-#include <math.h>
-
-#include "base/logging.h"
-#include "class_linker-inl.h"
-#include "common_throws.h"
-#include "dex_file-inl.h"
-#include "dex_instruction-inl.h"
-#include "dex_instruction.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "gc/accounting/card_table-inl.h"
-#include "invoke_arg_array_builder.h"
-#include "nth_caller_visitor.h"
-#include "mirror/art_field-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "object_utils.h"
-#include "ScopedLocalRef.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-#include "well_known_classes.h"
-
-using ::art::mirror::ArtField;
-using ::art::mirror::ArtMethod;
-using ::art::mirror::Array;
-using ::art::mirror::BooleanArray;
-using ::art::mirror::ByteArray;
-using ::art::mirror::CharArray;
-using ::art::mirror::Class;
-using ::art::mirror::ClassLoader;
-using ::art::mirror::IntArray;
-using ::art::mirror::LongArray;
-using ::art::mirror::Object;
-using ::art::mirror::ObjectArray;
-using ::art::mirror::ShortArray;
-using ::art::mirror::String;
-using ::art::mirror::Throwable;
+#include "interpreter_common.h"
namespace art {
-
namespace interpreter {
-static const int32_t kMaxInt = std::numeric_limits<int32_t>::max();
-static const int32_t kMinInt = std::numeric_limits<int32_t>::min();
-static const int64_t kMaxLong = std::numeric_limits<int64_t>::max();
-static const int64_t kMinLong = std::numeric_limits<int64_t>::min();
-
-static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
- JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // In a runtime that's not started we intercept certain methods to avoid complicated dependency
- // problems in core libraries.
- std::string name(PrettyMethod(shadow_frame->GetMethod()));
- if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
- std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
- ClassLoader* class_loader = NULL; // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
- Class* found = Runtime::Current()->GetClassLinker()->FindClass(descriptor.c_str(),
- class_loader);
- CHECK(found != NULL) << "Class.forName failed in un-started runtime for class: "
- << PrettyDescriptor(descriptor);
- result->SetL(found);
- } else if (name == "java.lang.Object java.lang.Class.newInstance()") {
- Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- ArtMethod* c = klass->FindDeclaredDirectMethod("<init>", "()V");
- CHECK(c != NULL);
- SirtRef<Object> obj(self, klass->AllocObject(self));
- CHECK(obj.get() != NULL);
- EnterInterpreterFromInvoke(self, c, obj.get(), NULL, NULL);
- result->SetL(obj.get());
- } else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
- // Special managed code cut-out to allow field lookup in an unstarted runtime that would fail
- // if it went through the reflective Dex path.
- Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
- ArtField* found = NULL;
- FieldHelper fh;
- ObjectArray<ArtField>* fields = klass->GetIFields();
- for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
- ArtField* f = fields->Get(i);
- fh.ChangeField(f);
- if (name->Equals(fh.GetName())) {
- found = f;
- }
- }
- if (found == NULL) {
- fields = klass->GetSFields();
- for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
- ArtField* f = fields->Get(i);
- fh.ChangeField(f);
- if (name->Equals(fh.GetName())) {
- found = f;
- }
- }
- }
- CHECK(found != NULL)
- << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
- << name->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
- // TODO: getDeclaredField calls GetType once the field is found to ensure a
- // NoClassDefFoundError is thrown if the field's type cannot be resolved.
- Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
- SirtRef<Object> field(self, jlr_Field->AllocObject(self));
- CHECK(field.get() != NULL);
- ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>", "(Ljava/lang/reflect/ArtField;)V");
- uint32_t args[1];
- args[0] = reinterpret_cast<uint32_t>(found);
- EnterInterpreterFromInvoke(self, c, field.get(), args, NULL);
- result->SetL(field.get());
- } else if (name == "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)" ||
- name == "void java.lang.System.arraycopy(char[], int, char[], int, int)") {
- // Special case array copying without initializing System.
- Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
- jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
- jint dstPos = shadow_frame->GetVReg(arg_offset + 3);
- jint length = shadow_frame->GetVReg(arg_offset + 4);
- if (!ctype->IsPrimitive()) {
- ObjectArray<Object>* src = shadow_frame->GetVRegReference(arg_offset)->AsObjectArray<Object>();
- ObjectArray<Object>* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<Object>();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else if (ctype->IsPrimitiveChar()) {
- CharArray* src = shadow_frame->GetVRegReference(arg_offset)->AsCharArray();
- CharArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else if (ctype->IsPrimitiveInt()) {
- IntArray* src = shadow_frame->GetVRegReference(arg_offset)->AsIntArray();
- IntArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsIntArray();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else {
- UNIMPLEMENTED(FATAL) << "System.arraycopy of unexpected type: " << PrettyDescriptor(ctype);
- }
- } else {
- // Not special, continue with regular interpreter execution.
- artInterpreterToInterpreterBridge(self, mh, code_item, shadow_frame, result);
- }
-}
-
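The cut-outs above amount to string dispatch on a method's pretty signature. A table-driven sketch of the same name-based interception, with hypothetical names and standard C++ only (not ART code):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      // Hypothetical cut-out table keyed by pretty method signature.
      const std::map<std::string, std::function<void()>> cutouts = {
        {"java.lang.Class java.lang.Class.forName(java.lang.String)",
         [] { std::cout << "intercepted forName\n"; }},
      };
      const std::string name =
          "java.lang.Class java.lang.Class.forName(java.lang.String)";
      const auto it = cutouts.find(name);
      if (it != cutouts.end()) {
        it->second();  // Matched a cut-out: run the special case.
      } else {
        std::cout << "regular interpreter bridge\n";  // normal execution path
      }
      return 0;
    }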
// Hand-select a number of methods to be run in a not-yet-started runtime without using JNI.
static void UnstartedRuntimeJni(Thread* self, ArtMethod* method,
Object* receiver, uint32_t* args, JValue* result)
@@ -224,7 +82,7 @@ static void UnstartedRuntimeJni(Thread* self, ArtMethod* method,
}
}
-static void InterpreterJni(Thread* self, ArtMethod* method, StringPiece shorty,
+static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
Object* receiver, uint32_t* args, JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler,
@@ -406,2703 +264,12 @@ static void InterpreterJni(Thread* self, ArtMethod* method, StringPiece shorty,
}
}
-static void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
- ref->MonitorEnter(self);
-}
-
-static void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
- ref->MonitorExit(self);
-}
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<InvokeType type, bool is_range, bool do_access_check>
-static bool DoInvoke(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, JValue* result) NO_THREAD_SAFETY_ANALYSIS;
-
-template<InvokeType type, bool is_range, bool do_access_check>
-static bool DoInvoke(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, JValue* result) {
- bool do_assignability_check = do_access_check;
- uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- Object* receiver = (type == kStatic) ? NULL : shadow_frame.GetVRegReference(vregC);
- ArtMethod* method = FindMethodFromCode(method_idx, receiver, shadow_frame.GetMethod(), self,
- do_access_check, type);
- if (UNLIKELY(method == NULL)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- } else if (UNLIKELY(method->IsAbstract())) {
- ThrowAbstractMethodError(method);
- result->SetJ(0);
- return false;
- }
-
- MethodHelper mh(method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
- uint16_t num_regs;
- uint16_t num_ins;
- if (LIKELY(code_item != NULL)) {
- num_regs = code_item->registers_size_;
- num_ins = code_item->ins_size_;
- } else {
- DCHECK(method->IsNative() || method->IsProxyMethod());
- num_regs = num_ins = ArtMethod::NumArgRegisters(mh.GetShorty());
- if (!method->IsStatic()) {
- num_regs++;
- num_ins++;
- }
- }
-
- void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
- ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame, method, 0, memory));
- size_t cur_reg = num_regs - num_ins;
- if (receiver != NULL) {
- new_shadow_frame->SetVRegReference(cur_reg, receiver);
- ++cur_reg;
- }
-
- const DexFile::TypeList* params;
- if (do_assignability_check) {
- params = mh.GetParameterTypeList();
- }
- size_t arg_offset = (receiver == NULL) ? 0 : 1;
- const char* shorty = mh.GetShorty();
- uint32_t arg[5];
- if (!is_range) {
- inst->GetArgs(arg);
- }
- for (size_t shorty_pos = 0; cur_reg < num_regs; ++shorty_pos, cur_reg++, arg_offset++) {
- DCHECK_LT(shorty_pos + 1, mh.GetShortyLength());
- size_t arg_pos = is_range ? vregC + arg_offset : arg[arg_offset];
- switch (shorty[shorty_pos + 1]) {
- case 'L': {
- Object* o = shadow_frame.GetVRegReference(arg_pos);
- if (do_assignability_check && o != NULL) {
- Class* arg_type = mh.GetClassFromTypeIdx(params->GetTypeItem(shorty_pos).type_idx_);
- if (arg_type == NULL) {
- CHECK(self->IsExceptionPending());
- return false;
- }
- if (!o->VerifierInstanceOf(arg_type)) {
- // This should never happen.
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
- "Invoking %s with bad arg %d, type '%s' not instance of '%s'",
- mh.GetName(), shorty_pos,
- ClassHelper(o->GetClass()).GetDescriptor(),
- ClassHelper(arg_type).GetDescriptor());
- return false;
- }
- }
- new_shadow_frame->SetVRegReference(cur_reg, o);
- break;
- }
- case 'J': case 'D': {
- uint64_t wide_value = (static_cast<uint64_t>(shadow_frame.GetVReg(arg_pos + 1)) << 32) |
- static_cast<uint32_t>(shadow_frame.GetVReg(arg_pos));
- new_shadow_frame->SetVRegLong(cur_reg, wide_value);
- cur_reg++;
- arg_offset++;
- break;
- }
- default:
- new_shadow_frame->SetVReg(cur_reg, shadow_frame.GetVReg(arg_pos));
- break;
- }
- }
-
- if (LIKELY(Runtime::Current()->IsStarted())) {
- (method->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
- } else {
- UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins);
- }
- return !self->IsExceptionPending();
-}
-
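DoInvoke's argument copy above is driven by the callee's shorty: the first character encodes the return type, each subsequent character one argument, and 'J'/'D' values occupy two consecutive 32-bit vreg slots. A standalone sketch of that marshalling rule with made-up values:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      const char* shorty = "VILJ";  // void f(int, Object, long)
      const std::vector<uint32_t> caller = {7, 42, 1, 2};  // fake caller vregs
      std::vector<uint32_t> callee;
      size_t src = 0;
      for (size_t i = 1; shorty[i] != '\0'; ++i) {
        switch (shorty[i]) {
          case 'J': case 'D':  // wide value: copy two 32-bit halves
            callee.push_back(caller[src++]);
            callee.push_back(caller[src++]);
            break;
          default:             // 'I', 'L', 'F', ...: one slot
            callee.push_back(caller[src++]);
            break;
        }
      }
      printf("copied %zu slots\n", callee.size());  // prints 4
      return 0;
    }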
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<bool is_range>
-static bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, JValue* result)
- NO_THREAD_SAFETY_ANALYSIS;
+enum InterpreterImplKind {
+ kSwitchImpl, // switch-based interpreter implementation.
+ kComputedGotoImplKind // computed-goto-based interpreter implementation.
+};
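The new InterpreterImplKind enum names the two classic dispatch strategies: a central switch over opcodes versus computed goto, where each handler jumps straight to the next handler. A minimal sketch of computed-goto dispatch, relying on the GCC/Clang labels-as-values extension (illustrative only, not the ART implementation):

    #include <cstdio>

    int main() {
      static const int program[] = {0, 0, 1};           // two ADDs, then HALT
      static void* handlers[] = {&&op_add, &&op_halt};  // opcode -> label
      int acc = 0;
      const int* pc = program;
      goto *handlers[*pc++];  // prime the dispatch
    op_add:
      ++acc;
      goto *handlers[*pc++];  // threaded dispatch: one indirect jump per handler
    op_halt:
      printf("acc=%d\n", acc);  // prints acc=2
      return 0;
    }

The usual argument for the computed-goto form is that giving each handler its own indirect jump helps the branch predictor compared with funnelling every opcode through one central switch.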
-template<bool is_range>
-static bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, JValue* result) {
- uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- Object* receiver = shadow_frame.GetVRegReference(vregC);
- if (UNLIKELY(receiver == NULL)) {
- // We lost the reference to the method index, so we cannot produce a more
- // precise exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- return false;
- }
- uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- // TODO: use ObjectArray<T>::GetWithoutChecks ?
- ArtMethod* method = receiver->GetClass()->GetVTable()->Get(vtable_idx);
- if (UNLIKELY(method == NULL)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- } else if (UNLIKELY(method->IsAbstract())) {
- ThrowAbstractMethodError(method);
- result->SetJ(0);
- return false;
- }
-
- MethodHelper mh(method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
- uint16_t num_regs;
- uint16_t num_ins;
- if (code_item != NULL) {
- num_regs = code_item->registers_size_;
- num_ins = code_item->ins_size_;
- } else {
- DCHECK(method->IsNative() || method->IsProxyMethod());
- num_regs = num_ins = ArtMethod::NumArgRegisters(mh.GetShorty());
- if (!method->IsStatic()) {
- num_regs++;
- num_ins++;
- }
- }
-
- void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
- ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame,
- method, 0, memory));
- size_t cur_reg = num_regs - num_ins;
- if (receiver != NULL) {
- new_shadow_frame->SetVRegReference(cur_reg, receiver);
- ++cur_reg;
- }
-
- size_t arg_offset = (receiver == NULL) ? 0 : 1;
- const char* shorty = mh.GetShorty();
- uint32_t arg[5];
- if (!is_range) {
- inst->GetArgs(arg);
- }
- for (size_t shorty_pos = 0; cur_reg < num_regs; ++shorty_pos, cur_reg++, arg_offset++) {
- DCHECK_LT(shorty_pos + 1, mh.GetShortyLength());
- size_t arg_pos = is_range ? vregC + arg_offset : arg[arg_offset];
- switch (shorty[shorty_pos + 1]) {
- case 'L': {
- Object* o = shadow_frame.GetVRegReference(arg_pos);
- new_shadow_frame->SetVRegReference(cur_reg, o);
- break;
- }
- case 'J': case 'D': {
- uint64_t wide_value = (static_cast<uint64_t>(shadow_frame.GetVReg(arg_pos + 1)) << 32) |
- static_cast<uint32_t>(shadow_frame.GetVReg(arg_pos));
- new_shadow_frame->SetVRegLong(cur_reg, wide_value);
- cur_reg++;
- arg_offset++;
- break;
- }
- default:
- new_shadow_frame->SetVReg(cur_reg, shadow_frame.GetVReg(arg_pos));
- break;
- }
- }
-
- if (LIKELY(Runtime::Current()->IsStarted())) {
- (method->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
- } else {
- UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins);
- }
- return !self->IsExceptionPending();
-}
-
-// We use template functions to optimize the compiler's inlining process. Otherwise,
-// parts of the code (such as a switch statement) that depend on a constant
-// parameter would not be inlined even though they should be. These constant
-// parameters are now part of the template arguments.
-// Note that these template functions are static and inlined, so they should not
-// appear in the final object file.
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static inline bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst) {
- bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
- uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
- find_type, Primitive::FieldSize(field_type),
- do_access_check);
- if (UNLIKELY(f == NULL)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
- Object* obj;
- if (is_static) {
- obj = f->GetDeclaringClass();
- } else {
- obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true);
- return false;
- }
- }
- uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c();
- switch (field_type) {
- case Primitive::kPrimBoolean:
- shadow_frame.SetVReg(vregA, f->GetBoolean(obj));
- break;
- case Primitive::kPrimByte:
- shadow_frame.SetVReg(vregA, f->GetByte(obj));
- break;
- case Primitive::kPrimChar:
- shadow_frame.SetVReg(vregA, f->GetChar(obj));
- break;
- case Primitive::kPrimShort:
- shadow_frame.SetVReg(vregA, f->GetShort(obj));
- break;
- case Primitive::kPrimInt:
- shadow_frame.SetVReg(vregA, f->GetInt(obj));
- break;
- case Primitive::kPrimLong:
- shadow_frame.SetVRegLong(vregA, f->GetLong(obj));
- break;
- case Primitive::kPrimNot:
- shadow_frame.SetVRegReference(vregA, f->GetObject(obj));
- break;
- default:
- LOG(FATAL) << "Unreachable: " << field_type;
- }
- return true;
-}
-
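A minimal sketch of the template-parameter trick described in the comment above DoFieldGet: once the variant is a compile-time template argument, each instantiation sees a constant condition and the compiler can fold the dependent branch away entirely (simplified, not ART code):

    #include <cstdio>

    template <bool kDoAccessCheck>
    static inline int Access(int v) {
      if (kDoAccessCheck) {     // constant per instantiation: exactly one
        return v < 0 ? -1 : v;  // branch survives in each copy
      }
      return v;
    }

    int main() {
      printf("%d %d\n", Access<true>(-3), Access<false>(-3));  // -1 -3
      return 0;
    }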
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<Primitive::Type field_type>
-static bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template<Primitive::Type field_type>
-static inline bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst) {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- if (UNLIKELY(obj == NULL)) {
- // We lost the reference to the field index, so we cannot produce a more
- // precise exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- return false;
- }
- MemberOffset field_offset(inst->VRegC_22c());
- const bool is_volatile = false; // iget-x-quick only operates on non-volatile fields.
- const uint32_t vregA = inst->VRegA_22c();
- switch (field_type) {
- case Primitive::kPrimInt:
- shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset, is_volatile)));
- break;
- case Primitive::kPrimLong:
- shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset, is_volatile)));
- break;
- case Primitive::kPrimNot:
- shadow_frame.SetVRegReference(vregA, obj->GetFieldObject<mirror::Object*>(field_offset, is_volatile));
- break;
- default:
- LOG(FATAL) << "Unreachable: " << field_type;
- }
- return true;
-}
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
- const Instruction* inst)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static inline bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
- const Instruction* inst) {
- bool do_assignability_check = do_access_check;
- bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
- uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
- find_type, Primitive::FieldSize(field_type),
- do_access_check);
- if (UNLIKELY(f == NULL)) {
- CHECK(self->IsExceptionPending());
- return false;
- }
- Object* obj;
- if (is_static) {
- obj = f->GetDeclaringClass();
- } else {
- obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(),
- f, false);
- return false;
- }
- }
- uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c();
- switch (field_type) {
- case Primitive::kPrimBoolean:
- f->SetBoolean(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimByte:
- f->SetByte(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimChar:
- f->SetChar(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimShort:
- f->SetShort(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimInt:
- f->SetInt(obj, shadow_frame.GetVReg(vregA));
- break;
- case Primitive::kPrimLong:
- f->SetLong(obj, shadow_frame.GetVRegLong(vregA));
- break;
- case Primitive::kPrimNot: {
- Object* reg = shadow_frame.GetVRegReference(vregA);
- if (do_assignability_check && reg != NULL) {
- Class* field_class = FieldHelper(f).GetType();
- if (!reg->VerifierInstanceOf(field_class)) {
- // This should never happen.
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
- "Put '%s' that is not instance of field '%s' in '%s'",
- ClassHelper(reg->GetClass()).GetDescriptor(),
- ClassHelper(field_class).GetDescriptor(),
- ClassHelper(f->GetDeclaringClass()).GetDescriptor());
- return false;
- }
- }
- f->SetObj(obj, reg);
- break;
- }
- default:
- LOG(FATAL) << "Unreachable: " << field_type;
- }
- return true;
-}
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<Primitive::Type field_type>
-static bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template<Primitive::Type field_type>
-static inline bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst) {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- if (UNLIKELY(obj == NULL)) {
- // We lost the reference to the field index, so we cannot produce a more
- // precise exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- return false;
- }
- MemberOffset field_offset(inst->VRegC_22c());
- const bool is_volatile = false; // iput-x-quick only operates on non-volatile fields.
- const uint32_t vregA = inst->VRegA_22c();
- switch (field_type) {
- case Primitive::kPrimInt:
- obj->SetField32(field_offset, shadow_frame.GetVReg(vregA), is_volatile);
- break;
- case Primitive::kPrimLong:
- obj->SetField64(field_offset, shadow_frame.GetVRegLong(vregA), is_volatile);
- break;
- case Primitive::kPrimNot:
- obj->SetFieldObject(field_offset, shadow_frame.GetVRegReference(vregA), is_volatile);
- break;
- default:
- LOG(FATAL) << "Unreachable: " << field_type;
- }
- return true;
-}
-
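The *-quick variants above work because the quickening pass rewrites a verified field access so that VRegC carries the field's byte offset rather than a field index, reducing the access to one offsetted load or store. A sketch of offset-based access with hypothetical types:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Obj { int32_t a; int32_t b; };

    // Stand-in for Object::GetField32(MemberOffset, ...).
    static int32_t GetField32(const void* obj, size_t byte_offset) {
      return *reinterpret_cast<const int32_t*>(
          reinterpret_cast<const uint8_t*>(obj) + byte_offset);
    }

    int main() {
      Obj o{11, 22};
      // offsetof(Obj, b) plays the role of the offset baked into iget-quick.
      printf("%d\n", GetField32(&o, offsetof(Obj, b)));  // prints 22
      return 0;
    }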
-static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t string_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Class* java_lang_string_class = String::GetJavaLangString();
- if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (UNLIKELY(!class_linker->EnsureInitialized(java_lang_string_class,
- true, true))) {
- DCHECK(self->IsExceptionPending());
- return NULL;
- }
- }
- return mh.ResolveString(string_idx);
-}
-
-static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
- int32_t dividend, int32_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(divisor == 0)) {
- ThrowArithmeticExceptionDivideByZero();
- return false;
- }
- if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
- shadow_frame.SetVReg(result_reg, kMinInt);
- } else {
- shadow_frame.SetVReg(result_reg, dividend / divisor);
- }
- return true;
-}
-
-static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
- int32_t dividend, int32_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(divisor == 0)) {
- ThrowArithmeticExceptionDivideByZero();
- return false;
- }
- if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
- shadow_frame.SetVReg(result_reg, 0);
- } else {
- shadow_frame.SetVReg(result_reg, dividend % divisor);
- }
- return true;
-}
-
-static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
- int64_t dividend, int64_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(divisor == 0)) {
- ThrowArithmeticExceptionDivideByZero();
- return false;
- }
- if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
- shadow_frame.SetVRegLong(result_reg, kMinLong);
- } else {
- shadow_frame.SetVRegLong(result_reg, dividend / divisor);
- }
- return true;
-}
-
-static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
- int64_t dividend, int64_t divisor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(divisor == 0)) {
- ThrowArithmeticExceptionDivideByZero();
- return false;
- }
- if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
- shadow_frame.SetVRegLong(result_reg, 0);
- } else {
- shadow_frame.SetVRegLong(result_reg, dividend % divisor);
- }
- return true;
-}
-
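The kMinInt/kMinLong special cases in the four helpers above exist because the true quotient of INT_MIN / -1 is 2^31, which is unrepresentable in two's complement (hardware idiv traps on it), while Java semantics require INT_MIN / -1 == INT_MIN and INT_MIN % -1 == 0. A standalone sketch of the divide rule, assuming the caller has already rejected a zero divisor:

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    // Java-style int32 division: saturate the one overflowing case.
    static int32_t JavaDiv(int32_t dividend, int32_t divisor) {
      if (dividend == std::numeric_limits<int32_t>::min() && divisor == -1) {
        return dividend;  // INT_MIN / -1 == INT_MIN in Java
      }
      return dividend / divisor;
    }

    int main() {
      printf("%d\n", JavaDiv(std::numeric_limits<int32_t>::min(), -1));
      return 0;
    }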
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-// Returns true on success, otherwise throws an exception and returns false.
-template <bool is_range, bool do_access_check>
-static bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
- Thread* self, JValue* result)
- NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-
-template <bool is_range, bool do_access_check>
-static inline bool DoFilledNewArray(const Instruction* inst,
- const ShadowFrame& shadow_frame,
- Thread* self, JValue* result) {
- DCHECK(inst->Opcode() == Instruction::FILLED_NEW_ARRAY ||
- inst->Opcode() == Instruction::FILLED_NEW_ARRAY_RANGE);
- const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
- if (!is_range) {
- // Check that FILLED_NEW_ARRAY's length does not exceed 5 arguments.
- CHECK_LE(length, 5);
- }
- if (UNLIKELY(length < 0)) {
- ThrowNegativeArraySizeException(length);
- return false;
- }
- uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- Class* arrayClass = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(arrayClass == NULL)) {
- DCHECK(self->IsExceptionPending());
- return false;
- }
- CHECK(arrayClass->IsArrayClass());
- Class* componentClass = arrayClass->GetComponentType();
- if (UNLIKELY(componentClass->IsPrimitive() && !componentClass->IsPrimitiveInt())) {
- if (componentClass->IsPrimitiveLong() || componentClass->IsPrimitiveDouble()) {
- ThrowRuntimeException("Bad filled array request for type %s",
- PrettyDescriptor(componentClass).c_str());
- } else {
- self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
- "Ljava/lang/InternalError;",
- "Found type %s; filled-new-array not implemented for anything but \'int\'",
- PrettyDescriptor(componentClass).c_str());
- }
- return false;
- }
- Object* newArray = Array::Alloc(self, arrayClass, length);
- if (UNLIKELY(newArray == NULL)) {
- DCHECK(self->IsExceptionPending());
- return false;
- }
- if (is_range) {
- uint32_t vregC = inst->VRegC_3rc();
- const bool is_primitive_int_component = componentClass->IsPrimitiveInt();
- for (int32_t i = 0; i < length; ++i) {
- if (is_primitive_int_component) {
- newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(vregC + i));
- } else {
- newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(vregC + i));
- }
- }
- } else {
- uint32_t arg[5];
- inst->GetArgs(arg);
- const bool is_primitive_int_component = componentClass->IsPrimitiveInt();
- for (int32_t i = 0; i < length; ++i) {
- if (is_primitive_int_component) {
- newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(arg[i]));
- } else {
- newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(arg[i]));
- }
- }
- }
-
- result->SetL(newArray);
- return true;
-}
-
-static inline const Instruction* DoSparseSwitch(const Instruction* inst,
- const ShadowFrame& shadow_frame)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
- const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
- int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t());
- DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
- uint16_t size = switch_data[1];
- DCHECK_GT(size, 0);
- const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- DCHECK(IsAligned<4>(keys));
- const int32_t* entries = keys + size;
- DCHECK(IsAligned<4>(entries));
- int lo = 0;
- int hi = size - 1;
- while (lo <= hi) {
- int mid = (lo + hi) / 2;
- int32_t foundVal = keys[mid];
- if (test_val < foundVal) {
- hi = mid - 1;
- } else if (test_val > foundVal) {
- lo = mid + 1;
- } else {
- return inst->RelativeAt(entries[mid]);
- }
- }
- return inst->Next_3xx();
-}
-
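DoSparseSwitch above searches a payload of sorted case keys alongside a parallel table of branch targets. A standalone sketch of that lookup (using the overflow-safe midpoint form, which the original does not strictly need since payload sizes fit in uint16_t):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32_t keys[]    = {-5, 0, 7, 100};   // sorted case values
      const int32_t targets[] = {10, 20, 30, 40};  // parallel branch offsets
      const int32_t test_val = 7;
      int lo = 0;
      int hi = 3;
      while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        if (test_val < keys[mid]) {
          hi = mid - 1;
        } else if (test_val > keys[mid]) {
          lo = mid + 1;
        } else {
          printf("branch by offset %d\n", targets[mid]);
          return 0;
        }
      }
      printf("fall through\n");  // matches inst->Next_3xx()
      return 0;
    }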
-static inline const Instruction* FindNextInstructionFollowingException(Thread* self,
- ShadowFrame& shadow_frame,
- uint32_t dex_pc,
- const uint16_t* insns,
- SirtRef<Object>& this_object_ref,
- instrumentation::Instrumentation* instrumentation)
- ALWAYS_INLINE;
-
-static inline const Instruction* FindNextInstructionFollowingException(Thread* self,
- ShadowFrame& shadow_frame,
- uint32_t dex_pc,
- const uint16_t* insns,
- SirtRef<Object>& this_object_ref,
- instrumentation::Instrumentation* instrumentation)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- self->VerifyStack();
- ThrowLocation throw_location;
- mirror::Throwable* exception = self->GetException(&throw_location);
- bool clear_exception;
- uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc,
- &clear_exception);
- if (found_dex_pc == DexFile::kDexNoIndex) {
- instrumentation->MethodUnwindEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), dex_pc);
- return NULL;
- } else {
- instrumentation->ExceptionCaughtEvent(self, throw_location,
- shadow_frame.GetMethod(),
- found_dex_pc, exception);
- if (clear_exception) {
- self->ClearException();
- }
- return Instruction::At(insns + found_dex_pc);
- }
-}
-
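FindNextInstructionFollowingException above asks the method for a catch block covering the faulting dex pc whose type matches the thrown exception, unwinding to the caller when none exists. A simplified sketch with hypothetical types (the real lookup also handles catch-all entries and subtype matches):

    #include <cstdint>
    #include <cstdio>

    struct CatchEntry { uint32_t start_pc, end_pc, handler_pc; int type; };
    constexpr uint32_t kNoIndex = 0xffffffff;  // stand-in for DexFile::kDexNoIndex

    static uint32_t FindCatchBlock(const CatchEntry* table, int n,
                                   uint32_t dex_pc, int thrown_type) {
      for (int i = 0; i < n; ++i) {
        if (dex_pc >= table[i].start_pc && dex_pc < table[i].end_pc &&
            table[i].type == thrown_type) {
          return table[i].handler_pc;  // resume interpreting here
        }
      }
      return kNoIndex;  // no handler: unwind to the caller
    }

    int main() {
      const CatchEntry table[] = {{0, 8, 20, 1}, {8, 16, 24, 2}};
      printf("handler at pc %u\n", FindCatchBlock(table, 2, 9, 2));  // 24
      return 0;
    }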
-#define HANDLE_PENDING_EXCEPTION() \
- CHECK(self->IsExceptionPending()); \
- inst = FindNextInstructionFollowingException(self, shadow_frame, inst->GetDexPc(insns), insns, \
- this_object_ref, instrumentation); \
- if (inst == NULL) { \
- return JValue(); /* Handled in caller. */ \
- }
-
-#define POSSIBLY_HANDLE_PENDING_EXCEPTION(is_exception_pending, next_function) \
- if (UNLIKELY(is_exception_pending)) { \
- HANDLE_PENDING_EXCEPTION(); \
- } else { \
- inst = inst->next_function(); \
- }
-
-static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
- __attribute__((cold, noreturn, noinline));
-
-static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- LOG(FATAL) << "Unexpected instruction: " << inst->DumpString(&mh.GetDexFile());
- exit(0); // Unreachable, keep GCC happy.
-}
-
-// Code to run before each dex instruction.
-#define PREAMBLE()
-
-// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
-// specialization.
-template<bool do_access_check>
-static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register)
- NO_THREAD_SAFETY_ANALYSIS __attribute__((hot));
-
-template<bool do_access_check>
-static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
- bool do_assignability_check = do_access_check;
- if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
- LOG(FATAL) << "Invalid shadow frame for interpreter use";
- return JValue();
- }
- self->VerifyStack();
- instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
-
- // As the 'this' object won't change during the execution of the current code,
- // we want to cache it in local variables. However, to let the garbage
- // collector access it, we store it in SIRT references.
- SirtRef<Object> this_object_ref(self, shadow_frame.GetThisObject(code_item->ins_size_));
-
- uint32_t dex_pc = shadow_frame.GetDexPC();
- if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing.
- if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
- instrumentation->MethodEnterEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), 0);
- }
- }
- const uint16_t* const insns = code_item->insns_;
- const Instruction* inst = Instruction::At(insns + dex_pc);
- while (true) {
- dex_pc = inst->GetDexPc(insns);
- shadow_frame.SetDexPC(dex_pc);
- if (UNLIKELY(self->TestAllFlags())) {
- CheckSuspend(self);
- }
- if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), dex_pc);
- }
- const bool kTracing = false;
- if (kTracing) {
-#define TRACE_LOG std::cerr
- TRACE_LOG << PrettyMethod(shadow_frame.GetMethod())
- << StringPrintf("\n0x%x: ", dex_pc)
- << inst->DumpString(&mh.GetDexFile()) << "\n";
- for (size_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
- uint32_t raw_value = shadow_frame.GetVReg(i);
- Object* ref_value = shadow_frame.GetVRegReference(i);
- TRACE_LOG << StringPrintf(" vreg%d=0x%08X", i, raw_value);
- if (ref_value != NULL) {
- if (ref_value->GetClass()->IsStringClass() &&
- ref_value->AsString()->GetCharArray() != NULL) {
- TRACE_LOG << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
- } else {
- TRACE_LOG << "/" << PrettyTypeOf(ref_value);
- }
- }
- }
- TRACE_LOG << "\n";
-#undef TRACE_LOG
- }
- switch (inst->Opcode()) {
- case Instruction::NOP:
- PREAMBLE();
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(),
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_FROM16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22x(),
- shadow_frame.GetVReg(inst->VRegB_22x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MOVE_16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_32x(),
- shadow_frame.GetVReg(inst->VRegB_32x()));
- inst = inst->Next_3xx();
- break;
- case Instruction::MOVE_WIDE:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(),
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_WIDE_FROM16:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_22x(),
- shadow_frame.GetVRegLong(inst->VRegB_22x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MOVE_WIDE_16:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_32x(),
- shadow_frame.GetVRegLong(inst->VRegB_32x()));
- inst = inst->Next_3xx();
- break;
- case Instruction::MOVE_OBJECT:
- PREAMBLE();
- shadow_frame.SetVRegReference(inst->VRegA_12x(),
- shadow_frame.GetVRegReference(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_OBJECT_FROM16:
- PREAMBLE();
- shadow_frame.SetVRegReference(inst->VRegA_22x(),
- shadow_frame.GetVRegReference(inst->VRegB_22x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MOVE_OBJECT_16:
- PREAMBLE();
- shadow_frame.SetVRegReference(inst->VRegA_32x(),
- shadow_frame.GetVRegReference(inst->VRegB_32x()));
- inst = inst->Next_3xx();
- break;
- case Instruction::MOVE_RESULT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_11x(), result_register.GetI());
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_RESULT_WIDE:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_11x(), result_register.GetJ());
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_RESULT_OBJECT:
- PREAMBLE();
- shadow_frame.SetVRegReference(inst->VRegA_11x(), result_register.GetL());
- inst = inst->Next_1xx();
- break;
- case Instruction::MOVE_EXCEPTION: {
- PREAMBLE();
- Throwable* exception = self->GetException(NULL);
- self->ClearException();
- shadow_frame.SetVRegReference(inst->VRegA_11x(), exception);
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::RETURN_VOID: {
- PREAMBLE();
- JValue result;
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::RETURN_VOID_BARRIER: {
- PREAMBLE();
- ANDROID_MEMBAR_STORE();
- JValue result;
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::RETURN: {
- PREAMBLE();
- JValue result;
- result.SetJ(0);
- result.SetI(shadow_frame.GetVReg(inst->VRegA_11x()));
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::RETURN_WIDE: {
- PREAMBLE();
- JValue result;
- result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x()));
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::RETURN_OBJECT: {
- PREAMBLE();
- JValue result;
- Object* obj_result = shadow_frame.GetVRegReference(inst->VRegA_11x());
- result.SetJ(0);
- result.SetL(obj_result);
- if (do_assignability_check && obj_result != NULL) {
- Class* return_type = MethodHelper(shadow_frame.GetMethod()).GetReturnType();
- if (return_type == NULL) {
- // Return the pending exception.
- HANDLE_PENDING_EXCEPTION();
- }
- if (!obj_result->VerifierInstanceOf(return_type)) {
- // This should never happen.
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
- "Returning '%s' that is not instance of return type '%s'",
- ClassHelper(obj_result->GetClass()).GetDescriptor(),
- ClassHelper(return_type).GetDescriptor());
- HANDLE_PENDING_EXCEPTION();
- }
- }
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
- instrumentation->MethodExitEvent(self, this_object_ref.get(),
- shadow_frame.GetMethod(), inst->GetDexPc(insns),
- result);
- }
- return result;
- }
- case Instruction::CONST_4: {
- PREAMBLE();
- uint4_t dst = inst->VRegA_11n();
- int4_t val = inst->VRegB_11n();
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
- }
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::CONST_16: {
- PREAMBLE();
- uint8_t dst = inst->VRegA_21s();
- int16_t val = inst->VRegB_21s();
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
- }
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::CONST: {
- PREAMBLE();
- uint8_t dst = inst->VRegA_31i();
- int32_t val = inst->VRegB_31i();
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
- }
- inst = inst->Next_3xx();
- break;
- }
- case Instruction::CONST_HIGH16: {
- PREAMBLE();
- uint8_t dst = inst->VRegA_21h();
- int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
- shadow_frame.SetVReg(dst, val);
- if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
- }
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::CONST_WIDE_16:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_21s(), inst->VRegB_21s());
- inst = inst->Next_2xx();
- break;
- case Instruction::CONST_WIDE_32:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_31i(), inst->VRegB_31i());
- inst = inst->Next_3xx();
- break;
- case Instruction::CONST_WIDE:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_51l(), inst->VRegB_51l());
- inst = inst->Next_51l();
- break;
- case Instruction::CONST_WIDE_HIGH16:
- shadow_frame.SetVRegLong(inst->VRegA_21h(),
- static_cast<uint64_t>(inst->VRegB_21h()) << 48);
- inst = inst->Next_2xx();
- break;
- case Instruction::CONST_STRING: {
- PREAMBLE();
- String* s = ResolveString(self, mh, inst->VRegB_21c());
- if (UNLIKELY(s == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(), s);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::CONST_STRING_JUMBO: {
- PREAMBLE();
- String* s = ResolveString(self, mh, inst->VRegB_31c());
- if (UNLIKELY(s == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_31c(), s);
- inst = inst->Next_3xx();
- }
- break;
- }
- case Instruction::CONST_CLASS: {
- PREAMBLE();
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(), c);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::MONITOR_ENTER: {
- PREAMBLE();
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- } else {
- DoMonitorEnter(self, obj);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
- }
- break;
- }
- case Instruction::MONITOR_EXIT: {
- PREAMBLE();
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- } else {
- DoMonitorExit(self, obj);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
- }
- break;
- }
- case Instruction::CHECK_CAST: {
- PREAMBLE();
- Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c());
- if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
- ThrowClassCastException(c, obj->GetClass());
- HANDLE_PENDING_EXCEPTION();
- } else {
- inst = inst->Next_2xx();
- }
- }
- break;
- }
- case Instruction::INSTANCE_OF: {
- PREAMBLE();
- Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
- self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
- shadow_frame.SetVReg(inst->VRegA_22c(), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::ARRAY_LENGTH: {
- PREAMBLE();
- Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x());
- if (UNLIKELY(array == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVReg(inst->VRegA_12x(), array->AsArray()->GetLength());
- inst = inst->Next_1xx();
- }
- break;
- }
- case Instruction::NEW_INSTANCE: {
- PREAMBLE();
- Object* obj = AllocObjectFromCode(inst->VRegB_21c(), shadow_frame.GetMethod(),
- self, do_access_check);
- if (UNLIKELY(obj == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_21c(), obj);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::NEW_ARRAY: {
- PREAMBLE();
- int32_t length = shadow_frame.GetVReg(inst->VRegB_22c());
- Object* obj = AllocArrayFromCode(inst->VRegC_22c(), shadow_frame.GetMethod(),
- length, self, do_access_check);
- if (UNLIKELY(obj == NULL)) {
- HANDLE_PENDING_EXCEPTION();
- } else {
- shadow_frame.SetVRegReference(inst->VRegA_22c(), obj);
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::FILLED_NEW_ARRAY: {
- PREAMBLE();
- bool success = DoFilledNewArray<false, do_access_check>(inst, shadow_frame,
- self, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::FILLED_NEW_ARRAY_RANGE: {
- PREAMBLE();
- bool success = DoFilledNewArray<true, do_access_check>(inst, shadow_frame,
- self, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::FILL_ARRAY_DATA: {
- PREAMBLE();
- Object* obj = shadow_frame.GetVRegReference(inst->VRegA_31t());
- if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- Array* array = obj->AsArray();
- DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
- const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
- const Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
- if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
- self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
- "Ljava/lang/ArrayIndexOutOfBoundsException;",
- "failed FILL_ARRAY_DATA; length=%d, index=%d",
- array->GetLength(), payload->element_count);
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- uint32_t size_in_bytes = payload->element_count * payload->element_width;
- memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
- inst = inst->Next_3xx();
- break;
- }
- case Instruction::THROW: {
- PREAMBLE();
- Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x());
- if (UNLIKELY(exception == NULL)) {
- ThrowNullPointerException(NULL, "throw with null exception");
- } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
- // This should never happen.
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
- "Throwing '%s' that is not instance of Throwable",
- ClassHelper(exception->GetClass()).GetDescriptor());
- } else {
- self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
- }
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- case Instruction::GOTO: {
- PREAMBLE();
- inst = inst->RelativeAt(inst->VRegA_10t());
- break;
- }
- case Instruction::GOTO_16: {
- PREAMBLE();
- inst = inst->RelativeAt(inst->VRegA_20t());
- break;
- }
- case Instruction::GOTO_32: {
- PREAMBLE();
- inst = inst->RelativeAt(inst->VRegA_30t());
- break;
- }
- case Instruction::PACKED_SWITCH: {
- PREAMBLE();
- const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
- int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t());
- DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
- uint16_t size = switch_data[1];
- DCHECK_GT(size, 0);
- const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- DCHECK(IsAligned<4>(keys));
- int32_t first_key = keys[0];
- const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
- DCHECK(IsAligned<4>(targets));
- int32_t index = test_val - first_key;
- if (index >= 0 && index < size) {
- inst = inst->RelativeAt(targets[index]);
- } else {
- inst = inst->Next_3xx();
- }
- break;
- }
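Because a packed-switch payload's case values are contiguous starting at first_key, the PACKED_SWITCH case above indexes the target table directly instead of searching it. A standalone sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32_t first_key = 10;
      const int32_t targets[] = {100, 200, 300};  // targets for cases 10..12
      const int32_t size = 3;
      const int32_t test_val = 11;
      const int32_t index = test_val - first_key;
      if (index >= 0 && index < size) {
        printf("branch by offset %d\n", targets[index]);  // O(1) lookup
      } else {
        printf("fall through\n");
      }
      return 0;
    }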
- case Instruction::SPARSE_SWITCH: {
- PREAMBLE();
- inst = DoSparseSwitch(inst, shadow_frame);
- break;
- }
- case Instruction::CMPL_FLOAT: {
- PREAMBLE();
- float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
- float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
- int32_t result;
- if (val1 > val2) {
- result = 1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = -1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::CMPG_FLOAT: {
- PREAMBLE();
- float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
- float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
- int32_t result;
- if (val1 < val2) {
- result = -1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = 1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::CMPL_DOUBLE: {
- PREAMBLE();
- double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
- double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
- int32_t result;
- if (val1 > val2) {
- result = 1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = -1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
-
- case Instruction::CMPG_DOUBLE: {
- PREAMBLE();
- double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
- double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
- int32_t result;
- if (val1 < val2) {
- result = -1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = 1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
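The if/else ordering in the four floating-point compare cases above encodes the cmpl/cmpg NaN bias: every comparison involving NaN is false, so CMPL falls through to -1 and CMPG falls through to +1. A standalone sketch:

    #include <cmath>
    #include <cstdio>

    static int Cmpl(double a, double b) {  // NaN biases toward -1
      if (a > b) return 1;
      if (a == b) return 0;
      return -1;
    }

    static int Cmpg(double a, double b) {  // NaN biases toward +1
      if (a < b) return -1;
      if (a == b) return 0;
      return 1;
    }

    int main() {
      const double nan = std::nan("");
      printf("cmpl=%d cmpg=%d\n", Cmpl(nan, 0.0), Cmpg(nan, 0.0));  // -1 1
      return 0;
    }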
- case Instruction::CMP_LONG: {
- PREAMBLE();
- int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
- int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
- int32_t result;
- if (val1 > val2) {
- result = 1;
- } else if (val1 == val2) {
- result = 0;
- } else {
- result = -1;
- }
- shadow_frame.SetVReg(inst->VRegA_23x(), result);
- inst = inst->Next_2xx();
- break;
- }
- case Instruction::IF_EQ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) == shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_NE: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) != shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_LT: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) < shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_GE: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) >= shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_GT: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) > shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_LE: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t()) <= shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = inst->RelativeAt(inst->VRegC_22t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_EQZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) == 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_NEZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) != 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_LTZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) < 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_GEZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) >= 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_GTZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) > 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::IF_LEZ: {
- PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_21t()) <= 0) {
- inst = inst->RelativeAt(inst->VRegB_21t());
- } else {
- inst = inst->Next_2xx();
- }
- break;
- }
- case Instruction::AGET_BOOLEAN: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- BooleanArray* array = a->AsBooleanArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_BYTE: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ByteArray* array = a->AsByteArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_CHAR: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- CharArray* array = a->AsCharArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_SHORT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ShortArray* array = a->AsShortArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- IntArray* array = a->AsIntArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVReg(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_WIDE: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- LongArray* array = a->AsLongArray();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVRegLong(inst->VRegA_23x(), array->GetData()[index]);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::AGET_OBJECT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ObjectArray<Object>* array = a->AsObjectArray<Object>();
- if (LIKELY(array->IsValidIndex(index))) {
- shadow_frame.SetVRegReference(inst->VRegA_23x(), array->GetWithoutChecks(index));
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_BOOLEAN: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- BooleanArray* array = a->AsBooleanArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_BYTE: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int8_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ByteArray* array = a->AsByteArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_CHAR: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- CharArray* array = a->AsCharArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_SHORT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int16_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- ShortArray* array = a->AsShortArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t val = shadow_frame.GetVReg(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- IntArray* array = a->AsIntArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_WIDE: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x());
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- LongArray* array = a->AsLongArray();
- if (LIKELY(array->IsValidIndex(index))) {
- array->GetData()[index] = val;
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::APUT_OBJECT: {
- PREAMBLE();
- Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- HANDLE_PENDING_EXCEPTION();
- break;
- }
- int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
- Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x());
- ObjectArray<Object>* array = a->AsObjectArray<Object>();
- if (LIKELY(array->IsValidIndex(index) && array->CheckAssignable(val))) {
- array->SetWithoutChecks(index, val);
- inst = inst->Next_2xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
- break;
- }
- case Instruction::IGET_BOOLEAN: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_BYTE: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_CHAR: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_SHORT: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_WIDE: {
- PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_OBJECT: {
- PREAMBLE();
- bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_QUICK: {
- PREAMBLE();
- bool success = DoIGetQuick<Primitive::kPrimInt>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_WIDE_QUICK: {
- PREAMBLE();
- bool success = DoIGetQuick<Primitive::kPrimLong>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IGET_OBJECT_QUICK: {
- PREAMBLE();
- bool success = DoIGetQuick<Primitive::kPrimNot>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_BOOLEAN: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_BYTE: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_CHAR: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_SHORT: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_WIDE: {
- PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SGET_OBJECT: {
- PREAMBLE();
- bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_BOOLEAN: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_BYTE: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_CHAR: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_SHORT: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_WIDE: {
- PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_OBJECT: {
- PREAMBLE();
- bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_QUICK: {
- PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimInt>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_WIDE_QUICK: {
- PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimLong>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::IPUT_OBJECT_QUICK: {
- PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimNot>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_BOOLEAN: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_BYTE: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_CHAR: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_SHORT: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_WIDE: {
- PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SPUT_OBJECT: {
- PREAMBLE();
- bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::INVOKE_VIRTUAL: {
- PREAMBLE();
- bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_VIRTUAL_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_SUPER: {
- PREAMBLE();
- bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_SUPER_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_DIRECT: {
- PREAMBLE();
- bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_DIRECT_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_INTERFACE: {
- PREAMBLE();
- bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_INTERFACE_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_STATIC: {
- PREAMBLE();
- bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_STATIC_RANGE: {
- PREAMBLE();
- bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_VIRTUAL_QUICK: {
- PREAMBLE();
- bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
- PREAMBLE();
- bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
- break;
- }
- case Instruction::NEG_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(), -shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NOT_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(), ~shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NEG_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(), -shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NOT_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(), ~shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NEG_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(), -shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::NEG_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(), -shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(), shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(), shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(), shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::LONG_TO_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(), shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::LONG_TO_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(), shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::LONG_TO_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(), shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::FLOAT_TO_INT: {
- PREAMBLE();
- float val = shadow_frame.GetVRegFloat(inst->VRegB_12x());
- int32_t result;
- if (val != val) {
- result = 0;
- } else if (val > static_cast<float>(kMaxInt)) {
- result = kMaxInt;
- } else if (val < static_cast<float>(kMinInt)) {
- result = kMinInt;
- } else {
- result = val;
- }
- shadow_frame.SetVReg(inst->VRegA_12x(), result);
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::FLOAT_TO_LONG: {
- PREAMBLE();
- float val = shadow_frame.GetVRegFloat(inst->VRegB_12x());
- int64_t result;
- if (val != val) {
- result = 0;
- } else if (val > static_cast<float>(kMaxLong)) {
- result = kMaxLong;
- } else if (val < static_cast<float>(kMinLong)) {
- result = kMinLong;
- } else {
- result = val;
- }
- shadow_frame.SetVRegLong(inst->VRegA_12x(), result);
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::FLOAT_TO_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(), shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::DOUBLE_TO_INT: {
- PREAMBLE();
- double val = shadow_frame.GetVRegDouble(inst->VRegB_12x());
- int32_t result;
- if (val != val) {
- result = 0;
- } else if (val > static_cast<double>(kMaxInt)) {
- result = kMaxInt;
- } else if (val < static_cast<double>(kMinInt)) {
- result = kMinInt;
- } else {
- result = val;
- }
- shadow_frame.SetVReg(inst->VRegA_12x(), result);
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DOUBLE_TO_LONG: {
- PREAMBLE();
- double val = shadow_frame.GetVRegDouble(inst->VRegB_12x());
- int64_t result;
- if (val != val) {
- result = 0;
- } else if (val > static_cast<double>(kMaxLong)) {
- result = kMaxLong;
- } else if (val < static_cast<double>(kMinLong)) {
- result = kMinLong;
- } else {
- result = val;
- }
- shadow_frame.SetVRegLong(inst->VRegA_12x(), result);
- inst = inst->Next_1xx();
- break;
- }
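The four floating-point-to-integral cases above encode the Java narrowing rules: NaN converts to 0 (the `val != val` comparison is the standard NaN test, since NaN is the only value that compares unequal to itself), and out-of-range values saturate to the target type's extremes instead of wrapping. A minimal standalone sketch of the same clamping logic, with an illustrative function name that is not in the source:

    #include <cstdint>
    #include <limits>

    // Hypothetical standalone version of the FLOAT_TO_INT clamping above.
    int32_t float_to_int32(float val) {
      if (val != val) {   // NaN is the only value for which x != x holds.
        return 0;         // Java defines the int value of NaN as 0.
      }
      if (val >= static_cast<float>(std::numeric_limits<int32_t>::max())) {
        return std::numeric_limits<int32_t>::max();  // Saturate on positive overflow.
      }
      if (val <= static_cast<float>(std::numeric_limits<int32_t>::min())) {
        return std::numeric_limits<int32_t>::min();  // Saturate on negative overflow.
      }
      return static_cast<int32_t>(val);  // In range: truncate toward zero.
    }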
- case Instruction::DOUBLE_TO_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(), shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_BYTE:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(),
- static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_CHAR:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(),
- static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- case Instruction::INT_TO_SHORT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(),
- static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- case Instruction::ADD_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) +
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SUB_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) -
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) *
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_INT: {
- PREAMBLE();
- bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::REM_INT: {
- PREAMBLE();
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::SHL_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) <<
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::SHR_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::USHR_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
- inst = inst->Next_2xx();
- break;
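The `& 0x1f` masks here (and the `& 0x3f` masks in the long variants further down) follow the Java rule that only the low five (respectively six) bits of the shift distance are used, and the `uint32_t` cast in `USHR_INT` turns C++'s arithmetic right shift into the logical shift the bytecode requires. A small illustrative check under those assumptions (the function name is ours, not from the source):

    #include <cassert>
    #include <cstdint>

    void shift_semantics_demo() {
      int32_t v = -8;
      assert((v << (33 & 0x1f)) == -16);  // Distance 33 acts like 1: 33 & 0x1f == 1.
      assert((v >> 1) == -4);             // shr-int is arithmetic: the sign bit is kept.
      // ushr-int shifts in zero bits via the unsigned cast, as in the case above.
      assert(static_cast<int32_t>(static_cast<uint32_t>(v) >> 1) == 0x7FFFFFFC);
    }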
- case Instruction::AND_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) &
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::OR_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) |
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::XOR_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()) ^
- shadow_frame.GetVReg(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) +
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SUB_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) -
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) *
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_LONG:
- PREAMBLE();
- DoLongDivide(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
- break;
- case Instruction::REM_LONG:
- PREAMBLE();
- DoLongRemainder(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
- break;
- case Instruction::AND_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) &
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::OR_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) |
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::XOR_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SHL_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
- inst = inst->Next_2xx();
- break;
- case Instruction::SHR_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
- inst = inst->Next_2xx();
- break;
- case Instruction::USHR_LONG:
- PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_23x(),
- static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
- (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SUB_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
- shadow_frame.GetVRegFloat(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::REM_FLOAT:
- PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_23x(),
- fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
- shadow_frame.GetVRegFloat(inst->VRegC_23x())));
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::SUB_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
- shadow_frame.GetVRegDouble(inst->VRegC_23x()));
- inst = inst->Next_2xx();
- break;
- case Instruction::REM_DOUBLE:
- PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_23x(),
- fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
- shadow_frame.GetVRegDouble(inst->VRegC_23x())));
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) +
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SUB_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) -
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::MUL_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) *
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DIV_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
- break;
- }
- case Instruction::REM_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
- break;
- }
- case Instruction::SHL_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) <<
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SHR_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) >>
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::USHR_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::AND_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) &
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::OR_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) |
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::XOR_INT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) ^
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::ADD_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) +
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SUB_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) -
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::MUL_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) *
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DIV_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
- break;
- }
- case Instruction::REM_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
- break;
- }
- case Instruction::AND_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) &
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::OR_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) |
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::XOR_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) ^
- shadow_frame.GetVRegLong(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SHL_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) <<
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SHR_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) >>
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::USHR_LONG_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegLong(vregA,
- static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
- (shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::ADD_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) +
- shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SUB_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) -
- shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::MUL_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) *
- shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DIV_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- shadow_frame.GetVRegFloat(vregA) /
- shadow_frame.GetVRegFloat(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::REM_FLOAT_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegFloat(vregA,
- fmodf(shadow_frame.GetVRegFloat(vregA),
- shadow_frame.GetVRegFloat(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::ADD_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) +
- shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::SUB_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) -
- shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::MUL_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) *
- shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::DIV_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- shadow_frame.GetVRegDouble(vregA) /
- shadow_frame.GetVRegDouble(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::REM_DOUBLE_2ADDR: {
- PREAMBLE();
- uint4_t vregA = inst->VRegA_12x();
- shadow_frame.SetVRegDouble(vregA,
- fmod(shadow_frame.GetVRegDouble(vregA),
- shadow_frame.GetVRegDouble(inst->VRegB_12x())));
- inst = inst->Next_1xx();
- break;
- }
- case Instruction::ADD_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) +
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::RSUB_INT:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- inst->VRegC_22s() -
- shadow_frame.GetVReg(inst->VRegB_22s()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) *
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_INT_LIT16: {
- PREAMBLE();
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::REM_INT_LIT16: {
- PREAMBLE();
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::AND_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) &
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::OR_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) |
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::XOR_INT_LIT16:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()) ^
- inst->VRegC_22s());
- inst = inst->Next_2xx();
- break;
- case Instruction::ADD_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) +
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::RSUB_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- inst->VRegC_22b() -
- shadow_frame.GetVReg(inst->VRegB_22b()));
- inst = inst->Next_2xx();
- break;
- case Instruction::MUL_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) *
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::DIV_INT_LIT8: {
- PREAMBLE();
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::REM_INT_LIT8: {
- PREAMBLE();
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
- break;
- }
- case Instruction::AND_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) &
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::OR_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) |
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::XOR_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) ^
- inst->VRegC_22b());
- inst = inst->Next_2xx();
- break;
- case Instruction::SHL_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) <<
- (inst->VRegC_22b() & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::SHR_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()) >>
- (inst->VRegC_22b() & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::USHR_INT_LIT8:
- PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_22b(),
- static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
- (inst->VRegC_22b() & 0x1f));
- inst = inst->Next_2xx();
- break;
- case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
- case Instruction::UNUSED_EB ... Instruction::UNUSED_FF:
- case Instruction::UNUSED_79:
- case Instruction::UNUSED_7A:
- UnexpectedOpcode(inst, mh);
- }
- }
-} // NOLINT(readability/fn_size)
+static const InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register)
@@ -3114,12 +281,23 @@ static inline JValue Execute(Thread* self, MethodHelper& mh, const DexFile::Code
shadow_frame.GetMethod()->GetDeclaringClass()->IsProxyClass());
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
- if (shadow_frame.GetMethod()->IsPreverified()) {
+
+ if (LIKELY(shadow_frame.GetMethod()->IsPreverified())) {
// Enter the "without access check" interpreter.
- return ExecuteImpl<false>(self, mh, code_item, shadow_frame, result_register);
+ if (kInterpreterImplKind == kSwitchImpl) {
+ return ExecuteSwitchImpl<false>(self, mh, code_item, shadow_frame, result_register);
+ } else {
+ DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
+ return ExecuteGotoImpl<false>(self, mh, code_item, shadow_frame, result_register);
+ }
} else {
// Enter the "with access check" interpreter.
- return ExecuteImpl<true>(self, mh, code_item, shadow_frame, result_register);
+ if (kInterpreterImplKind == kSwitchImpl) {
+ return ExecuteSwitchImpl<true>(self, mh, code_item, shadow_frame, result_register);
+ } else {
+ DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
+ return ExecuteGotoImpl<true>(self, mh, code_item, shadow_frame, result_register);
+ }
}
}
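The new `kInterpreterImplKind` constant selects between two dispatch strategies at compile time: a portable `switch`-based loop and a computed-goto loop built on the GCC/Clang labels-as-values extension, where each opcode handler jumps directly to the next handler. A toy sketch of the computed-goto pattern (the opcodes and handlers here are invented for illustration):

    // Minimal computed-goto dispatch loop; opcode 0 = add, opcode 1 = halt.
    int run(const unsigned char* pc) {
      static const void* handlers[] = { &&op_add, &&op_halt };
      int acc = 0;
      goto *handlers[*pc];             // Dispatch the first opcode.
    op_add:
      ++acc;
      ++pc;
      goto *handlers[*pc];             // Each handler dispatches its successor itself.
    op_halt:
      return acc;
    }

Compared with a `switch`, every handler ends in its own indirect branch, giving the CPU one branch-predictor entry per opcode rather than a single shared dispatch point.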
@@ -3244,15 +422,18 @@ extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh
}
ArtMethod* method = shadow_frame->GetMethod();
- if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(),
- true, true)) {
- DCHECK(Thread::Current()->IsExceptionPending());
- return;
+  // Ensure the static method's declaring class is initialized.
+ if (method->IsStatic()) {
+ Class* declaringClass = method->GetDeclaringClass();
+ if (UNLIKELY(!declaringClass->IsInitializing())) {
+ if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaringClass,
+ true, true))) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ return;
+ }
+ CHECK(declaringClass->IsInitializing());
}
- CHECK(method->GetDeclaringClass()->IsInitializing());
}
-
self->PushShadowFrame(shadow_frame);
if (LIKELY(!method->IsNative())) {
@@ -3267,7 +448,6 @@ extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh
}
self->PopShadowFrame();
- return;
}
} // namespace interpreter
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
new file mode 100644
index 0000000000..6f87a8f35b
--- /dev/null
+++ b/runtime/interpreter/interpreter_common.cc
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter_common.h"
+
+namespace art {
+namespace interpreter {
+
+static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
+ JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+// Assign register 'src_reg' of shadow_frame to register 'dest_reg' of new_shadow_frame.
+static inline void AssignRegister(ShadowFrame& new_shadow_frame, const ShadowFrame& shadow_frame,
+ size_t dest_reg, size_t src_reg) {
+  // If both register locations contain the same value, the register probably holds a reference.
+ int32_t src_value = shadow_frame.GetVReg(src_reg);
+ mirror::Object* o = shadow_frame.GetVRegReference(src_reg);
+ if (src_value == reinterpret_cast<int32_t>(o)) {
+ new_shadow_frame.SetVRegReference(dest_reg, o);
+ } else {
+ new_shadow_frame.SetVReg(dest_reg, src_value);
+ }
+}
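The value-comparison heuristic works because a shadow frame keeps two views of each vreg: a raw 32-bit slot that every write updates, and a parallel reference slot that only `SetVRegReference` updates. When the two views agree, the vreg was last written as a reference, so the copy must also go through `SetVRegReference` to keep the GC's view consistent. A toy model of that layout (names and sizes are illustrative, and packing a pointer into an `int32_t` assumes a 32-bit address space, just as the `reinterpret_cast<int32_t>` above does):

    #include <cstddef>
    #include <cstdint>

    struct ToyFrame {
      int32_t values[16];  // Raw view: every vreg write lands here.
      void* refs[16];      // GC-visible view: only reference writes land here.
      void SetVReg(size_t i, int32_t v) { values[i] = v; refs[i] = nullptr; }
      void SetVRegReference(size_t i, void* o) {
        values[i] = static_cast<int32_t>(reinterpret_cast<intptr_t>(o));
        refs[i] = o;
      }
      // values[i] matching the pointer bits of refs[i] signals "last written
      // as a reference", which is exactly what AssignRegister tests.
    };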
+
+template<bool is_range, bool do_assignability_check>
+bool DoCall(ArtMethod* method, Object* receiver, Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data, JValue* result) {
+ // Compute method information.
+ MethodHelper mh(method);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ const uint16_t num_ins = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
+ uint16_t num_regs;
+ if (LIKELY(code_item != NULL)) {
+ num_regs = code_item->registers_size_;
+ DCHECK_EQ(num_ins, code_item->ins_size_);
+ } else {
+ DCHECK(method->IsNative() || method->IsProxyMethod());
+ num_regs = num_ins;
+ }
+
+ // Allocate shadow frame on the stack.
+ void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
+ ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame, method, 0, memory));
+
+ // Initialize new shadow frame.
+ const size_t first_dest_reg = num_regs - num_ins;
+ if (do_assignability_check) {
+    // Slow path: we need to do a runtime check on reference assignment. We need to load the
+    // shorty to get the exact type of each reference argument.
+ const DexFile::TypeList* params = mh.GetParameterTypeList();
+ const char* shorty = mh.GetShorty();
+
+ // Handle receiver apart since it's not part of the shorty.
+ size_t dest_reg = first_dest_reg;
+ size_t arg_offset = 0;
+ if (receiver != NULL) {
+ DCHECK(!method->IsStatic());
+ new_shadow_frame->SetVRegReference(dest_reg, receiver);
+ ++dest_reg;
+ ++arg_offset;
+ } else {
+ DCHECK(method->IsStatic());
+ }
+ // TODO: find a cleaner way to separate non-range and range information without duplicating code.
+ uint32_t arg[5]; // only used in invoke-XXX.
+ uint32_t vregC; // only used in invoke-XXX-range.
+ if (is_range) {
+ vregC = inst->VRegC_3rc();
+ } else {
+ inst->GetArgs(arg, inst_data);
+ }
+ for (size_t shorty_pos = 0; dest_reg < num_regs; ++shorty_pos, ++dest_reg, ++arg_offset) {
+ DCHECK_LT(shorty_pos + 1, mh.GetShortyLength());
+ const size_t src_reg = (is_range) ? vregC + arg_offset : arg[arg_offset];
+ switch (shorty[shorty_pos + 1]) {
+ case 'L': {
+ Object* o = shadow_frame.GetVRegReference(src_reg);
+ if (do_assignability_check && o != NULL) {
+ Class* arg_type = mh.GetClassFromTypeIdx(params->GetTypeItem(shorty_pos).type_idx_);
+ if (arg_type == NULL) {
+ CHECK(self->IsExceptionPending());
+ return false;
+ }
+ if (!o->VerifierInstanceOf(arg_type)) {
+ // This should never happen.
+ self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
+ "Ljava/lang/VirtualMachineError;",
+ "Invoking %s with bad arg %d, type '%s' not instance of '%s'",
+ mh.GetName(), shorty_pos,
+ ClassHelper(o->GetClass()).GetDescriptor(),
+ ClassHelper(arg_type).GetDescriptor());
+ return false;
+ }
+ }
+ new_shadow_frame->SetVRegReference(dest_reg, o);
+ break;
+ }
+ case 'J': case 'D': {
+ uint64_t wide_value = (static_cast<uint64_t>(shadow_frame.GetVReg(src_reg + 1)) << 32) |
+ static_cast<uint32_t>(shadow_frame.GetVReg(src_reg));
+ new_shadow_frame->SetVRegLong(dest_reg, wide_value);
+ ++dest_reg;
+ ++arg_offset;
+ break;
+ }
+ default:
+ new_shadow_frame->SetVReg(dest_reg, shadow_frame.GetVReg(src_reg));
+ break;
+ }
+ }
+ } else {
+ // Fast path: no extra checks.
+ if (is_range) {
+ const uint16_t first_src_reg = inst->VRegC_3rc();
+ for (size_t src_reg = first_src_reg, dest_reg = first_dest_reg; dest_reg < num_regs;
+ ++dest_reg, ++src_reg) {
+ AssignRegister(*new_shadow_frame, shadow_frame, dest_reg, src_reg);
+ }
+ } else {
+ DCHECK_LE(num_ins, 5U);
+ uint16_t regList = inst->Fetch16(2);
+ uint16_t count = num_ins;
+ if (count == 5) {
+ AssignRegister(*new_shadow_frame, shadow_frame, first_dest_reg + 4U, (inst_data >> 8) & 0x0f);
+ --count;
+ }
+ for (size_t arg_index = 0; arg_index < count; ++arg_index, regList >>= 4) {
+ AssignRegister(*new_shadow_frame, shadow_frame, first_dest_reg + arg_index, regList & 0x0f);
+ }
+ }
+ }
+
+ // Do the call now.
+ if (LIKELY(Runtime::Current()->IsStarted())) {
+ (method->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
+ } else {
+ UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, first_dest_reg);
+ }
+ return !self->IsExceptionPending();
+}
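The shorty walked in the slow path is ART's compact signature string: one character for the return type followed by one per parameter, with 'L' standing for any reference type and 'J'/'D' for the wide types that occupy two vregs (hence the extra `++dest_reg; ++arg_offset;` in that case). A hypothetical example of the mapping, plus the wide-value reassembly the 'J'/'D' case performs:

    #include <cstdint>

    // int f(Object o, long j, float x) would have shorty "ILJF":
    //   'I' = return type (skipped via shorty_pos + 1), 'L' = o, 'J' = j, 'F' = x.
    // How the 'J'/'D' case rebuilds one 64-bit value from two adjacent vregs:
    uint64_t assemble_wide(uint32_t lo_vreg, uint32_t hi_vreg) {
      return (static_cast<uint64_t>(hi_vreg) << 32) | lo_vreg;
    }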
+
+template <bool is_range, bool do_access_check>
+bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
+ Thread* self, JValue* result) {
+ DCHECK(inst->Opcode() == Instruction::FILLED_NEW_ARRAY ||
+ inst->Opcode() == Instruction::FILLED_NEW_ARRAY_RANGE);
+ const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
+ if (!is_range) {
+    // Check that FILLED_NEW_ARRAY's length does not exceed 5 arguments.
+ CHECK_LE(length, 5);
+ }
+ if (UNLIKELY(length < 0)) {
+ ThrowNegativeArraySizeException(length);
+ return false;
+ }
+ uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+ Class* arrayClass = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(arrayClass == NULL)) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
+ CHECK(arrayClass->IsArrayClass());
+ Class* componentClass = arrayClass->GetComponentType();
+ if (UNLIKELY(componentClass->IsPrimitive() && !componentClass->IsPrimitiveInt())) {
+ if (componentClass->IsPrimitiveLong() || componentClass->IsPrimitiveDouble()) {
+ ThrowRuntimeException("Bad filled array request for type %s",
+ PrettyDescriptor(componentClass).c_str());
+ } else {
+ self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
+ "Ljava/lang/InternalError;",
+ "Found type %s; filled-new-array not implemented for anything but \'int\'",
+ PrettyDescriptor(componentClass).c_str());
+ }
+ return false;
+ }
+ Object* newArray = Array::Alloc(self, arrayClass, length);
+ if (UNLIKELY(newArray == NULL)) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
+ if (is_range) {
+ uint32_t vregC = inst->VRegC_3rc();
+ const bool is_primitive_int_component = componentClass->IsPrimitiveInt();
+ for (int32_t i = 0; i < length; ++i) {
+ if (is_primitive_int_component) {
+ newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(vregC + i));
+ } else {
+ newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(vregC + i));
+ }
+ }
+ } else {
+ uint32_t arg[5];
+ inst->GetArgs(arg);
+ const bool is_primitive_int_component = componentClass->IsPrimitiveInt();
+ for (int32_t i = 0; i < length; ++i) {
+ if (is_primitive_int_component) {
+ newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(arg[i]));
+ } else {
+ newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(arg[i]));
+ }
+ }
+ }
+
+ result->SetL(newArray);
+ return true;
+}
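For context, `filled-new-array` allocates the array and populates it from the listed registers in one step, with a following `move-result-object` picking up the result; the non-range encoding carries at most five registers, which is what the `CHECK_LE(length, 5)` above enforces. Roughly, expressed as ordinary code (a hypothetical three-element case):

    #include <cstdint>
    #include <vector>

    // What  filled-new-array {v0, v1, v2}, [I  +  move-result-object vA
    // computes: a fresh int[3] built from the listed registers.
    std::vector<int32_t> filled_new_array_demo(int32_t v0, int32_t v1, int32_t v2) {
      return {v0, v1, v2};
    }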
+
+static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
+ JValue* result, size_t arg_offset) {
+  // In a runtime that's not started, we intercept certain methods to avoid complicated
+  // dependency problems in core libraries.
+ std::string name(PrettyMethod(shadow_frame->GetMethod()));
+ if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
+ std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
+ ClassLoader* class_loader = NULL; // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
+ Class* found = Runtime::Current()->GetClassLinker()->FindClass(descriptor.c_str(),
+ class_loader);
+ CHECK(found != NULL) << "Class.forName failed in un-started runtime for class: "
+ << PrettyDescriptor(descriptor);
+ result->SetL(found);
+ } else if (name == "java.lang.Object java.lang.Class.newInstance()") {
+ Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ ArtMethod* c = klass->FindDeclaredDirectMethod("<init>", "()V");
+ CHECK(c != NULL);
+ SirtRef<Object> obj(self, klass->AllocObject(self));
+ CHECK(obj.get() != NULL);
+ EnterInterpreterFromInvoke(self, c, obj.get(), NULL, NULL);
+ result->SetL(obj.get());
+ } else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
+    // Special managed code cut-out to allow field lookup in an un-started runtime that'd fail
+ // going the reflective Dex way.
+ Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+ ArtField* found = NULL;
+ FieldHelper fh;
+ ObjectArray<ArtField>* fields = klass->GetIFields();
+ for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
+ ArtField* f = fields->Get(i);
+ fh.ChangeField(f);
+ if (name->Equals(fh.GetName())) {
+ found = f;
+ }
+ }
+ if (found == NULL) {
+ fields = klass->GetSFields();
+ for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
+ ArtField* f = fields->Get(i);
+ fh.ChangeField(f);
+ if (name->Equals(fh.GetName())) {
+ found = f;
+ }
+ }
+ }
+ CHECK(found != NULL)
+ << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
+ << name->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
+ // TODO: getDeclaredField calls GetType once the field is found to ensure a
+ // NoClassDefFoundError is thrown if the field's type cannot be resolved.
+ Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
+ SirtRef<Object> field(self, jlr_Field->AllocObject(self));
+ CHECK(field.get() != NULL);
+ ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>", "(Ljava/lang/reflect/ArtField;)V");
+ uint32_t args[1];
+ args[0] = reinterpret_cast<uint32_t>(found);
+ EnterInterpreterFromInvoke(self, c, field.get(), args, NULL);
+ result->SetL(field.get());
+ } else if (name == "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)" ||
+ name == "void java.lang.System.arraycopy(char[], int, char[], int, int)") {
+ // Special case array copying without initializing System.
+ Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
+ jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
+ jint dstPos = shadow_frame->GetVReg(arg_offset + 3);
+ jint length = shadow_frame->GetVReg(arg_offset + 4);
+ if (!ctype->IsPrimitive()) {
+ ObjectArray<Object>* src = shadow_frame->GetVRegReference(arg_offset)->AsObjectArray<Object>();
+ ObjectArray<Object>* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<Object>();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else if (ctype->IsPrimitiveChar()) {
+ CharArray* src = shadow_frame->GetVRegReference(arg_offset)->AsCharArray();
+ CharArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else if (ctype->IsPrimitiveInt()) {
+ IntArray* src = shadow_frame->GetVRegReference(arg_offset)->AsIntArray();
+ IntArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsIntArray();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else {
+ UNIMPLEMENTED(FATAL) << "System.arraycopy of unexpected type: " << PrettyDescriptor(ctype);
+ }
+ } else {
+ // Not special, continue with regular interpreter execution.
+ artInterpreterToInterpreterBridge(self, mh, code_item, shadow_frame, result);
+ }
+}
+
+// Explicit DoCall template function declarations.
+#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
+template bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Object* receiver, \
+ Thread* self, ShadowFrame& shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, JValue* result)
+EXPLICIT_DO_CALL_TEMPLATE_DECL(false, false);
+EXPLICIT_DO_CALL_TEMPLATE_DECL(false, true);
+EXPLICIT_DO_CALL_TEMPLATE_DECL(true, false);
+EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true);
+#undef EXPLICIT_DO_CALL_TEMPLATE_DECL
+
+// Explicit DoFilledNewArray template function declarations.
+#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check) \
+ template bool DoFilledNewArray<_is_range_, _check>(const Instruction* inst, \
+ const ShadowFrame& shadow_frame, \
+ Thread* self, JValue* result)
+EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(false, false);
+EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(false, true);
+EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(true, false);
+EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(true, true);
+#undef EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
new file mode 100644
index 0000000000..80502b44f6
--- /dev/null
+++ b/runtime/interpreter/interpreter_common.h
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
+
+#include "interpreter.h"
+
+#include <math.h>
+
+#include "base/logging.h"
+#include "class_linker-inl.h"
+#include "common_throws.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "dex_instruction.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "gc/accounting/card_table-inl.h"
+#include "invoke_arg_array_builder.h"
+#include "nth_caller_visitor.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+#include "well_known_classes.h"
+
+using ::art::mirror::ArtField;
+using ::art::mirror::ArtMethod;
+using ::art::mirror::Array;
+using ::art::mirror::BooleanArray;
+using ::art::mirror::ByteArray;
+using ::art::mirror::CharArray;
+using ::art::mirror::Class;
+using ::art::mirror::ClassLoader;
+using ::art::mirror::IntArray;
+using ::art::mirror::LongArray;
+using ::art::mirror::Object;
+using ::art::mirror::ObjectArray;
+using ::art::mirror::ShortArray;
+using ::art::mirror::String;
+using ::art::mirror::Throwable;
+
+namespace art {
+namespace interpreter {
+
+// External references to both interpreter implementations.
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<bool do_access_check>
+extern JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register)
+ NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<bool do_access_check>
+extern JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register)
+ NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
+
+static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+ ref->MonitorEnter(self);
+}
+
+static inline void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+ ref->MonitorExit(self);
+}
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<bool is_range, bool do_assignability_check>
+bool DoCall(ArtMethod* method, Object* receiver, Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data, JValue* result) NO_THREAD_SAFETY_ANALYSIS;
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<InvokeType type, bool is_range, bool do_access_check>
+static bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
+ uint16_t inst_data, JValue* result) NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<InvokeType type, bool is_range, bool do_access_check>
+static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
+ uint16_t inst_data, JValue* result) {
+ const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ Object* const receiver = (type == kStatic) ? NULL : shadow_frame.GetVRegReference(vregC);
+ ArtMethod* const method = FindMethodFromCode(method_idx, receiver, shadow_frame.GetMethod(), self,
+ do_access_check, type);
+ if (UNLIKELY(method == NULL)) {
+ CHECK(self->IsExceptionPending());
+ result->SetJ(0);
+ return false;
+ } else if (UNLIKELY(method->IsAbstract())) {
+ ThrowAbstractMethodError(method);
+ result->SetJ(0);
+ return false;
+ } else {
+ return DoCall<is_range, do_access_check>(method, receiver, self, shadow_frame, inst,
+ inst_data, result);
+ }
+}
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<bool is_range>
+static bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
+ uint16_t inst_data, JValue* result)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<bool is_range>
+static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data,
+ JValue* result) {
+ const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ Object* const receiver = shadow_frame.GetVRegReference(vregC);
+ if (UNLIKELY(receiver == NULL)) {
+ // We lost the reference to the method index so we cannot get a more
+    // precise exception message.
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ return false;
+ }
+ const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ ArtMethod* const method = receiver->GetClass()->GetVTable()->GetWithoutChecks(vtable_idx);
+ if (UNLIKELY(method == NULL)) {
+ CHECK(self->IsExceptionPending());
+ result->SetJ(0);
+ return false;
+ } else if (UNLIKELY(method->IsAbstract())) {
+ ThrowAbstractMethodError(method);
+ result->SetJ(0);
+ return false;
+ } else {
+    // No access check is needed since the method has been quickened.
+ return DoCall<is_range, false>(method, receiver, self, shadow_frame, inst, inst_data, result);
+ }
+}
+
+// We use template functions to optimize compiler inlining. Otherwise, parts of
+// the code (like a switch statement) that depend on a constant parameter would
+// not be optimized away even though they should be. These constant parameters
+// are now part of the template arguments.
+// Note that these template functions are static and inlined, so they should
+// not appear in the final object file.
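+// For illustration only (a hypothetical helper, not part of this file): in a
+// call such as Select<true>(a, b) below, the branch on the template parameter
+// is folded away at compile time in each instantiation, which is exactly the
+// effect we rely on here.
+//
+//   template<bool kTakeFirst>
+//   static inline int Select(int a, int b) {
+//     if (kTakeFirst) {  // Constant per instantiation: no runtime branch.
+//       return a;
+//     }
+//     return b;
+//   }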
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
+static bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
+static inline bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data) {
+ bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
+ uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+ ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
+ find_type, Primitive::FieldSize(field_type),
+ do_access_check);
+ if (UNLIKELY(f == NULL)) {
+ CHECK(self->IsExceptionPending());
+ return false;
+ }
+ Object* obj;
+ if (is_static) {
+ obj = f->GetDeclaringClass();
+ } else {
+ obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true);
+ return false;
+ }
+ }
+ uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ shadow_frame.SetVReg(vregA, f->GetBoolean(obj));
+ break;
+ case Primitive::kPrimByte:
+ shadow_frame.SetVReg(vregA, f->GetByte(obj));
+ break;
+ case Primitive::kPrimChar:
+ shadow_frame.SetVReg(vregA, f->GetChar(obj));
+ break;
+ case Primitive::kPrimShort:
+ shadow_frame.SetVReg(vregA, f->GetShort(obj));
+ break;
+ case Primitive::kPrimInt:
+ shadow_frame.SetVReg(vregA, f->GetInt(obj));
+ break;
+ case Primitive::kPrimLong:
+ shadow_frame.SetVRegLong(vregA, f->GetLong(obj));
+ break;
+ case Primitive::kPrimNot:
+ shadow_frame.SetVRegReference(vregA, f->GetObject(obj));
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ return true;
+}
+
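+// Handles iget-quick instructions, where quickening has replaced the field
+// index with the field's byte offset within the object.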
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<Primitive::Type field_type>
+static bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<Primitive::Type field_type>
+static inline bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+    // We lost the reference to the field index, so we cannot report a more
+    // precise exception message.
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ return false;
+ }
+ MemberOffset field_offset(inst->VRegC_22c());
+  const bool is_volatile = false;  // iget-x-quick is only generated for non-volatile fields.
+ const uint32_t vregA = inst->VRegA_22c(inst_data);
+ switch (field_type) {
+ case Primitive::kPrimInt:
+ shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset, is_volatile)));
+ break;
+ case Primitive::kPrimLong:
+ shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset, is_volatile)));
+ break;
+ case Primitive::kPrimNot:
+ shadow_frame.SetVRegReference(vregA, obj->GetFieldObject<mirror::Object*>(field_offset, is_volatile));
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ return true;
+}
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
+static bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
+static inline bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
+ const Instruction* inst, uint16_t inst_data) {
+ bool do_assignability_check = do_access_check;
+ bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
+ uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+ ArtField* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
+ find_type, Primitive::FieldSize(field_type),
+ do_access_check);
+ if (UNLIKELY(f == NULL)) {
+ CHECK(self->IsExceptionPending());
+ return false;
+ }
+ Object* obj;
+ if (is_static) {
+ obj = f->GetDeclaringClass();
+ } else {
+ obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(),
+ f, false);
+ return false;
+ }
+ }
+ uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ f->SetBoolean(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimByte:
+ f->SetByte(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimChar:
+ f->SetChar(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimShort:
+ f->SetShort(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimInt:
+ f->SetInt(obj, shadow_frame.GetVReg(vregA));
+ break;
+ case Primitive::kPrimLong:
+ f->SetLong(obj, shadow_frame.GetVRegLong(vregA));
+ break;
+ case Primitive::kPrimNot: {
+ Object* reg = shadow_frame.GetVRegReference(vregA);
+ if (do_assignability_check && reg != NULL) {
+ Class* field_class = FieldHelper(f).GetType();
+ if (!reg->VerifierInstanceOf(field_class)) {
+ // This should never happen.
+ self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
+ "Ljava/lang/VirtualMachineError;",
+ "Put '%s' that is not instance of field '%s' in '%s'",
+ ClassHelper(reg->GetClass()).GetDescriptor(),
+ ClassHelper(field_class).GetDescriptor(),
+ ClassHelper(f->GetDeclaringClass()).GetDescriptor());
+ return false;
+ }
+ }
+ f->SetObj(obj, reg);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ return true;
+}
+
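+// Handles iput-quick instructions, where quickening has replaced the field
+// index with the field's byte offset within the object.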
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<Primitive::Type field_type>
+static bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+
+template<Primitive::Type field_type>
+static inline bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+    // We lost the reference to the field index, so we cannot report a more
+    // precise exception message.
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ return false;
+ }
+ MemberOffset field_offset(inst->VRegC_22c());
+  const bool is_volatile = false;  // iput-x-quick is only generated for non-volatile fields.
+ const uint32_t vregA = inst->VRegA_22c(inst_data);
+ switch (field_type) {
+ case Primitive::kPrimInt:
+ obj->SetField32(field_offset, shadow_frame.GetVReg(vregA), is_volatile);
+ break;
+ case Primitive::kPrimLong:
+ obj->SetField64(field_offset, shadow_frame.GetVRegLong(vregA), is_volatile);
+ break;
+ case Primitive::kPrimNot:
+ obj->SetFieldObject(field_offset, shadow_frame.GetVRegReference(vregA), is_volatile);
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ return true;
+}
+
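+// Resolves the string at the given index through the MethodHelper, first
+// making sure java.lang.String itself is initialized.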
+static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t string_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Class* java_lang_string_class = String::GetJavaLangString();
+ if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (UNLIKELY(!class_linker->EnsureInitialized(java_lang_string_class,
+ true, true))) {
+ DCHECK(self->IsExceptionPending());
+ return NULL;
+ }
+ }
+ return mh.ResolveString(string_idx);
+}
+
+static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
+ int32_t dividend, int32_t divisor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const int32_t kMinInt = std::numeric_limits<int32_t>::min();
+ if (UNLIKELY(divisor == 0)) {
+ ThrowArithmeticExceptionDivideByZero();
+ return false;
+ }
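+  // Per Java semantics, kMinInt / -1 overflows and must yield kMinInt; the
+  // explicit check also avoids a hardware fault from the divide (e.g. x86 idiv).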
+ if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
+ shadow_frame.SetVReg(result_reg, kMinInt);
+ } else {
+ shadow_frame.SetVReg(result_reg, dividend / divisor);
+ }
+ return true;
+}
+
+static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
+ int32_t dividend, int32_t divisor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const int32_t kMinInt = std::numeric_limits<int32_t>::min();
+ if (UNLIKELY(divisor == 0)) {
+ ThrowArithmeticExceptionDivideByZero();
+ return false;
+ }
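+  // Per Java semantics, kMinInt % -1 is 0; dividing would overflow, so it is
+  // special-cased here as well.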
+ if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
+ shadow_frame.SetVReg(result_reg, 0);
+ } else {
+ shadow_frame.SetVReg(result_reg, dividend % divisor);
+ }
+ return true;
+}
+
+static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
+ int64_t dividend, int64_t divisor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const int64_t kMinLong = std::numeric_limits<int64_t>::min();
+ if (UNLIKELY(divisor == 0)) {
+ ThrowArithmeticExceptionDivideByZero();
+ return false;
+ }
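+  // Same rationale as DoIntDivide: kMinLong / -1 overflows and must yield kMinLong.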
+ if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
+ shadow_frame.SetVRegLong(result_reg, kMinLong);
+ } else {
+ shadow_frame.SetVRegLong(result_reg, dividend / divisor);
+ }
+ return true;
+}
+
+static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
+ int64_t dividend, int64_t divisor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const int64_t kMinLong = std::numeric_limits<int64_t>::min();
+ if (UNLIKELY(divisor == 0)) {
+ ThrowArithmeticExceptionDivideByZero();
+ return false;
+ }
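+  // Same rationale as DoIntRemainder: kMinLong % -1 is 0.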
+ if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
+ shadow_frame.SetVRegLong(result_reg, 0);
+ } else {
+ shadow_frame.SetVRegLong(result_reg, dividend % divisor);
+ }
+ return true;
+}
+
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+// Returns true on success, otherwise throws an exception and returns false.
+template <bool is_range, bool do_access_check>
+bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
+ Thread* self, JValue* result) NO_THREAD_SAFETY_ANALYSIS;
+
+static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
+ uint16_t inst_data)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
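+  // The packed-switch payload referenced by VRegB is laid out as:
+  //   ushort ident;         // kPackedSwitchSignature
+  //   ushort size;          // number of targets
+  //   int first_key;        // first (and lowest) key
+  //   int targets[size];    // relative branch targets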
+ DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
+ const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+ int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
+ DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
+ uint16_t size = switch_data[1];
+ DCHECK_GT(size, 0);
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
+ DCHECK(IsAligned<4>(keys));
+ int32_t first_key = keys[0];
+ const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
+ DCHECK(IsAligned<4>(targets));
+ int32_t index = test_val - first_key;
+ if (index >= 0 && index < size) {
+ return targets[index];
+ } else {
+ // No corresponding value: move forward by 3 (size of PACKED_SWITCH).
+ return 3;
+ }
+}
+
+static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
+ uint16_t inst_data)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
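+  // The sparse-switch payload referenced by VRegB is laid out as:
+  //   ushort ident;         // kSparseSwitchSignature
+  //   ushort size;          // number of entries
+  //   int keys[size];       // key values, sorted ascending
+  //   int targets[size];    // relative branch targets, parallel to keys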
+ DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
+ const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+ int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
+ DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
+ uint16_t size = switch_data[1];
+ DCHECK_GT(size, 0);
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
+ DCHECK(IsAligned<4>(keys));
+ const int32_t* entries = keys + size;
+ DCHECK(IsAligned<4>(entries));
+ int lo = 0;
+ int hi = size - 1;
+ while (lo <= hi) {
+ int mid = (lo + hi) / 2;
+ int32_t foundVal = keys[mid];
+ if (test_val < foundVal) {
+ hi = mid - 1;
+ } else if (test_val > foundVal) {
+ lo = mid + 1;
+ } else {
+ return entries[mid];
+ }
+ }
+ // No corresponding value: move forward by 3 (size of SPARSE_SWITCH).
+ return 3;
+}
+
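+// Looks up the catch handler for the pending exception. Returns the dex pc of
+// the matching handler, or DexFile::kDexNoIndex if the current method has no
+// handler and its frame must be unwound.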
+static inline uint32_t FindNextInstructionFollowingException(Thread* self,
+ ShadowFrame& shadow_frame,
+ uint32_t dex_pc,
+ mirror::Object* this_object,
+ const instrumentation::Instrumentation* instrumentation)
+ ALWAYS_INLINE;
+
+static inline uint32_t FindNextInstructionFollowingException(Thread* self,
+ ShadowFrame& shadow_frame,
+ uint32_t dex_pc,
+ mirror::Object* this_object,
+ const instrumentation::Instrumentation* instrumentation)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ self->VerifyStack();
+ ThrowLocation throw_location;
+ mirror::Throwable* exception = self->GetException(&throw_location);
+ bool clear_exception = false;
+ uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc,
+ &clear_exception);
+ if (found_dex_pc == DexFile::kDexNoIndex) {
+ instrumentation->MethodUnwindEvent(self, this_object,
+ shadow_frame.GetMethod(), dex_pc);
+ } else {
+ instrumentation->ExceptionCaughtEvent(self, throw_location,
+ shadow_frame.GetMethod(),
+ found_dex_pc, exception);
+ if (clear_exception) {
+ self->ClearException();
+ }
+ }
+ return found_dex_pc;
+}
+
+static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
+ __attribute__((cold, noreturn, noinline));
+
+static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ LOG(FATAL) << "Unexpected instruction: " << inst->DumpString(&mh.GetDexFile());
+ exit(0); // Unreachable, keep GCC happy.
+}
+
+static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
+ const uint32_t dex_pc, MethodHelper& mh)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const bool kTracing = false;
+ if (kTracing) {
+#define TRACE_LOG std::cerr
+ TRACE_LOG << PrettyMethod(shadow_frame.GetMethod())
+ << StringPrintf("\n0x%x: ", dex_pc)
+ << inst->DumpString(&mh.GetDexFile()) << "\n";
+ for (size_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
+ uint32_t raw_value = shadow_frame.GetVReg(i);
+ Object* ref_value = shadow_frame.GetVRegReference(i);
+      TRACE_LOG << StringPrintf(" vreg%zu=0x%08X", i, raw_value);
+ if (ref_value != NULL) {
+ if (ref_value->GetClass()->IsStringClass() &&
+ ref_value->AsString()->GetCharArray() != NULL) {
+ TRACE_LOG << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
+ } else {
+ TRACE_LOG << "/" << PrettyTypeOf(ref_value);
+ }
+ }
+ }
+ TRACE_LOG << "\n";
+#undef TRACE_LOG
+ }
+}
+
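+// A branch offset of zero would re-execute the same instruction forever, so it
+// is treated as backward too; backward branches are where the interpreter
+// checks for pending suspend requests.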
+static inline bool IsBackwardBranch(int32_t branch_offset) {
+ return branch_offset <= 0;
+}
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
new file mode 100644
index 0000000000..5a008319f8
--- /dev/null
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -0,0 +1,2344 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter_common.h"
+
+namespace art {
+namespace interpreter {
+
+// In the following macros, we expect the following local variables to exist:
+// - "self": the current Thread*.
+// - "inst": the current Instruction*.
+// - "inst_data": the current instruction's first 16 bits.
+// - "dex_pc": the current pc.
+// - "shadow_frame": the current shadow frame.
+// - "mh": the current MethodHelper.
+// - "currentHandlersTable": the current table of pointers to the instruction handlers.
+
+// Advances to the next instruction and updates the interpreter state.
+#define ADVANCE(_offset) \
+ do { \
+ int32_t disp = static_cast<int32_t>(_offset); \
+ inst = inst->RelativeAt(disp); \
+ dex_pc = static_cast<uint32_t>(static_cast<int32_t>(dex_pc) + disp); \
+ shadow_frame.SetDexPC(dex_pc); \
+ TraceExecution(shadow_frame, inst, dex_pc, mh); \
+ inst_data = inst->Fetch16(0); \
+ goto *currentHandlersTable[inst->Opcode(inst_data)]; \
+ } while (false)
+
+#define HANDLE_PENDING_EXCEPTION() goto exception_pending_label
+
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION(_is_exception_pending, _offset) \
+ do { \
+ if (UNLIKELY(_is_exception_pending)) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ } else { \
+ ADVANCE(_offset); \
+ } \
+ } while (false)
+
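+// Selects the handler table to dispatch through: when dex pc listeners are
+// installed, the instrumentation variant is used so that each executed dex pc
+// can be reported to them.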
+#define UPDATE_HANDLER_TABLE() \
+ do { \
+ if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
+ currentHandlersTable = instrumentationHandlersTable; \
+ } else { \
+ currentHandlersTable = handlersTable; \
+ } \
+  } while (false)
+
+#define UNREACHABLE_CODE_CHECK() \
+ do { \
+ if (kIsDebugBuild) { \
+ LOG(FATAL) << "We should not be here !"; \
+ } \
+ } while (false)
+
+#define HANDLE_INSTRUCTION_START(opcode) op_##opcode: // NOLINT(whitespace/labels)
+#define HANDLE_INSTRUCTION_END() UNREACHABLE_CODE_CHECK()
+
+template<bool do_access_check>
+JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register) {
+ bool do_assignability_check = do_access_check;
+ if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
+ LOG(FATAL) << "Invalid shadow frame for interpreter use";
+ return JValue();
+ }
+ self->VerifyStack();
+
+ uint32_t dex_pc = shadow_frame.GetDexPC();
+ const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
+  if (LIKELY(dex_pc == 0)) {  // We are entering the method as opposed to deoptimizing.
+ if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
+ instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), 0);
+ }
+ }
+ const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
+ uint16_t inst_data;
+
+  // Define the handler tables. Each entry is the address of the label that
+  // handles the corresponding opcode, using the labels-as-values extension
+  // (&&label) that makes computed-goto dispatch possible.
+ static const void* handlersTable[kNumPackedOpcodes] = {
+#define INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) &&op_##code,
+#include "dex_instruction_list.h"
+ DEX_INSTRUCTION_LIST(INSTRUCTION_HANDLER)
+#undef DEX_INSTRUCTION_LIST
+#undef INSTRUCTION_HANDLER
+ };
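+  // For example, the DEX_INSTRUCTION_LIST row for NOP expands to "&&op_NOP,",
+  // the address of the op_NOP label defined by HANDLE_INSTRUCTION_START(NOP).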
+
+ static const void* instrumentationHandlersTable[kNumPackedOpcodes] = {
+#define INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) &&instrumentation_op_##code,
+#include "dex_instruction_list.h"
+ DEX_INSTRUCTION_LIST(INSTRUCTION_HANDLER)
+#undef DEX_INSTRUCTION_LIST
+#undef INSTRUCTION_HANDLER
+ };
+
+ const void** currentHandlersTable;
+ UPDATE_HANDLER_TABLE();
+
+ // Jump to first instruction.
+ ADVANCE(0);
+ UNREACHABLE_CODE_CHECK();
+
+ HANDLE_INSTRUCTION_START(NOP)
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_FROM16)
+ shadow_frame.SetVReg(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_16)
+ shadow_frame.SetVReg(inst->VRegA_32x(),
+ shadow_frame.GetVReg(inst->VRegB_32x()));
+ ADVANCE(3);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_WIDE)
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_WIDE_FROM16)
+ shadow_frame.SetVRegLong(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_22x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_WIDE_16)
+ shadow_frame.SetVRegLong(inst->VRegA_32x(),
+ shadow_frame.GetVRegLong(inst->VRegB_32x()));
+ ADVANCE(3);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_OBJECT)
+ shadow_frame.SetVRegReference(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_OBJECT_FROM16)
+ shadow_frame.SetVRegReference(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVRegReference(inst->VRegB_22x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_OBJECT_16)
+ shadow_frame.SetVRegReference(inst->VRegA_32x(),
+ shadow_frame.GetVRegReference(inst->VRegB_32x()));
+ ADVANCE(3);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_RESULT)
+ shadow_frame.SetVReg(inst->VRegA_11x(inst_data), result_register.GetI());
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_RESULT_WIDE)
+ shadow_frame.SetVRegLong(inst->VRegA_11x(inst_data), result_register.GetJ());
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_RESULT_OBJECT)
+ shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), result_register.GetL());
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MOVE_EXCEPTION) {
+ Throwable* exception = self->GetException(NULL);
+ self->ClearException();
+ shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN_VOID) {
+ JValue result;
+ if (do_access_check) {
+ // If access checks are required then the dex-to-dex compiler and analysis of
+ // whether the class has final fields hasn't been performed. Conservatively
+ // perform the memory barrier now.
+ ANDROID_MEMBAR_STORE();
+ }
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN_VOID_BARRIER) {
+ ANDROID_MEMBAR_STORE();
+ JValue result;
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN) {
+ JValue result;
+ result.SetJ(0);
+ result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN_WIDE) {
+ JValue result;
+ result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RETURN_OBJECT) {
+ JValue result;
+ Object* obj_result = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ result.SetJ(0);
+ result.SetL(obj_result);
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (do_assignability_check && obj_result != NULL) {
+ Class* return_type = MethodHelper(shadow_frame.GetMethod()).GetReturnType();
+ if (return_type == NULL) {
+ // Return the pending exception.
+ HANDLE_PENDING_EXCEPTION();
+ }
+ if (!obj_result->VerifierInstanceOf(return_type)) {
+ // This should never happen.
+ self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
+ "Ljava/lang/VirtualMachineError;",
+ "Returning '%s' that is not instance of return type '%s'",
+ ClassHelper(obj_result->GetClass()).GetDescriptor(),
+ ClassHelper(return_type).GetDescriptor());
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc,
+ result);
+ }
+ return result;
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_4) {
+ uint32_t dst = inst->VRegA_11n(inst_data);
+ int32_t val = inst->VRegB_11n(inst_data);
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_16) {
+ uint32_t dst = inst->VRegA_21s(inst_data);
+ int32_t val = inst->VRegB_21s();
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST) {
+ uint32_t dst = inst->VRegA_31i(inst_data);
+ int32_t val = inst->VRegB_31i();
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ ADVANCE(3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_HIGH16) {
+ uint32_t dst = inst->VRegA_21h(inst_data);
+ int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_WIDE_16)
+ shadow_frame.SetVRegLong(inst->VRegA_21s(inst_data), inst->VRegB_21s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_WIDE_32)
+ shadow_frame.SetVRegLong(inst->VRegA_31i(inst_data), inst->VRegB_31i());
+ ADVANCE(3);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_WIDE)
+ shadow_frame.SetVRegLong(inst->VRegA_51l(inst_data), inst->VRegB_51l());
+ ADVANCE(5);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_WIDE_HIGH16)
+ shadow_frame.SetVRegLong(inst->VRegA_21h(inst_data),
+ static_cast<uint64_t>(inst->VRegB_21h()) << 48);
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_STRING) {
+ String* s = ResolveString(self, mh, inst->VRegB_21c());
+ if (UNLIKELY(s == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_STRING_JUMBO) {
+ String* s = ResolveString(self, mh, inst->VRegB_31c());
+ if (UNLIKELY(s == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
+ ADVANCE(3);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CONST_CLASS) {
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ DoMonitorEnter(self, obj);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ DoMonitorExit(self, obj);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CHECK_CAST) {
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
+ if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ ThrowClassCastException(c, obj->GetClass());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ ADVANCE(2);
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INSTANCE_OF) {
+ Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
+ Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
+ if (UNLIKELY(array == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
+ ADVANCE(1);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEW_INSTANCE) {
+ Object* obj = AllocObjectFromCode(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, do_access_check);
+ if (UNLIKELY(obj == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEW_ARRAY) {
+ int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
+ Object* obj = AllocArrayFromCode(inst->VRegC_22c(), shadow_frame.GetMethod(),
+ length, self, do_access_check);
+ if (UNLIKELY(obj == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FILLED_NEW_ARRAY) {
+ bool success = DoFilledNewArray<false, do_access_check>(inst, shadow_frame,
+ self, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FILLED_NEW_ARRAY_RANGE) {
+ bool success = DoFilledNewArray<true, do_access_check>(inst, shadow_frame,
+ self, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FILL_ARRAY_DATA) {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Array* array = obj->AsArray();
+ DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+ const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
+ if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
+ self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
+ "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ "failed FILL_ARRAY_DATA; length=%d, index=%d",
+ array->GetLength(), payload->element_count);
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ uint32_t size_in_bytes = payload->element_count * payload->element_width;
+ memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+ ADVANCE(3);
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(THROW) {
+ Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(exception == NULL)) {
+ ThrowNullPointerException(NULL, "throw with null exception");
+ } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
+ // This should never happen.
+ self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
+ "Ljava/lang/VirtualMachineError;",
+ "Throwing '%s' that is not instance of Throwable",
+ ClassHelper(exception->GetClass()).GetDescriptor());
+ } else {
+ self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
+ }
+ HANDLE_PENDING_EXCEPTION();
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(GOTO) {
+ int8_t offset = inst->VRegA_10t(inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(GOTO_16) {
+ int16_t offset = inst->VRegA_20t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(GOTO_32) {
+ int32_t offset = inst->VRegA_30t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(PACKED_SWITCH) {
+ int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPARSE_SWITCH) {
+ int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMPL_FLOAT) {
+ float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+ float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMPG_FLOAT) {
+ float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+ float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+ int32_t result;
+ if (val1 < val2) {
+ result = -1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMPL_DOUBLE) {
+ double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+ double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMPG_DOUBLE) {
+ double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+ double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+ int32_t result;
+ if (val1 < val2) {
+ result = -1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(CMP_LONG) {
+ int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
+ int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ ADVANCE(2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_EQ) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_NE) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_LT) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_GE) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_GT) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_LE) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_EQZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_NEZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_LTZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_GEZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_GTZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IF_LEZ) {
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ UPDATE_HANDLER_TABLE();
+ }
+ ADVANCE(offset);
+ } else {
+ ADVANCE(2);
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ BooleanArray* array = a->AsBooleanArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_BYTE) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ByteArray* array = a->AsByteArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_CHAR) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ CharArray* array = a->AsCharArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_SHORT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ShortArray* array = a->AsShortArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ IntArray* array = a->AsIntArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_WIDE) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ LongArray* array = a->AsLongArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AGET_OBJECT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ BooleanArray* array = a->AsBooleanArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_BYTE) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ByteArray* array = a->AsByteArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_CHAR) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ CharArray* array = a->AsCharArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_SHORT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ShortArray* array = a->AsShortArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ IntArray* array = a->AsIntArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_WIDE) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ LongArray* array = a->AsLongArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(APUT_OBJECT) {
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ if (LIKELY(array->IsValidIndex(index) && array->CheckAssignable(val))) {
+ array->SetWithoutChecks(index, val);
+ ADVANCE(2);
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_BOOLEAN) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_BYTE) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_CHAR) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_SHORT) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_WIDE) {
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_OBJECT) {
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_WIDE_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_OBJECT_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_BOOLEAN) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_BYTE) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_CHAR) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_SHORT) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_WIDE) {
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SGET_OBJECT) {
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_BOOLEAN) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_BYTE) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_CHAR) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_SHORT) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_WIDE) {
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_OBJECT) {
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_QUICK) {
+ bool success = DoIPutQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_WIDE_QUICK) {
+ bool success = DoIPutQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IPUT_OBJECT_QUICK) {
+ bool success = DoIPutQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_BOOLEAN) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_BYTE) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_CHAR) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_SHORT) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_WIDE) {
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SPUT_OBJECT) {
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
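+  // The invoke family re-reads the dispatch table afterwards (UPDATE_HANDLER_TABLE):
+  // the callee may have toggled instrumentation, which switches between the normal
+  // and the instrumented handler tables.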
+ HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL) {
+ bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE) {
+ bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_SUPER) {
+ bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_SUPER_RANGE) {
+ bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_DIRECT) {
+ bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_DIRECT_RANGE) {
+ bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_INTERFACE) {
+ bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_INTERFACE_RANGE) {
+ bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_STATIC) {
+ bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_STATIC_RANGE) {
+ bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_QUICK) {
+ bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE_QUICK) {
+ bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+ UPDATE_HANDLER_TABLE();
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEG_INT)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NOT_INT)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEG_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NOT_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEG_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(NEG_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(LONG_TO_INT)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(LONG_TO_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(LONG_TO_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
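+  // Note: art_float_to_integral is assumed to implement the Java narrowing rules
+  // used by the conversions below: NaN converts to 0 and out-of-range values
+  // saturate to the target type's min/max.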
+ HANDLE_INSTRUCTION_START(FLOAT_TO_INT) {
+ float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+ int32_t result = art_float_to_integral<int32_t, float>(val);
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FLOAT_TO_LONG) {
+ float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+ int64_t result = art_float_to_integral<int64_t, float>(val);
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(FLOAT_TO_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DOUBLE_TO_INT) {
+ double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+ int32_t result = art_float_to_integral<int32_t, double>(val);
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DOUBLE_TO_LONG) {
+ double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+ int64_t result = art_float_to_integral<int64_t, double>(val);
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DOUBLE_TO_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_BYTE)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_CHAR)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(INT_TO_SHORT)
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) +
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) -
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) *
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_INT) {
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_INT) {
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
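+  // Per the Dalvik specification, shift distances are masked: only the low five
+  // bits count for 32-bit shifts (the low six bits for the 64-bit forms below).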
+ HANDLE_INSTRUCTION_START(SHL_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) <<
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) &
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) |
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_INT)
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) ^
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) +
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) -
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) *
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_LONG) {
+ bool success = DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_LONG) {
+ bool success = DoLongRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) &
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) |
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHL_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_LONG)
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_FLOAT)
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
+ shadow_frame.GetVRegFloat(inst->VRegC_23x())));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_DOUBLE)
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
+ shadow_frame.GetVRegDouble(inst->VRegC_23x())));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
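+  // The *_2ADDR variants use register A as both a source and the destination, so
+  // each handler caches the decoded register index in vregA.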
+ HANDLE_INSTRUCTION_START(ADD_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) +
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) -
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) *
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHL_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) <<
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) &
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) |
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_INT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) ^
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) +
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) -
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) *
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) &
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) |
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) ^
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHL_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) <<
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_LONG_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) +
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) -
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) *
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) /
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_FLOAT_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ fmodf(shadow_frame.GetVRegFloat(vregA),
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) +
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SUB_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) -
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) *
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) /
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_DOUBLE_2ADDR) {
+ uint32_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ fmod(shadow_frame.GetVRegDouble(vregA),
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data))));
+ ADVANCE(1);
+ }
+ HANDLE_INSTRUCTION_END();
+
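+  // The *_LIT16 and *_LIT8 variants take a sign-extended immediate as the second
+  // operand; note that RSUB reverses the operands, making the literal the minuend.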
+ HANDLE_INSTRUCTION_START(ADD_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) +
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RSUB_INT)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ inst->VRegC_22s() -
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) *
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_INT_LIT16) {
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_INT_LIT16) {
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) &
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) |
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_INT_LIT16)
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) ^
+ inst->VRegC_22s());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(ADD_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) +
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(RSUB_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ inst->VRegC_22b() -
+ shadow_frame.GetVReg(inst->VRegB_22b()));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(MUL_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) *
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(DIV_INT_LIT8) {
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(REM_INT_LIT8) {
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(AND_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) &
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(OR_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) |
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(XOR_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) ^
+ inst->VRegC_22b());
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHL_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) <<
+ (inst->VRegC_22b() & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(SHR_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) >>
+ (inst->VRegC_22b() & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(USHR_INT_LIT8)
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
+ (inst->VRegC_22b() & 0x1f));
+ ADVANCE(2);
+ HANDLE_INSTRUCTION_END();
+
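+  // Gaps in the opcode space funnel into UnexpectedOpcode, which reports the bad
+  // opcode as a fatal error: reaching one of these handlers means the verifier
+  // accepted invalid code.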
+ HANDLE_INSTRUCTION_START(UNUSED_3E)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_3F)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_40)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_41)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_42)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_43)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_79)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_7A)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_EB)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_EC)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_ED)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_EE)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_EF)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F0)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F1)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F2)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F3)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F4)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F5)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F6)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F7)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F8)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_F9)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FA)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FB)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FC)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FD)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FE)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(UNUSED_FF)
+ UnexpectedOpcode(inst, mh);
+ HANDLE_INSTRUCTION_END();
+
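+  // Shared exception path: handlers jump here with an exception pending. Either a
+  // catch handler exists in this method and we ADVANCE to it, or the exception is
+  // left for the caller to handle and a dummy JValue is returned.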
+ exception_pending_label: {
+ CHECK(self->IsExceptionPending());
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ Object* this_object = shadow_frame.GetThisObject(code_item->ins_size_);
+ uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, dex_pc,
+ this_object,
+ instrumentation);
+ if (found_dex_pc == DexFile::kDexNoIndex) {
+ return JValue(); /* Handled in caller. */
+ } else {
+ int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc);
+ ADVANCE(displacement);
+ }
+ }
+
+ // Create alternative instruction handlers dedicated to instrumentation.
+#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) \
+ instrumentation_op_##code: { \
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_), \
+ shadow_frame.GetMethod(), dex_pc); \
+ goto *handlersTable[Instruction::code]; \
+ }
+#include "dex_instruction_list.h"
+ DEX_INSTRUCTION_LIST(INSTRUMENTATION_INSTRUCTION_HANDLER)
+#undef DEX_INSTRUCTION_LIST
+#undef INSTRUMENTATION_INSTRUCTION_HANDLER
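+// For illustration, each handler generated above expands to roughly the following
+// (shown for NOP):
+//   instrumentation_op_NOP: {
+//     instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+//                                      shadow_frame.GetMethod(), dex_pc);
+//     goto *handlersTable[Instruction::NOP];  // then run the regular NOP handler
+//   }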
+} // NOLINT(readability/fn_size)
+
+// Explicit instantiations of ExecuteGotoImpl.
+template JValue ExecuteGotoImpl<true>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template JValue ExecuteGotoImpl<false>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
new file mode 100644
index 0000000000..82f216aa41
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -0,0 +1,2145 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter_common.h"
+
+namespace art {
+namespace interpreter {
+
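+// With an exception pending, look up the catch handler for the current dex pc and
+// continue there; if this method has none, return a dummy JValue and let the caller
+// continue the unwind. Kept as a macro so it can return from the enclosing
+// interpreter function.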
+#define HANDLE_PENDING_EXCEPTION() \
+ do { \
+ CHECK(self->IsExceptionPending()); \
+ if (UNLIKELY(self->TestAllFlags())) { \
+ CheckSuspend(self); \
+ } \
+ Object* this_object = shadow_frame.GetThisObject(code_item->ins_size_); \
+ uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, \
+ inst->GetDexPc(insns), \
+ this_object, \
+ instrumentation); \
+ if (found_dex_pc == DexFile::kDexNoIndex) { \
+ return JValue(); /* Handled in caller. */ \
+ } else { \
+ int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc); \
+ inst = inst->RelativeAt(displacement); \
+ } \
+ } while (false)
+
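+// Fold a handler's success flag into control flow: on failure enter the exception
+// machinery above, otherwise step to the next instruction via the given Next_*
+// accessor.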
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION(_is_exception_pending, _next_function) \
+ do { \
+ if (UNLIKELY(_is_exception_pending)) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ } else { \
+ inst = inst->_next_function(); \
+ } \
+ } while (false)
+
+// Code to run before each dex instruction.
+#define PREAMBLE()
+
+template<bool do_access_check>
+static JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register) {
+ bool do_assignability_check = do_access_check;
+ if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
+ LOG(FATAL) << "Invalid shadow frame for interpreter use";
+ return JValue();
+ }
+ self->VerifyStack();
+
+ uint32_t dex_pc = shadow_frame.GetDexPC();
+ const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
+  if (LIKELY(dex_pc == 0)) {  // We are entering the method as opposed to deoptimizing.
+ if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
+ instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), 0);
+ }
+ }
+ const uint16_t* const insns = code_item->insns_;
+ const Instruction* inst = Instruction::At(insns + dex_pc);
+ uint16_t inst_data;
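+  // Fetch/decode/dispatch loop: each iteration publishes the current dex pc to the
+  // shadow frame (keeping it accurate for GC and debugging), notifies any DexPcMoved
+  // listeners, then switches on the decoded opcode.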
+ while (true) {
+ dex_pc = inst->GetDexPc(insns);
+ shadow_frame.SetDexPC(dex_pc);
+ if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
+ }
+ TraceExecution(shadow_frame, inst, dex_pc, mh);
+ inst_data = inst->Fetch16(0);
+ switch (inst->Opcode(inst_data)) {
+ case Instruction::NOP:
+ PREAMBLE();
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_FROM16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MOVE_16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_32x(),
+ shadow_frame.GetVReg(inst->VRegB_32x()));
+ inst = inst->Next_3xx();
+ break;
+ case Instruction::MOVE_WIDE:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_WIDE_FROM16:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_22x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MOVE_WIDE_16:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_32x(),
+ shadow_frame.GetVRegLong(inst->VRegB_32x()));
+ inst = inst->Next_3xx();
+ break;
+ case Instruction::MOVE_OBJECT:
+ PREAMBLE();
+ shadow_frame.SetVRegReference(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_OBJECT_FROM16:
+ PREAMBLE();
+ shadow_frame.SetVRegReference(inst->VRegA_22x(inst_data),
+ shadow_frame.GetVRegReference(inst->VRegB_22x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MOVE_OBJECT_16:
+ PREAMBLE();
+ shadow_frame.SetVRegReference(inst->VRegA_32x(),
+ shadow_frame.GetVRegReference(inst->VRegB_32x()));
+ inst = inst->Next_3xx();
+ break;
+ case Instruction::MOVE_RESULT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_11x(inst_data), result_register.GetI());
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_RESULT_WIDE:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_11x(inst_data), result_register.GetJ());
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_RESULT_OBJECT:
+ PREAMBLE();
+ shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), result_register.GetL());
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::MOVE_EXCEPTION: {
+ PREAMBLE();
+ Throwable* exception = self->GetException(NULL);
+ self->ClearException();
+ shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::RETURN_VOID: {
+ PREAMBLE();
+ JValue result;
+ if (do_access_check) {
+          // If access checks are required, the dex-to-dex compiler has not run, so
+          // the analysis of whether the class has final fields has not been done.
+          // Conservatively perform the memory barrier now.
+ ANDROID_MEMBAR_STORE();
+ }
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::RETURN_VOID_BARRIER: {
+ PREAMBLE();
+ ANDROID_MEMBAR_STORE();
+ JValue result;
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::RETURN: {
+ PREAMBLE();
+ JValue result;
+ result.SetJ(0);
+ result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::RETURN_WIDE: {
+ PREAMBLE();
+ JValue result;
+ result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::RETURN_OBJECT: {
+ PREAMBLE();
+ JValue result;
+ Object* obj_result = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ result.SetJ(0);
+ result.SetL(obj_result);
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ if (do_assignability_check && obj_result != NULL) {
+ Class* return_type = MethodHelper(shadow_frame.GetMethod()).GetReturnType();
+ if (return_type == NULL) {
+ // Return the pending exception.
+ HANDLE_PENDING_EXCEPTION();
+ }
+ if (!obj_result->VerifierInstanceOf(return_type)) {
+ // This should never happen.
+ self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
+ "Ljava/lang/VirtualMachineError;",
+ "Returning '%s' that is not instance of return type '%s'",
+ ClassHelper(obj_result->GetClass()).GetDescriptor(),
+ ClassHelper(return_type).GetDescriptor());
+ HANDLE_PENDING_EXCEPTION();
+ }
+ }
+ if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), inst->GetDexPc(insns),
+ result);
+ }
+ return result;
+ }
+ case Instruction::CONST_4: {
+ PREAMBLE();
+ uint4_t dst = inst->VRegA_11n(inst_data);
+ int4_t val = inst->VRegB_11n(inst_data);
+ shadow_frame.SetVReg(dst, val);
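+        // A zero constant may also be used as a null reference, so clear the
+        // register's reference view as well.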
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::CONST_16: {
+ PREAMBLE();
+ uint8_t dst = inst->VRegA_21s(inst_data);
+ int16_t val = inst->VRegB_21s();
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CONST: {
+ PREAMBLE();
+ uint8_t dst = inst->VRegA_31i(inst_data);
+ int32_t val = inst->VRegB_31i();
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ inst = inst->Next_3xx();
+ break;
+ }
+ case Instruction::CONST_HIGH16: {
+ PREAMBLE();
+ uint8_t dst = inst->VRegA_21h(inst_data);
+ int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
+ shadow_frame.SetVReg(dst, val);
+ if (val == 0) {
+ shadow_frame.SetVRegReference(dst, NULL);
+ }
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CONST_WIDE_16:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_21s(inst_data), inst->VRegB_21s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::CONST_WIDE_32:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_31i(inst_data), inst->VRegB_31i());
+ inst = inst->Next_3xx();
+ break;
+ case Instruction::CONST_WIDE:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_51l(inst_data), inst->VRegB_51l());
+ inst = inst->Next_51l();
+ break;
+      case Instruction::CONST_WIDE_HIGH16:
+        PREAMBLE();
+        shadow_frame.SetVRegLong(inst->VRegA_21h(inst_data),
+                                 static_cast<uint64_t>(inst->VRegB_21h()) << 48);
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::CONST_STRING: {
+ PREAMBLE();
+ String* s = ResolveString(self, mh, inst->VRegB_21c());
+ if (UNLIKELY(s == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::CONST_STRING_JUMBO: {
+ PREAMBLE();
+ String* s = ResolveString(self, mh, inst->VRegB_31c());
+ if (UNLIKELY(s == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
+ inst = inst->Next_3xx();
+ }
+ break;
+ }
+ case Instruction::CONST_CLASS: {
+ PREAMBLE();
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::MONITOR_ENTER: {
+ PREAMBLE();
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ DoMonitorEnter(self, obj);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+ }
+ break;
+ }
+ case Instruction::MONITOR_EXIT: {
+ PREAMBLE();
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ DoMonitorExit(self, obj);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+ }
+ break;
+ }
+ case Instruction::CHECK_CAST: {
+ PREAMBLE();
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
+ if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ ThrowClassCastException(c, obj->GetClass());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ inst = inst->Next_2xx();
+ }
+ }
+ break;
+ }
+ case Instruction::INSTANCE_OF: {
+ PREAMBLE();
+ Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (UNLIKELY(c == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::ARRAY_LENGTH: {
+ PREAMBLE();
+ Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
+ if (UNLIKELY(array == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
+ inst = inst->Next_1xx();
+ }
+ break;
+ }
+ case Instruction::NEW_INSTANCE: {
+ PREAMBLE();
+ Object* obj = AllocObjectFromCode(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, do_access_check);
+ if (UNLIKELY(obj == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::NEW_ARRAY: {
+ PREAMBLE();
+ int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
+ Object* obj = AllocArrayFromCode(inst->VRegC_22c(), shadow_frame.GetMethod(),
+ length, self, do_access_check);
+ if (UNLIKELY(obj == NULL)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::FILLED_NEW_ARRAY: {
+ PREAMBLE();
+ bool success = DoFilledNewArray<false, do_access_check>(inst, shadow_frame,
+ self, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::FILLED_NEW_ARRAY_RANGE: {
+ PREAMBLE();
+ bool success = DoFilledNewArray<true, do_access_check>(inst, shadow_frame,
+ self, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::FILL_ARRAY_DATA: {
+ PREAMBLE();
+ Object* obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ Array* array = obj->AsArray();
+ DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+ const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
+ if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
+          self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
+                                   "Ljava/lang/ArrayIndexOutOfBoundsException;",
+                                   "failed FILL_ARRAY_DATA; length=%d, count=%d",
+                                   array->GetLength(), payload->element_count);
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ uint32_t size_in_bytes = payload->element_count * payload->element_width;
+ memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+ inst = inst->Next_3xx();
+ break;
+ }
+ case Instruction::THROW: {
+ PREAMBLE();
+ Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+ if (UNLIKELY(exception == NULL)) {
+ ThrowNullPointerException(NULL, "throw with null exception");
+ } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
+ // This should never happen.
+ self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
+ "Ljava/lang/VirtualMachineError;",
+ "Throwing '%s' that is not instance of Throwable",
+ ClassHelper(exception->GetClass()).GetDescriptor());
+ } else {
+ self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
+ }
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ case Instruction::GOTO: {
+ PREAMBLE();
+ int8_t offset = inst->VRegA_10t(inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
+ case Instruction::GOTO_16: {
+ PREAMBLE();
+ int16_t offset = inst->VRegA_20t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
+ case Instruction::GOTO_32: {
+ PREAMBLE();
+ int32_t offset = inst->VRegA_30t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
+ case Instruction::PACKED_SWITCH: {
+ PREAMBLE();
+ int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
+ case Instruction::SPARSE_SWITCH: {
+ PREAMBLE();
+ int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ break;
+ }
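+      // The cmpl/cmpg pairs differ only for NaN: an unordered comparison falls
+      // into the else branch, yielding -1 for the "l" forms and 1 for the "g" forms.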
+ case Instruction::CMPL_FLOAT: {
+ PREAMBLE();
+ float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+ float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CMPG_FLOAT: {
+ PREAMBLE();
+ float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
+ float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
+ int32_t result;
+ if (val1 < val2) {
+ result = -1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CMPL_DOUBLE: {
+ PREAMBLE();
+ double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+ double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CMPG_DOUBLE: {
+ PREAMBLE();
+ double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
+ double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
+ int32_t result;
+ if (val1 < val2) {
+ result = -1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
+ case Instruction::CMP_LONG: {
+ PREAMBLE();
+ int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
+ int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
+ int32_t result;
+ if (val1 > val2) {
+ result = 1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
+ inst = inst->Next_2xx();
+ break;
+ }
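+      // Conditional branches (like the gotos and switches above) test for pending
+      // suspend requests on backward edges, so every loop contains a suspend point.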
+ case Instruction::IF_EQ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_NE: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_LT: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_GE: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_GT: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_LE: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ int16_t offset = inst->VRegC_22t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_EQZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_NEZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_LTZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_GEZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_GTZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::IF_LEZ: {
+ PREAMBLE();
+ if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
+ int16_t offset = inst->VRegB_21t();
+ if (IsBackwardBranch(offset)) {
+ if (UNLIKELY(self->TestAllFlags())) {
+ CheckSuspend(self);
+ }
+ }
+ inst = inst->RelativeAt(offset);
+ } else {
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::AGET_BOOLEAN: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ BooleanArray* array = a->AsBooleanArray();
+ if (LIKELY(array->IsValidIndex(index))) {
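+ // IsValidIndex has already thrown on a bad index, so the raw data can be read without further checks.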
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_BYTE: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ByteArray* array = a->AsByteArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_CHAR: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ CharArray* array = a->AsCharArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_SHORT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ShortArray* array = a->AsShortArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ IntArray* array = a->AsIntArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_WIDE: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ LongArray* array = a->AsLongArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetData()[index]);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::AGET_OBJECT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ if (LIKELY(array->IsValidIndex(index))) {
+ shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_BOOLEAN: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ BooleanArray* array = a->AsBooleanArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_BYTE: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ByteArray* array = a->AsByteArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_CHAR: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ CharArray* array = a->AsCharArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_SHORT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ ShortArray* array = a->AsShortArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ IntArray* array = a->AsIntArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_WIDE: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ LongArray* array = a->AsLongArray();
+ if (LIKELY(array->IsValidIndex(index))) {
+ array->GetData()[index] = val;
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::APUT_OBJECT: {
+ PREAMBLE();
+ Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ HANDLE_PENDING_EXCEPTION();
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
+ Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
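+ // Reference stores also need an assignability check; CheckAssignable throws ArrayStoreException on mismatch.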
+ if (LIKELY(array->IsValidIndex(index) && array->CheckAssignable(val))) {
+ array->SetWithoutChecks(index, val);
+ inst = inst->Next_2xx();
+ } else {
+ HANDLE_PENDING_EXCEPTION();
+ }
+ break;
+ }
+ case Instruction::IGET_BOOLEAN: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_BYTE: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_CHAR: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_SHORT: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_WIDE: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_OBJECT: {
+ PREAMBLE();
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_QUICK: {
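+ // *_QUICK opcodes are produced by the dex-to-dex compiler: the resolved field offset is baked into the instruction, so no field lookup is needed here.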
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_WIDE_QUICK: {
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_OBJECT_QUICK: {
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_BOOLEAN: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_BYTE: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_CHAR: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_SHORT: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_WIDE: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SGET_OBJECT: {
+ PREAMBLE();
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_BOOLEAN: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_BYTE: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_CHAR: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_SHORT: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_WIDE: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_OBJECT: {
+ PREAMBLE();
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_QUICK: {
+ PREAMBLE();
+ bool success = DoIPutQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_WIDE_QUICK: {
+ PREAMBLE();
+ bool success = DoIPutQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IPUT_OBJECT_QUICK: {
+ PREAMBLE();
+ bool success = DoIPutQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_BOOLEAN: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_BYTE: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_CHAR: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_SHORT: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_WIDE: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SPUT_OBJECT: {
+ PREAMBLE();
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL: {
+ PREAMBLE();
+ bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_SUPER: {
+ PREAMBLE();
+ bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_SUPER_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_DIRECT: {
+ PREAMBLE();
+ bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_DIRECT_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_INTERFACE: {
+ PREAMBLE();
+ bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_INTERFACE_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_STATIC: {
+ PREAMBLE();
+ bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_STATIC_RANGE: {
+ PREAMBLE();
+ bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL_QUICK: {
+ PREAMBLE();
+ bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+ PREAMBLE();
+ bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ break;
+ }
+ case Instruction::NEG_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NOT_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NEG_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NOT_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NEG_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::NEG_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::LONG_TO_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::LONG_TO_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::LONG_TO_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::FLOAT_TO_INT: {
+ PREAMBLE();
+ float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
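+ // art_float_to_integral applies the Java narrowing rules: NaN becomes 0 and out-of-range values saturate.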
+ int32_t result = art_float_to_integral<int32_t, float>(val);
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::FLOAT_TO_LONG: {
+ PREAMBLE();
+ float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+ int64_t result = art_float_to_integral<int64_t, float>(val);
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::FLOAT_TO_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::DOUBLE_TO_INT: {
+ PREAMBLE();
+ double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+ int32_t result = art_float_to_integral<int32_t, double>(val);
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DOUBLE_TO_LONG: {
+ PREAMBLE();
+ double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+ int64_t result = art_float_to_integral<int64_t, double>(val);
+ shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DOUBLE_TO_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_BYTE:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_CHAR:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::INT_TO_SHORT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
+ static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ case Instruction::ADD_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) +
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SUB_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) -
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) *
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_INT: {
+ PREAMBLE();
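+ // DoIntDivide throws ArithmeticException for a zero divisor and handles the kMinInt / -1 overflow case.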
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::REM_INT: {
+ PREAMBLE();
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::SHL_INT:
+ PREAMBLE();
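+ // Shift distances use only their low five bits, per Java semantics.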
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) <<
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHR_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::USHR_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::AND_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) &
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::OR_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) |
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::XOR_INT:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_23x()) ^
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) +
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SUB_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) -
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) *
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_LONG:
+ PREAMBLE();
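+ // The long divide helper reports failure via a pending exception rather than a boolean result.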
+ DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
+ break;
+ case Instruction::REM_LONG:
+ PREAMBLE();
+ DoLongRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
+ break;
+ case Instruction::AND_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) &
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::OR_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) |
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::XOR_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHL_LONG:
+ PREAMBLE();
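+ // Long shifts mask the distance to its low six bits.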
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHR_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::USHR_LONG:
+ PREAMBLE();
+ shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
+ (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SUB_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
+ shadow_frame.GetVRegFloat(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::REM_FLOAT:
+ PREAMBLE();
+ shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
+ fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
+ shadow_frame.GetVRegFloat(inst->VRegC_23x())));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SUB_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
+ shadow_frame.GetVRegDouble(inst->VRegC_23x()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::REM_DOUBLE:
+ PREAMBLE();
+ shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
+ fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
+ shadow_frame.GetVRegDouble(inst->VRegC_23x())));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
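+ // 2addr forms use vA as both source and destination, so it is fetched once up front.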
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) +
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SUB_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) -
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::MUL_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) *
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DIV_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
+ break;
+ }
+ case Instruction::REM_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
+ break;
+ }
+ case Instruction::SHL_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) <<
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SHR_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::USHR_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::AND_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) &
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::OR_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) |
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::XOR_INT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVReg(vregA,
+ shadow_frame.GetVReg(vregA) ^
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::ADD_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) +
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SUB_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) -
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::MUL_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) *
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DIV_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+ break;
+ }
+ case Instruction::REM_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+ break;
+ }
+ case Instruction::AND_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) &
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::OR_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) |
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::XOR_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) ^
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SHL_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) <<
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SHR_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ shadow_frame.GetVRegLong(vregA) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::USHR_LONG_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegLong(vregA,
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
+ (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::ADD_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) +
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SUB_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) -
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::MUL_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) *
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DIV_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ shadow_frame.GetVRegFloat(vregA) /
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::REM_FLOAT_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegFloat(vregA,
+ fmodf(shadow_frame.GetVRegFloat(vregA),
+ shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::ADD_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) +
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::SUB_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) -
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::MUL_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) *
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::DIV_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ shadow_frame.GetVRegDouble(vregA) /
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::REM_DOUBLE_2ADDR: {
+ PREAMBLE();
+ uint4_t vregA = inst->VRegA_12x(inst_data);
+ shadow_frame.SetVRegDouble(vregA,
+ fmod(shadow_frame.GetVRegDouble(vregA),
+ shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data))));
+ inst = inst->Next_1xx();
+ break;
+ }
+ case Instruction::ADD_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) +
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::RSUB_INT:
+ PREAMBLE();
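+ // Reverse subtract: the literal is the minuend, i.e. vA = lit - vB.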
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ inst->VRegC_22s() -
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) *
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_INT_LIT16: {
+ PREAMBLE();
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::REM_INT_LIT16: {
+ PREAMBLE();
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::AND_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) &
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::OR_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) |
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::XOR_INT_LIT16:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) ^
+ inst->VRegC_22s());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::ADD_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) +
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::RSUB_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ inst->VRegC_22b() -
+ shadow_frame.GetVReg(inst->VRegB_22b()));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::MUL_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) *
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::DIV_INT_LIT8: {
+ PREAMBLE();
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::REM_INT_LIT8: {
+ PREAMBLE();
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::AND_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) &
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::OR_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) |
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::XOR_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) ^
+ inst->VRegC_22b());
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHL_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) <<
+ (inst->VRegC_22b() & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::SHR_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ shadow_frame.GetVReg(inst->VRegB_22b()) >>
+ (inst->VRegC_22b() & 0x1f));
+ inst = inst->Next_2xx();
+ break;
+ case Instruction::USHR_INT_LIT8:
+ PREAMBLE();
+ shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
+ static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
+ (inst->VRegC_22b() & 0x1f));
+ inst = inst->Next_2xx();
+ break;
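+ // The remaining opcode values are unassigned in the dex format; reaching one means the bytecode is malformed.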
+ case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
+ case Instruction::UNUSED_EB ... Instruction::UNUSED_FF:
+ case Instruction::UNUSED_79:
+ case Instruction::UNUSED_7A:
+ UnexpectedOpcode(inst, mh);
+ }
+ }
+} // NOLINT(readability/fn_size)
+
+// Explicit instantiations of ExecuteSwitchImpl for both access-check modes.
+template JValue ExecuteSwitchImpl<true>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template JValue ExecuteSwitchImpl<false>(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index a2efc48c84..523d89278a 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -47,7 +47,7 @@ std::string DescribeMethod(const MethodId& method_id) {
std::string DescribeRefTypeId(const RefTypeId& ref_type_id) {
std::string signature("unknown");
- Dbg::GetSignature(ref_type_id, signature);
+ Dbg::GetSignature(ref_type_id, &signature);
return StringPrintf("%#llx (%s)", ref_type_id, signature.c_str());
}
@@ -547,7 +547,7 @@ static JdwpError RT_Signature(JdwpState*, Request& request, ExpandBuf* pReply, b
RefTypeId refTypeId = request.ReadRefTypeId();
std::string signature;
- JdwpError status = Dbg::GetSignature(refTypeId, signature);
+ JdwpError status = Dbg::GetSignature(refTypeId, &signature);
if (status != ERR_NONE) {
return status;
}
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 0a0028462d..8be9b21cdf 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -228,6 +228,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
const char* name, const char* sig, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Class* c = soa.Decode<Class*>(jni_class);
+ DCHECK(c != nullptr);
if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) {
return NULL;
}
@@ -307,14 +308,14 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
return soa.EncodeField(field);
}
-static void PinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array)
+static void PinPrimitiveArray(const ScopedObjectAccess& soa, Array* array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JavaVMExt* vm = soa.Vm();
MutexLock mu(soa.Self(), vm->pins_lock);
vm->pin_table.Add(array);
}
-static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, const Array* array)
+static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, Array* array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JavaVMExt* vm = soa.Vm();
MutexLock mu(soa.Self(), vm->pins_lock);
@@ -1982,7 +1983,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(GetStringUTFRegion, java_string);
ScopedObjectAccess soa(env);
String* s = soa.Decode<String*>(java_string);
- const CharArray* chars = s->GetCharArray();
+ CharArray* chars = s->GetCharArray();
PinPrimitiveArray(soa, chars);
if (is_copy != NULL) {
*is_copy = JNI_FALSE;
@@ -3063,15 +3064,6 @@ void JavaVMExt::AllowNewWeakGlobals() {
weak_globals_add_condition_.Broadcast(self);
}
-void JavaVMExt::SweepWeakGlobals(IsMarkedTester is_marked, void* arg) {
- MutexLock mu(Thread::Current(), weak_globals_lock_);
- for (const Object** entry : weak_globals_) {
- if (!is_marked(*entry, arg)) {
- *entry = kClearedJniWeakGlobal;
- }
- }
-}
-
mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
MutexLock mu(self, weak_globals_lock_);
while (UNLIKELY(!allow_new_weak_globals_)) {
@@ -3253,6 +3245,18 @@ void* JavaVMExt::FindCodeForNativeMethod(ArtMethod* m) {
return native_method;
}
+void JavaVMExt::SweepJniWeakGlobals(RootVisitor visitor, void* arg) {
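+ // The visitor returns the object's new address (objects can move under a moving collector) or nullptr if it is dead; dead entries are set to the cleared-weak-global sentinel.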
+ MutexLock mu(Thread::Current(), weak_globals_lock_);
+ for (mirror::Object** entry : weak_globals_) {
+ mirror::Object* obj = *entry;
+ mirror::Object* new_obj = visitor(obj, arg);
+ if (new_obj == nullptr) {
+ new_obj = kClearedJniWeakGlobal;
+ }
+ *entry = new_obj;
+ }
+}
+
void JavaVMExt::VisitRoots(RootVisitor* visitor, void* arg) {
Thread* self = Thread::Current();
{
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 32d0bfcee8..c73ed48014 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -98,7 +98,7 @@ class JavaVMExt : public JavaVM {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DeleteWeakGlobalRef(Thread* self, jweak obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepWeakGlobals(IsMarkedTester is_marked, void* arg);
+ void SweepJniWeakGlobals(RootVisitor visitor, void* arg);
mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref);
Runtime* runtime;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 79d156de27..c389580ebf 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1012,31 +1012,50 @@ TEST_F(JniInternalTest, RegisterNatives) {
scalar_type, \
expected_class_descriptor) \
jsize size = 4; \
+ \
/* Allocate an array and check it has the right type and length. */ \
scalar_type ## Array a = env_->new_fn(size); \
EXPECT_TRUE(a != NULL); \
EXPECT_TRUE(env_->IsInstanceOf(a, env_->FindClass(expected_class_descriptor))); \
EXPECT_EQ(size, env_->GetArrayLength(a)); \
+ \
+ /* GetPrimitiveArrayRegion/SetPrimitiveArrayRegion */ \
/* AIOOBE for negative start offset. */ \
env_->get_region_fn(a, -1, 1, NULL); \
EXPECT_EXCEPTION(aioobe_); \
env_->set_region_fn(a, -1, 1, NULL); \
EXPECT_EXCEPTION(aioobe_); \
+ \
/* AIOOBE for negative length. */ \
env_->get_region_fn(a, 0, -1, NULL); \
EXPECT_EXCEPTION(aioobe_); \
env_->set_region_fn(a, 0, -1, NULL); \
EXPECT_EXCEPTION(aioobe_); \
+ \
/* AIOOBE for buffer overrun. */ \
env_->get_region_fn(a, size - 1, size, NULL); \
EXPECT_EXCEPTION(aioobe_); \
env_->set_region_fn(a, size - 1, size, NULL); \
EXPECT_EXCEPTION(aioobe_); \
+ \
+ /* It's okay for the buffer to be NULL as long as the length is 0. */ \
+ env_->get_region_fn(a, 2, 0, NULL); \
+ /* ...though an out-of-range offset still raises ArrayIndexOutOfBoundsException, even with length 0. */ \
+ env_->get_region_fn(a, 123, 0, NULL); \
+ EXPECT_EXCEPTION(aioobe_); \
+ \
+ /* It's okay for the buffer to be NULL as long as the length is 0. */ \
+ env_->set_region_fn(a, 2, 0, NULL); \
+ /* ...though an out-of-range offset still raises ArrayIndexOutOfBoundsException, even with length 0. */ \
+ env_->set_region_fn(a, 123, 0, NULL); \
+ EXPECT_EXCEPTION(aioobe_); \
+ \
/* Prepare a couple of buffers. */ \
UniquePtr<scalar_type[]> src_buf(new scalar_type[size]); \
UniquePtr<scalar_type[]> dst_buf(new scalar_type[size]); \
for (jsize i = 0; i < size; ++i) { src_buf[i] = scalar_type(i); } \
for (jsize i = 0; i < size; ++i) { dst_buf[i] = scalar_type(-1); } \
+ \
/* Copy all of src_buf onto the heap. */ \
env_->set_region_fn(a, 0, size, &src_buf[0]); \
/* Copy back only part. */ \
@@ -1252,6 +1271,12 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
EXPECT_EQ('l', chars[2]);
EXPECT_EQ('x', chars[3]);
+ // It's okay for the buffer to be NULL as long as the length is 0.
+ env_->GetStringRegion(s, 2, 0, NULL);
+ // ...though an out-of-range offset still raises StringIndexOutOfBoundsException, even with length 0.
+ env_->GetStringRegion(s, 123, 0, NULL);
+ EXPECT_EXCEPTION(sioobe_);
+
env_->GetStringUTFRegion(s, -1, 0, NULL);
EXPECT_EXCEPTION(sioobe_);
env_->GetStringUTFRegion(s, 0, -1, NULL);
@@ -1267,6 +1292,12 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
EXPECT_EQ('e', bytes[1]);
EXPECT_EQ('l', bytes[2]);
EXPECT_EQ('x', bytes[3]);
+
+ // It's okay for the buffer to be NULL as long as the length is 0.
+ env_->GetStringUTFRegion(s, 2, 0, NULL);
+ // ...though an out-of-range offset still raises StringIndexOutOfBoundsException, even with length 0.
+ env_->GetStringUTFRegion(s, 123, 0, NULL);
+ EXPECT_EXCEPTION(sioobe_);
}
TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index eb73c7dd38..c60e714d44 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -20,6 +20,9 @@
#include "array.h"
#include "class.h"
+#include "gc/heap-inl.h"
+#include "thread.h"
+#include "utils.h"
namespace art {
namespace mirror {
@@ -33,6 +36,68 @@ inline size_t Array::SizeOf() const {
return header_size + data_size;
}
+static inline size_t ComputeArraySize(Thread* self, Class* array_class, int32_t component_count,
+ size_t component_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(array_class != NULL);
+ DCHECK_GE(component_count, 0);
+ DCHECK(array_class->IsArrayClass());
+
+ size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4);
+ size_t data_size = component_count * component_size;
+ size_t size = header_size + data_size;
+
+ // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
+ size_t component_shift = sizeof(size_t) * 8 - 1 - CLZ(component_size);
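+ // component_size is a power of two, so component_shift is its log2; if shifting data_size back down does not recover component_count, the multiplication overflowed.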
+ if (UNLIKELY(data_size >> component_shift != size_t(component_count) || size < data_size)) {
+ self->ThrowOutOfMemoryError(StringPrintf("%s of length %d would overflow",
+ PrettyDescriptor(array_class).c_str(),
+ component_count).c_str());
+ return 0; // failure
+ }
+ return size;
+}
+
+static inline Array* SetArrayLength(Array* array, size_t length) {
+ if (LIKELY(array != NULL)) {
+ DCHECK(array->IsArrayInstance());
+ array->SetLength(length);
+ }
+ return array;
+}
+
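+// AllocInstrumented routes through the heap entry point that honors allocation instrumentation (such as allocation tracking); AllocUninstrumented is the fast path used when none is active.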
+inline Array* Array::AllocInstrumented(Thread* self, Class* array_class, int32_t component_count,
+ size_t component_size) {
+ size_t size = ComputeArraySize(self, array_class, component_count, component_size);
+ if (UNLIKELY(size == 0)) {
+ return NULL;
+ }
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ Array* array = down_cast<Array*>(heap->AllocObjectInstrumented(self, array_class, size));
+ return SetArrayLength(array, component_count);
+}
+
+inline Array* Array::AllocUninstrumented(Thread* self, Class* array_class, int32_t component_count,
+ size_t component_size) {
+ size_t size = ComputeArraySize(self, array_class, component_count, component_size);
+ if (UNLIKELY(size == 0)) {
+ return NULL;
+ }
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ Array* array = down_cast<Array*>(heap->AllocObjectUninstrumented(self, array_class, size));
+ return SetArrayLength(array, component_count);
+}
+
+inline Array* Array::AllocInstrumented(Thread* self, Class* array_class, int32_t component_count) {
+ DCHECK(array_class->IsArrayClass());
+ return AllocInstrumented(self, array_class, component_count, array_class->GetComponentSize());
+}
+
+inline Array* Array::AllocUninstrumented(Thread* self, Class* array_class, int32_t component_count) {
+ DCHECK(array_class->IsArrayClass());
+ return AllocUninstrumented(self, array_class, component_count, array_class->GetComponentSize());
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 88cd309eeb..020085dbf0 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -32,39 +32,6 @@
namespace art {
namespace mirror {
-Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size) {
- DCHECK(array_class != NULL);
- DCHECK_GE(component_count, 0);
- DCHECK(array_class->IsArrayClass());
-
- size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4);
- size_t data_size = component_count * component_size;
- size_t size = header_size + data_size;
-
- // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
- size_t component_shift = sizeof(size_t) * 8 - 1 - CLZ(component_size);
- if (UNLIKELY(data_size >> component_shift != size_t(component_count) || size < data_size)) {
- self->ThrowOutOfMemoryError(StringPrintf("%s of length %d would overflow",
- PrettyDescriptor(array_class).c_str(),
- component_count).c_str());
- return NULL;
- }
-
- gc::Heap* heap = Runtime::Current()->GetHeap();
- Array* array = down_cast<Array*>(heap->AllocObject(self, array_class, size));
- if (array != NULL) {
- DCHECK(array->IsArrayInstance());
- array->SetLength(component_count);
- }
- return array;
-}
-
-Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) {
- DCHECK(array_class->IsArrayClass());
- return Alloc(self, array_class, component_count, array_class->GetComponentSize());
-}
-
// Create a multi-dimensional array of Objects or primitive types.
//
// We have to generate the names for X[], X[][], X[][][], and so on. The
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index db6132df59..570dcaa292 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -27,10 +27,24 @@ class MANAGED Array : public Object {
// A convenience for code that doesn't know the component size,
// and doesn't want to have to work it out itself.
static Array* Alloc(Thread* self, Class* array_class, int32_t component_count)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocInstrumented(self, array_class, component_count);
+ }
+ static Array* AllocUninstrumented(Thread* self, Class* array_class, int32_t component_count)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static Array* AllocInstrumented(Thread* self, Class* array_class, int32_t component_count)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
size_t component_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocInstrumented(self, array_class, component_count, component_size);
+ }
+ static Array* AllocUninstrumented(Thread* self, Class* array_class, int32_t component_count,
+ size_t component_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static Array* AllocInstrumented(Thread* self, Class* array_class, int32_t component_count,
+ size_t component_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Array* CreateMultiArray(Thread* self, Class* element_class, IntArray* dimensions)
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 1e1138745d..88cffb77fc 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -23,6 +23,7 @@
#include "art_method.h"
#include "class_loader.h"
#include "dex_cache.h"
+#include "gc/heap-inl.h"
#include "iftable.h"
#include "object_array-inl.h"
#include "runtime.h"
@@ -342,6 +343,24 @@ inline void Class::SetName(String* name) {
SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, name_), name, false);
}
+inline void Class::CheckObjectAlloc() {
+ DCHECK(!IsArrayClass()) << PrettyClass(this);
+ DCHECK(IsInstantiable()) << PrettyClass(this);
+ // TODO: decide whether we want this check. It currently fails during bootstrap.
+ // DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this);
+ DCHECK_GE(this->object_size_, sizeof(Object));
+}
+
+inline Object* Class::AllocObjectInstrumented(Thread* self) {
+ CheckObjectAlloc();
+ return Runtime::Current()->GetHeap()->AllocObjectInstrumented(self, this, this->object_size_);
+}
+
+inline Object* Class::AllocObjectUninstrumented(Thread* self) {
+ CheckObjectAlloc();
+ return Runtime::Current()->GetHeap()->AllocObjectUninstrumented(self, this, this->object_size_);
+}
+
} // namespace mirror
} // namespace art
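Note: CheckObjectAlloc factors the debug-only preconditions out of the old AllocObject so that both variants share them, while only the instrumented variant pays for allocation tracking. The shape of that split, reduced to a sketch (hypothetical names, std::malloc standing in for the heap):

    #include <cstdlib>

    static void CheckAlloc(std::size_t size) {
      // Shared DCHECK-style preconditions for both entry points.
      (void)size;
    }

    void* AllocUninstrumented(std::size_t size) {
      CheckAlloc(size);
      return std::malloc(size);  // fast path: no tracking overhead
    }

    void* AllocInstrumented(std::size_t size) {
      CheckAlloc(size);
      // Allocation tracking / stats hook would run here.
      return std::malloc(size);
    }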
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index add7e1b2af..c6db5b9a61 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -118,15 +118,6 @@ void Class::SetDexCache(DexCache* new_dex_cache) {
SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache, false);
}
-Object* Class::AllocObject(Thread* self) {
- DCHECK(!IsArrayClass()) << PrettyClass(this);
- DCHECK(IsInstantiable()) << PrettyClass(this);
- // TODO: decide whether we want this check. It currently fails during bootstrap.
- // DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this);
- DCHECK_GE(this->object_size_, sizeof(Object));
- return Runtime::Current()->GetHeap()->AllocObject(self, this, this->object_size_);
-}
-
void Class::SetClassSize(size_t new_class_size) {
if (kIsDebugBuild && (new_class_size < GetClassSize())) {
DumpClass(LOG(ERROR), kDumpClassFullDetail);
@@ -144,7 +135,7 @@ String* Class::ComputeName() {
if (name != NULL) {
return name;
}
- std::string descriptor(ClassHelper(this).GetDescriptor());
+ std::string descriptor(ClassHelper(this).GetDescriptorAsStringPiece().as_string());
if ((descriptor[0] != 'L') && (descriptor[0] != '[')) {
// The descriptor indicates that this is the class for
// a primitive type; special-case the return value.
@@ -303,8 +294,8 @@ bool Class::IsInSamePackage(const Class* that) const {
return true;
}
// Compare the package part of the descriptor string.
- return IsInSamePackage(ClassHelper(klass1).GetDescriptor(),
- ClassHelper(klass2).GetDescriptor());
+ return IsInSamePackage(ClassHelper(klass1).GetDescriptorAsStringPiece(),
+ ClassHelper(klass2).GetDescriptorAsStringPiece());
}
bool Class::IsClassClass() const {
@@ -334,7 +325,7 @@ void Class::SetClassLoader(ClassLoader* new_class_loader) {
SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader, false);
}
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature) const {
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature) const {
// Check the current class before checking the interfaces.
ArtMethod* method = FindDeclaredVirtualMethod(name, signature);
if (method != NULL) {
@@ -370,13 +361,24 @@ ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_me
return NULL;
}
-
ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const {
MethodHelper mh;
for (size_t i = 0; i < NumDirectMethods(); ++i) {
ArtMethod* method = GetDirectMethod(i);
mh.ChangeMethod(method);
- if (name == mh.GetName() && signature == mh.GetSignature()) {
+ if (name == mh.GetNameAsStringPiece() && mh.GetSignature() == signature) {
+ return method;
+ }
+ }
+ return NULL;
+}
+
+ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature) const {
+ MethodHelper mh;
+ for (size_t i = 0; i < NumDirectMethods(); ++i) {
+ ArtMethod* method = GetDirectMethod(i);
+ mh.ChangeMethod(method);
+ if (name == mh.GetNameAsStringPiece() && signature == mh.GetSignature()) {
return method;
}
}
@@ -405,6 +407,16 @@ ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& s
return NULL;
}
+ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature) const {
+ for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
+ ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature);
+ if (method != NULL) {
+ return method;
+ }
+ }
+ return NULL;
+}
+
ArtMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const {
for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx);
@@ -415,13 +427,25 @@ ArtMethod* Class::FindDirectMethod(const DexCache* dex_cache, uint32_t dex_metho
return NULL;
}
+ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const {
+ MethodHelper mh;
+ for (size_t i = 0; i < NumVirtualMethods(); ++i) {
+ ArtMethod* method = GetVirtualMethod(i);
+ mh.ChangeMethod(method);
+ if (name == mh.GetNameAsStringPiece() && mh.GetSignature() == signature) {
+ return method;
+ }
+ }
+ return NULL;
+}
+
ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
- const StringPiece& signature) const {
+ const Signature& signature) const {
MethodHelper mh;
for (size_t i = 0; i < NumVirtualMethods(); ++i) {
ArtMethod* method = GetVirtualMethod(i);
mh.ChangeMethod(method);
- if (name == mh.GetName() && signature == mh.GetSignature()) {
+ if (name == mh.GetNameAsStringPiece() && signature == mh.GetSignature()) {
return method;
}
}
@@ -450,6 +474,16 @@ ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const StringPiece&
return NULL;
}
+ArtMethod* Class::FindVirtualMethod(const StringPiece& name, const Signature& signature) const {
+ for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
+ ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature);
+ if (method != NULL) {
+ return method;
+ }
+ }
+ return NULL;
+}
+
ArtMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const {
for (const Class* klass = this; klass != NULL; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx);
@@ -460,6 +494,21 @@ ArtMethod* Class::FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_meth
return NULL;
}
+ArtMethod* Class::FindClassInitializer() const {
+ for (size_t i = 0; i < NumDirectMethods(); ++i) {
+ ArtMethod* method = GetDirectMethod(i);
+ if (method->IsConstructor() && method->IsStatic()) {
+ if (kIsDebugBuild) {
+ MethodHelper mh(method);
+ CHECK_STREQ(mh.GetName(), "<clinit>");
+ CHECK_STREQ(mh.GetSignature().ToString().c_str(), "()V");
+ }
+ return method;
+ }
+ }
+ return NULL;
+}
+
ArtField* Class::FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) {
// Is the field in this class?
// Interfaces are not relevant because they can't contain instance fields.
@@ -467,7 +516,7 @@ ArtField* Class::FindDeclaredInstanceField(const StringPiece& name, const String
for (size_t i = 0; i < NumInstanceFields(); ++i) {
ArtField* f = GetInstanceField(i);
fh.ChangeField(f);
- if (name == fh.GetName() && type == fh.GetTypeDescriptor()) {
+ if (name == fh.GetNameAsStringPiece() && type == fh.GetTypeDescriptorAsStringPiece()) {
return f;
}
}
@@ -516,7 +565,7 @@ ArtField* Class::FindDeclaredStaticField(const StringPiece& name, const StringPi
for (size_t i = 0; i < NumStaticFields(); ++i) {
ArtField* f = GetStaticField(i);
fh.ChangeField(f);
- if (name == fh.GetName() && type == fh.GetTypeDescriptor()) {
+ if (name == fh.GetNameAsStringPiece() && type == fh.GetTypeDescriptorAsStringPiece()) {
return f;
}
}
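Note: the switch from GetName()/GetDescriptor() to the *AsStringPiece variants turns each probe in these lookup loops from a std::string construction into a length check plus memcmp over the dex file's in-place UTF data, which matters when the loop runs once per declared method. In modern C++ the same idea is std::string_view; a minimal sketch:

    #include <string_view>

    // A StringPiece comparison has std::string_view semantics: no copy of the
    // dex file's string data is made, so a miss costs at most one memcmp.
    bool NameMatches(std::string_view probe, std::string_view dex_entry) {
      return probe == dex_entry;  // length compared first, then bytes
    }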
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d97b603ad8..586151de45 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -59,6 +59,7 @@ namespace art {
struct ClassClassOffsets;
struct ClassOffsets;
+class Signature;
class StringPiece;
namespace mirror {
@@ -371,7 +372,12 @@ class MANAGED Class : public StaticStorageBase {
}
// Creates a raw object instance but does not invoke the default constructor.
- Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocObjectInstrumented(self);
+ }
+
+ Object* AllocObjectUninstrumented(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Object* AllocObjectInstrumented(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsVariableSize() const {
// Classes and arrays vary in size, and so the object_size_ field cannot
@@ -560,39 +566,53 @@ class MANAGED Class : public StaticStorageBase {
ArtMethod* FindVirtualMethodForInterface(ArtMethod* method) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE;
- ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& descriptor) const
+ ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method) const
+ ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const
+ ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+ ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& descriptor) const
+ ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+ ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature) const
+ ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+ ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature) const
+ ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+ ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ArtMethod* FindClassInitializer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
int32_t GetIfTableCount() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
IfTable* GetIfTable() const;
@@ -764,6 +784,8 @@ class MANAGED Class : public StaticStorageBase {
bool IsAssignableFromArray(const Class* klass) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// defining class loader, or NULL for the "bootstrap" system loader
ClassLoader* class_loader_;
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index e105525f79..003581a1c8 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -189,6 +189,11 @@ class MANAGED Object {
}
}
+ Object** GetFieldObjectAddr(MemberOffset field_offset) ALWAYS_INLINE {
+ VerifyObject(this);
+ return reinterpret_cast<Object**>(reinterpret_cast<byte*>(this) + field_offset.Int32Value());
+ }
+
uint32_t GetField32(MemberOffset field_offset, bool is_volatile) const {
VerifyObject(this);
const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
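Note: GetFieldObjectAddr exposes the slot's address rather than its value, which is what a moving collector needs in order to write a forwarded pointer back. The arithmetic is plain byte offsetting; a sketch of the same computation with hypothetical types:

    #include <cstdint>

    struct Obj;  // stand-in for mirror::Object

    // Compute the address of a reference field living at byte_offset inside
    // obj, mirroring GetFieldObjectAddr above.
    Obj** FieldAddr(Obj* obj, int32_t byte_offset) {
      return reinterpret_cast<Obj**>(
          reinterpret_cast<uint8_t*>(obj) + byte_offset);
    }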
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index a505ed00a3..9d76c6bc11 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -17,6 +17,7 @@
#include "stack_trace_element.h"
#include "class.h"
+#include "class-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "object-inl.h"
#include "string.h"
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index f8a0e531eb..9c93f17f8e 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -17,13 +17,14 @@
#include "string.h"
#include "array.h"
+#include "class-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "intern_table.h"
#include "object-inl.h"
#include "runtime.h"
#include "sirt_ref.h"
#include "thread.h"
-#include "utf.h"
+#include "utf-inl.h"
namespace art {
namespace mirror {
@@ -32,6 +33,10 @@ const CharArray* String::GetCharArray() const {
return GetFieldObject<const CharArray*>(ValueOffset(), false);
}
+CharArray* String::GetCharArray() {
+ return GetFieldObject<CharArray*>(ValueOffset(), false);
+}
+
void String::ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength()));
}
@@ -285,4 +290,3 @@ int32_t String::CompareTo(String* rhs) const {
} // namespace mirror
} // namespace art
-
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 81fe42f2a6..1879f04bef 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -44,6 +44,7 @@ class MANAGED String : public Object {
}
const CharArray* GetCharArray() const;
+ CharArray* GetCharArray();
int32_t GetOffset() const {
int32_t result = GetField32(OffsetOffset(), false);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 088d1f7a88..e7ab2d49e0 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -194,6 +194,10 @@ mirror::Object* Monitor::GetObject() {
return obj_;
}
+void Monitor::SetObject(mirror::Object* object) {
+ obj_ = object;
+}
+
void Monitor::Lock(Thread* self) {
if (owner_ == self) {
lock_count_++;
@@ -260,8 +264,7 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
if (!Runtime::Current()->IsStarted()) {
std::ostringstream ss;
self->Dump(ss);
- std::string str(ss.str());
- LOG(ERROR) << "IllegalMonitorStateException: " << str;
+ LOG(ERROR) << self->GetException(NULL)->Dump() << "\n" << ss.str();
}
va_end(args);
}
@@ -1020,15 +1023,19 @@ void MonitorList::Add(Monitor* m) {
list_.push_front(m);
}
-void MonitorList::SweepMonitorList(IsMarkedTester is_marked, void* arg) {
+void MonitorList::SweepMonitorList(RootVisitor visitor, void* arg) {
MutexLock mu(Thread::Current(), monitor_list_lock_);
for (auto it = list_.begin(); it != list_.end(); ) {
Monitor* m = *it;
- if (!is_marked(m->GetObject(), arg)) {
- VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
+ mirror::Object* obj = m->GetObject();
+ mirror::Object* new_obj = visitor(obj, arg);
+ if (new_obj == nullptr) {
+ VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
+ << m->GetObject();
delete m;
it = list_.erase(it);
} else {
+ m->SetObject(new_obj);
++it;
}
}
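Note: SweepMonitorList's contract changes with the visitor typedef. Instead of asking "is this object marked?", the sweep hands each monitor's object to the visitor and either frees the monitor (nullptr return) or stores the possibly forwarded address back through the new SetObject. The sweep shape in isolation, over a std::list with hypothetical types:

    #include <list>

    struct Obj;
    struct Mon { Obj* obj; };
    using Visitor = Obj* (*)(Obj*, void*);

    void Sweep(std::list<Mon*>& monitors, Visitor visit, void* arg) {
      for (auto it = monitors.begin(); it != monitors.end(); ) {
        Mon* m = *it;
        Obj* new_obj = visit(m->obj, arg);
        if (new_obj == nullptr) {  // object is dead: drop the monitor
          delete m;
          it = monitors.erase(it);
        } else {
          m->obj = new_obj;        // object may have moved: update the pointer
          ++it;
        }
      }
    }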
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 0b5b7e546a..71fe71671f 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -103,6 +103,7 @@ class Monitor {
static bool IsValidLockWord(int32_t lock_word);
mirror::Object* GetObject();
+ void SetObject(mirror::Object* object);
private:
explicit Monitor(Thread* owner, mirror::Object* obj)
@@ -159,7 +160,7 @@ class Monitor {
int lock_count_ GUARDED_BY(monitor_lock_);
// What object are we part of (for debugging).
- mirror::Object* const obj_;
+ mirror::Object* obj_;
// Threads currently waiting on this monitor.
Thread* wait_set_ GUARDED_BY(monitor_lock_);
@@ -182,10 +183,11 @@ class MonitorList {
~MonitorList();
void Add(Monitor* m);
- void SweepMonitorList(IsMarkedTester is_marked, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ void SweepMonitorList(RootVisitor visitor, void* arg);
void DisallowNewMonitors();
void AllowNewMonitors();
+
private:
bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 30b4dc7ef5..100f5a9b18 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -316,6 +316,28 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos,
}
}
+static void System_arraycopyCharUnchecked(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst, jint dstPos, jint length) {
+ ScopedObjectAccess soa(env);
+ DCHECK(javaSrc != NULL);
+ DCHECK(javaDst != NULL);
+ mirror::Object* srcObject = soa.Decode<mirror::Object*>(javaSrc);
+ mirror::Object* dstObject = soa.Decode<mirror::Object*>(javaDst);
+ DCHECK(srcObject->IsArrayInstance());
+ DCHECK(dstObject->IsArrayInstance());
+ mirror::Array* srcArray = srcObject->AsArray();
+ mirror::Array* dstArray = dstObject->AsArray();
+ DCHECK(srcPos >= 0 && dstPos >= 0 && length >= 0 &&
+ srcPos + length <= srcArray->GetLength() && dstPos + length <= dstArray->GetLength());
+ DCHECK_EQ(srcArray->GetClass()->GetComponentType(), dstArray->GetClass()->GetComponentType());
+ DCHECK(srcArray->GetClass()->GetComponentType()->IsPrimitive());
+ DCHECK(dstArray->GetClass()->GetComponentType()->IsPrimitive());
+ DCHECK_EQ(srcArray->GetClass()->GetComponentSize(), static_cast<size_t>(2));
+ DCHECK_EQ(dstArray->GetClass()->GetComponentSize(), static_cast<size_t>(2));
+ uint8_t* dstBytes = reinterpret_cast<uint8_t*>(dstArray->GetRawData(2));
+ const uint8_t* srcBytes = reinterpret_cast<const uint8_t*>(srcArray->GetRawData(2));
+ move16(dstBytes + dstPos * 2, srcBytes + srcPos * 2, length * 2);
+}
+
static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) {
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(javaObject);
@@ -324,6 +346,7 @@ static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) {
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(System, arraycopy, "(Ljava/lang/Object;ILjava/lang/Object;II)V"),
+ NATIVE_METHOD(System, arraycopyCharUnchecked, "([CI[CII)V"),
NATIVE_METHOD(System, identityHashCode, "(Ljava/lang/Object;)I"),
};
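Note: arraycopyCharUnchecked demotes the bounds, type, and null checks to DCHECKs; the caller is expected to have validated everything, so release builds go straight to the 16-bit block move. The copy itself is equivalent to a memmove over two-byte char elements; a sketch under that assumption (memmove standing in for move16):

    #include <cstdint>
    #include <cstring>

    // Unchecked jchar copy, assuming the caller already validated positions
    // and lengths, as the DCHECKs above assert.
    void CopyChars(uint16_t* dst, int32_t dst_pos,
                   const uint16_t* src, int32_t src_pos, int32_t length) {
      std::memmove(dst + dst_pos, src + src_pos,
                   static_cast<size_t>(length) * sizeof(uint16_t));
    }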
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index 6ee3016179..f83db903ff 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -111,6 +111,17 @@ class ClassHelper {
}
}
+ StringPiece GetDescriptorAsStringPiece() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(klass_ != NULL);
+ if (UNLIKELY(klass_->IsArrayClass() || klass_->IsPrimitive() || klass_->IsProxyClass())) {
+ return StringPiece(GetDescriptor());
+ } else {
+ const DexFile& dex_file = GetDexFile();
+ const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
+ return dex_file.StringDataAsStringPieceByIdx(type_id.descriptor_idx_);
+ }
+ }
+
const char* GetArrayDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string result("[");
const mirror::Class* saved_klass = klass_;
@@ -182,7 +193,7 @@ class ClassHelper {
}
const char* GetSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string descriptor(GetDescriptor());
+ std::string descriptor(GetDescriptorAsStringPiece().as_string());
const DexFile& dex_file = GetDexFile();
const DexFile::ClassDef* dex_class_def = GetClassDef();
CHECK(dex_class_def != NULL);
@@ -267,53 +278,77 @@ class FieldHelper {
}
field_ = new_f;
}
+
const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t field_index = field_->GetDexFieldIndex();
- if (!field_->GetDeclaringClass()->IsProxyClass()) {
- const DexFile& dex_file = GetDexFile();
- return dex_file.GetFieldName(dex_file.GetFieldId(field_index));
- } else {
+ if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) {
DCHECK(field_->IsStatic());
DCHECK_LT(field_index, 2U);
return field_index == 0 ? "interfaces" : "throws";
}
+ const DexFile& dex_file = GetDexFile();
+ return dex_file.GetFieldName(dex_file.GetFieldId(field_index));
}
+
+ StringPiece GetNameAsStringPiece() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t field_index = field_->GetDexFieldIndex();
+ if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) {
+ return StringPiece(GetName());
+ }
+ const DexFile& dex_file = GetDexFile();
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ return dex_file.StringDataAsStringPieceByIdx(field_id.name_idx_);
+ }
+
mirror::Class* GetType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t field_index = field_->GetDexFieldIndex();
- if (!field_->GetDeclaringClass()->IsProxyClass()) {
- const DexFile& dex_file = GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
- mirror::Class* type = GetDexCache()->GetResolvedType(field_id.type_idx_);
- if (resolve && (type == NULL)) {
- type = GetClassLinker()->ResolveType(field_id.type_idx_, field_);
- CHECK(type != NULL || Thread::Current()->IsExceptionPending());
- }
- return type;
- } else {
+ if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) {
return GetClassLinker()->FindSystemClass(GetTypeDescriptor());
}
+ const DexFile& dex_file = GetDexFile();
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ mirror::Class* type = GetDexCache()->GetResolvedType(field_id.type_idx_);
+ if (resolve && (type == NULL)) {
+ type = GetClassLinker()->ResolveType(field_id.type_idx_, field_);
+ CHECK(type != NULL || Thread::Current()->IsExceptionPending());
+ }
+ return type;
}
+
const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t field_index = field_->GetDexFieldIndex();
- if (!field_->GetDeclaringClass()->IsProxyClass()) {
- const DexFile& dex_file = GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
- return dex_file.GetFieldTypeDescriptor(field_id);
- } else {
+ if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) {
DCHECK(field_->IsStatic());
DCHECK_LT(field_index, 2U);
// 0 == Class[] interfaces; 1 == Class[][] throws;
return field_index == 0 ? "[Ljava/lang/Class;" : "[[Ljava/lang/Class;";
}
+ const DexFile& dex_file = GetDexFile();
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ return dex_file.GetFieldTypeDescriptor(field_id);
}
+
+ StringPiece GetTypeDescriptorAsStringPiece() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t field_index = field_->GetDexFieldIndex();
+ if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) {
+ return StringPiece(GetTypeDescriptor());
+ }
+ const DexFile& dex_file = GetDexFile();
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ const DexFile::TypeId& type_id = dex_file.GetTypeId(field_id.type_idx_);
+ return dex_file.StringDataAsStringPieceByIdx(type_id.descriptor_idx_);
+ }
+
Primitive::Type GetTypeAsPrimitiveType()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return Primitive::GetType(GetTypeDescriptor()[0]);
}
+
bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Primitive::Type type = GetTypeAsPrimitiveType();
return type != Primitive::kPrimNot;
}
+
size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Primitive::Type type = GetTypeAsPrimitiveType();
return Primitive::FieldSize(type);
@@ -324,18 +359,17 @@ class FieldHelper {
const char* GetDeclaringClassDescriptor()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t field_index = field_->GetDexFieldIndex();
- if (!field_->GetDeclaringClass()->IsProxyClass()) {
- const DexFile& dex_file = GetDexFile();
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
- return dex_file.GetFieldDeclaringClassDescriptor(field_id);
- } else {
+ if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) {
DCHECK(field_->IsStatic());
DCHECK_LT(field_index, 2U);
// 0 == Class[] interfaces; 1 == Class[][] throws;
ClassHelper kh(field_->GetDeclaringClass());
- declaring_class_descriptor_ = kh.GetDescriptor();
+ declaring_class_descriptor_ = kh.GetDescriptorAsStringPiece().as_string();
return declaring_class_descriptor_.c_str();
}
+ const DexFile& dex_file = GetDexFile();
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ return dex_file.GetFieldDeclaringClassDescriptor(field_id);
}
private:
@@ -417,7 +451,7 @@ class MethodHelper {
const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile& dex_file = GetDexFile();
uint32_t dex_method_idx = method_->GetDexMethodIndex();
- if (dex_method_idx != DexFile::kDexNoIndex) {
+ if (LIKELY(dex_method_idx != DexFile::kDexNoIndex)) {
return dex_file.GetMethodName(dex_file.GetMethodId(dex_method_idx));
} else {
Runtime* runtime = Runtime::Current();
@@ -435,6 +469,16 @@ class MethodHelper {
}
}
+ StringPiece GetNameAsStringPiece() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile& dex_file = GetDexFile();
+ uint32_t dex_method_idx = method_->GetDexMethodIndex();
+ if (UNLIKELY(dex_method_idx == DexFile::kDexNoIndex)) {
+ return StringPiece(GetName());
+ }
+ const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
+ return dex_file.StringDataAsStringPieceByIdx(method_id.name_idx_);
+ }
+
mirror::String* GetNameAsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile& dex_file = GetDexFile();
uint32_t dex_method_idx = method_->GetDexMethodIndex();
@@ -460,13 +504,13 @@ class MethodHelper {
return shorty_len_;
}
- const std::string GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const Signature GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile& dex_file = GetDexFile();
uint32_t dex_method_idx = method_->GetDexMethodIndex();
if (dex_method_idx != DexFile::kDexNoIndex) {
return dex_file.GetMethodSignature(dex_file.GetMethodId(dex_method_idx));
} else {
- return "<no signature>";
+ return Signature::NoSignature();
}
}
@@ -508,11 +552,22 @@ class MethodHelper {
const char* GetDeclaringClassDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile& dex_file = GetDexFile();
uint32_t dex_method_idx = method_->GetDexMethodIndex();
- if (dex_method_idx != DexFile::kDexNoIndex) {
- return dex_file.GetMethodDeclaringClassDescriptor(dex_file.GetMethodId(dex_method_idx));
- } else {
+ if (UNLIKELY(dex_method_idx == DexFile::kDexNoIndex)) {
return "<runtime method>";
}
+ return dex_file.GetMethodDeclaringClassDescriptor(dex_file.GetMethodId(dex_method_idx));
+ }
+
+ StringPiece GetDeclaringClassDescriptorAsStringPiece()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile& dex_file = GetDexFile();
+ uint32_t dex_method_idx = method_->GetDexMethodIndex();
+ if (UNLIKELY(dex_method_idx == DexFile::kDexNoIndex)) {
+ return StringPiece("<runtime method>");
+ }
+ const DexFile::MethodId& mid = dex_file.GetMethodId(dex_method_idx);
+ const DexFile::TypeId& type_id = dex_file.GetTypeId(mid.class_idx_);
+ return dex_file.StringDataAsStringPieceByIdx(type_id.descriptor_idx_);
}
const char* GetDeclaringClassSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -536,7 +591,7 @@ class MethodHelper {
}
bool IsClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return IsStatic() && StringPiece(GetName()) == "<clinit>";
+ return IsStatic() && GetNameAsStringPiece() == "<clinit>";
}
size_t NumArgs() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -569,16 +624,21 @@ class MethodHelper {
bool HasSameNameAndSignature(MethodHelper* other)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile& dex_file = GetDexFile();
+ const DexFile::MethodId& mid = dex_file.GetMethodId(method_->GetDexMethodIndex());
if (GetDexCache() == other->GetDexCache()) {
- const DexFile& dex_file = GetDexFile();
- const DexFile::MethodId& mid = dex_file.GetMethodId(method_->GetDexMethodIndex());
const DexFile::MethodId& other_mid =
dex_file.GetMethodId(other->method_->GetDexMethodIndex());
return mid.name_idx_ == other_mid.name_idx_ && mid.proto_idx_ == other_mid.proto_idx_;
}
- StringPiece name(GetName());
- StringPiece other_name(other->GetName());
- return name == other_name && GetSignature() == other->GetSignature();
+ const DexFile& other_dex_file = other->GetDexFile();
+ const DexFile::MethodId& other_mid =
+ other_dex_file.GetMethodId(other->method_->GetDexMethodIndex());
+ if (dex_file.StringDataAsStringPieceByIdx(mid.name_idx_) !=
+ other_dex_file.StringDataAsStringPieceByIdx(other_mid.name_idx_)) {
+ return false; // Name mismatch.
+ }
+ return dex_file.GetMethodSignature(mid) == other_dex_file.GetMethodSignature(other_mid);
}
const DexFile::CodeItem* GetCodeItem()
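Note: HasSameNameAndSignature now has a genuine fast path. When both methods come from the same dex cache, names and protos are interned by index in one dex file, so two integer compares settle it; only the cross-file case touches string data, and even then the cheap name compare runs before the costlier signature compare. The two tiers, sketched with hypothetical types:

    #include <cstdint>
    #include <string_view>

    struct MethodId { uint32_t name_idx; uint32_t proto_idx; };

    // Same dex file: identity of indices is equality of strings.
    bool SameNameAndSig(const MethodId& a, const MethodId& b) {
      return a.name_idx == b.name_idx && a.proto_idx == b.proto_idx;
    }

    // Different dex files: compare names first, signatures only on a match.
    bool SameNameAndSig(std::string_view name_a, std::string_view sig_a,
                        std::string_view name_b, std::string_view sig_b) {
      return name_a == name_b && sig_a == sig_b;
    }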
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 8e23cbb153..e95fdb9226 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -38,16 +38,16 @@ ReferenceTable::ReferenceTable(const char* name, size_t initial_size, size_t max
ReferenceTable::~ReferenceTable() {
}
-void ReferenceTable::Add(const mirror::Object* obj) {
+void ReferenceTable::Add(mirror::Object* obj) {
DCHECK(obj != NULL);
- if (entries_.size() == max_size_) {
+ if (entries_.size() >= max_size_) {
LOG(FATAL) << "ReferenceTable '" << name_ << "' "
<< "overflowed (" << max_size_ << " entries)";
}
entries_.push_back(obj);
}
-void ReferenceTable::Remove(const mirror::Object* obj) {
+void ReferenceTable::Remove(mirror::Object* obj) {
// We iterate backwards on the assumption that references are LIFO.
for (int i = entries_.size() - 1; i >= 0; --i) {
if (entries_[i] == obj) {
@@ -232,8 +232,8 @@ void ReferenceTable::Dump(std::ostream& os, const Table& entries) {
}
void ReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
- for (const auto& ref : entries_) {
- visitor(ref, arg);
+ for (auto& ref : entries_) {
+ ref = visitor(const_cast<mirror::Object*>(ref), arg);
}
}
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index e369fd0de5..37b31723ae 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -39,9 +39,9 @@ class ReferenceTable {
ReferenceTable(const char* name, size_t initial_size, size_t max_size);
~ReferenceTable();
- void Add(const mirror::Object* obj);
+ void Add(mirror::Object* obj);
- void Remove(const mirror::Object* obj);
+ void Remove(mirror::Object* obj);
size_t Size() const;
@@ -50,7 +50,7 @@ class ReferenceTable {
void VisitRoots(RootVisitor* visitor, void* arg);
private:
- typedef std::vector<const mirror::Object*> Table;
+ typedef std::vector<mirror::Object*> Table;
static void Dump(std::ostream& os, const Table& entries)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
friend class IndirectReferenceTable; // For Dump.
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 3e58b4bd94..4ff7349833 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -323,7 +323,7 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
}
JValue boxed_value;
- std::string src_descriptor(ClassHelper(o->GetClass()).GetDescriptor());
+ const StringPiece src_descriptor(ClassHelper(o->GetClass()).GetDescriptorAsStringPiece());
mirror::Class* src_class = NULL;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::ArtField* primitive_field = o->GetClass()->GetIFields()->Get(0);
@@ -356,7 +356,7 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
StringPrintf("%s has type %s, got %s",
UnboxingFailureKind(m, index, f).c_str(),
PrettyDescriptor(dst_class).c_str(),
- PrettyDescriptor(src_descriptor.c_str()).c_str()).c_str());
+ PrettyDescriptor(src_descriptor.data()).c_str()).c_str());
return false;
}
diff --git a/runtime/root_visitor.h b/runtime/root_visitor.h
index 3aa9b4bac0..a2d898b43c 100644
--- a/runtime/root_visitor.h
+++ b/runtime/root_visitor.h
@@ -23,7 +23,8 @@ class Object;
} // namespace mirror
class StackVisitor;
-typedef void (RootVisitor)(const mirror::Object* root, void* arg);
+typedef mirror::Object* (RootVisitor)(mirror::Object* root, void* arg)
+ __attribute__((warn_unused_result));
typedef void (VerifyRootVisitor)(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor* visitor);
typedef bool (IsMarkedTester)(const mirror::Object* object, void* arg);
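Note: this typedef is the pivot of the whole change. A root visitor now returns the object's (possibly new) address, and warn_unused_result forces every caller to store the result back, which is exactly what a moving collector needs; a non-moving pass simply returns its argument. A sketch of both, assuming a hypothetical forwarding table:

    #include <unordered_map>

    struct Obj;
    using Fwd = std::unordered_map<Obj*, Obj*>;  // old address -> new address

    // Moving-GC visitor: return the forwarded address when the object moved.
    Obj* ForwardRoot(Obj* root, void* arg) {
      auto& forwarding = *static_cast<Fwd*>(arg);
      auto it = forwarding.find(root);
      return it != forwarding.end() ? it->second : root;
    }

    // Non-moving visitor (e.g. verification): must still return the root.
    Obj* IdentityRoot(Obj* root, void* /*arg*/) { return root; }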
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 09d1447d7b..b4ce37fe1c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -99,7 +99,8 @@ Runtime::Runtime()
instrumentation_(),
use_compile_time_class_path_(false),
main_thread_group_(NULL),
- system_thread_group_(NULL) {
+ system_thread_group_(NULL),
+ quick_alloc_entry_points_instrumentation_counter_(0) {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
callee_save_methods_[i] = NULL;
}
@@ -319,6 +320,12 @@ size_t ParseIntegerOrDie(const std::string& s) {
return result;
}
+void Runtime::SweepSystemWeaks(RootVisitor* visitor, void* arg) {
+ GetInternTable()->SweepInternTableWeaks(visitor, arg);
+ GetMonitorList()->SweepMonitorList(visitor, arg);
+ GetJavaVM()->SweepJniWeakGlobals(visitor, arg);
+}
+
Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, bool ignore_unrecognized) {
UniquePtr<ParsedOptions> parsed(new ParsedOptions());
const char* boot_class_path_string = getenv("BOOTCLASSPATH");
@@ -1049,6 +1056,9 @@ void Runtime::SetStatsEnabled(bool new_state) {
GetStats()->Clear(~0);
// TODO: wouldn't it make more sense to clear _all_ threads' stats?
Thread::Current()->GetStats()->Clear(~0);
+ InstrumentQuickAllocEntryPoints();
+ } else {
+ UninstrumentQuickAllocEntryPoints();
}
stats_enabled_ = new_state;
}
@@ -1139,12 +1149,17 @@ void Runtime::VisitConcurrentRoots(RootVisitor* visitor, void* arg, bool only_di
void Runtime::VisitNonThreadRoots(RootVisitor* visitor, void* arg) {
java_vm_->VisitRoots(visitor, arg);
- if (pre_allocated_OutOfMemoryError_ != NULL) {
- visitor(pre_allocated_OutOfMemoryError_, arg);
+ if (pre_allocated_OutOfMemoryError_ != nullptr) {
+ pre_allocated_OutOfMemoryError_ = reinterpret_cast<mirror::Throwable*>(
+ visitor(pre_allocated_OutOfMemoryError_, arg));
+ DCHECK(pre_allocated_OutOfMemoryError_ != nullptr);
}
- visitor(resolution_method_, arg);
+ resolution_method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(resolution_method_, arg));
+ DCHECK(resolution_method_ != nullptr);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- visitor(callee_save_methods_[i], arg);
+ callee_save_methods_[i] = reinterpret_cast<mirror::ArtMethod*>(
+ visitor(callee_save_methods_[i], arg));
+ DCHECK(callee_save_methods_[i] != nullptr);
}
}
@@ -1271,4 +1286,46 @@ void Runtime::SetCompileTimeClassPath(jobject class_loader, std::vector<const De
compile_time_class_paths_.Put(class_loader, class_path);
}
+static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg) {
+ thread->ResetQuickAllocEntryPointsForThread();
+}
+
+void SetQuickAllocEntryPointsInstrumented(bool instrumented);
+
+void Runtime::InstrumentQuickAllocEntryPoints() {
+ ThreadList* tl = thread_list_;
+ Thread* self = Thread::Current();
+ tl->SuspendAll();
+ {
+ MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+ MutexLock mu2(self, *Locks::thread_list_lock_);
+ DCHECK_GE(quick_alloc_entry_points_instrumentation_counter_, 0);
+ int old_counter = quick_alloc_entry_points_instrumentation_counter_++;
+ if (old_counter == 0) {
+ // If it was disabled, enable it.
+ SetQuickAllocEntryPointsInstrumented(true);
+ tl->ForEach(ResetQuickAllocEntryPointsForThread, NULL);
+ }
+ }
+ tl->ResumeAll();
+}
+
+void Runtime::UninstrumentQuickAllocEntryPoints() {
+ ThreadList* tl = thread_list_;
+ Thread* self = Thread::Current();
+ tl->SuspendAll();
+ {
+ MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+ MutexLock mu2(self, *Locks::thread_list_lock_);
+ DCHECK_GT(quick_alloc_entry_points_instrumentation_counter_, 0);
+ int new_counter = --quick_alloc_entry_points_instrumentation_counter_;
+ if (new_counter == 0) {
+ // Disable it if the counter becomes zero.
+ SetQuickAllocEntryPointsInstrumented(false);
+ tl->ForEach(ResetQuickAllocEntryPointsForThread, NULL);
+ }
+ }
+ tl->ResumeAll();
+}
+
} // namespace art
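Note: the counter makes instrument/uninstrument nest; only the 0→1 transition swaps in the instrumented entry points and only the 1→0 transition swaps them back, with all threads suspended so nobody runs against a half-updated entrypoint table. The counting pattern in isolation (suspension and locking elided, names hypothetical):

    // Callers must pair Instrument() with Uninstrument(); the expensive switch
    // happens only on the outermost transition in either direction.
    class AllocInstrumentation {
     public:
      void Instrument() {
        if (counter_++ == 0) {
          SwitchEntryPoints(/*instrumented=*/true);
        }
      }
      void Uninstrument() {
        if (--counter_ == 0) {
          SwitchEntryPoints(/*instrumented=*/false);
        }
      }
     private:
      void SwitchEntryPoints(bool instrumented) {
        (void)instrumented;  // reset each thread's entrypoint table here
      }
      int counter_ = 0;
    };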
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 365d2d860b..552cfdf009 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -320,6 +320,10 @@ class Runtime {
void VisitNonConcurrentRoots(RootVisitor* visitor, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Sweep system weaks: a system weak is deleted if the visitor returns nullptr; otherwise it is
+ // updated to the visitor's returned value.
+ void SweepSystemWeaks(RootVisitor* visitor, void* arg);
+
// Returns a special method that calls into a trampoline for runtime method resolution
mirror::ArtMethod* GetResolutionMethod() const {
CHECK(HasResolutionMethod());
@@ -394,6 +398,9 @@ class Runtime {
const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);
+ void InstrumentQuickAllocEntryPoints();
+ void UninstrumentQuickAllocEntryPoints();
+
private:
static void InitPlatformSignalHandlers();
@@ -510,6 +517,8 @@ class Runtime {
jobject main_thread_group_;
jobject system_thread_group_;
+ int quick_alloc_entry_points_instrumentation_counter_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h
index 81f0dff217..a1f8a6693f 100644
--- a/runtime/sirt_ref.h
+++ b/runtime/sirt_ref.h
@@ -30,7 +30,8 @@ class SirtRef {
self_->PushSirt(&sirt_);
}
~SirtRef() {
- CHECK(self_->PopSirt() == &sirt_);
+ StackIndirectReferenceTable* top_sirt = self_->PopSirt();
+ DCHECK_EQ(top_sirt, &sirt_);
}
T& operator*() const { return *get(); }
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 206bff3425..17156647eb 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -148,8 +148,8 @@ uint32_t StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kin
const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
size_t frame_size = m->GetFrameSizeInBytes();
- return GetVReg(cur_quick_frame_, code_item, m->GetCoreSpillMask(), m->GetFpSpillMask(),
- frame_size, vreg);
+ return *GetVRegAddr(cur_quick_frame_, code_item, m->GetCoreSpillMask(), m->GetFpSpillMask(),
+ frame_size, vreg);
}
} else {
return cur_shadow_frame_->GetVReg(vreg);
diff --git a/runtime/stack.h b/runtime/stack.h
index 7c87f4555c..700b7f1960 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -68,8 +68,7 @@ class ShadowFrame {
static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
mirror::ArtMethod* method, uint32_t dex_pc) {
uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
- ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
- return sf;
+ return Create(num_vregs, link, method, dex_pc, memory);
}
// Create ShadowFrame for interpreter using provided memory.
@@ -154,7 +153,12 @@ class ShadowFrame {
mirror::Object* GetVRegReference(size_t i) const {
DCHECK_LT(i, NumberOfVRegs());
if (HasReferenceArray()) {
- return References()[i];
+ mirror::Object* ref = References()[i];
+ // If the vreg reference is not equal to the vreg then the vreg reference is stale.
+ if (reinterpret_cast<uint32_t>(ref) != vregs_[i]) {
+ return nullptr;
+ }
+ return ref;
} else {
const uint32_t* vreg = &vregs_[i];
return *reinterpret_cast<mirror::Object* const*>(vreg);
@@ -467,13 +471,14 @@ class StackVisitor {
uintptr_t GetGPR(uint32_t reg) const;
void SetGPR(uint32_t reg, uintptr_t value);
- uint32_t GetVReg(mirror::ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
+ // This is a fast-path for getting/setting values in a quick frame.
+ uint32_t* GetVRegAddr(mirror::ArtMethod** cur_quick_frame, const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
uint16_t vreg) const {
int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
byte* vreg_addr = reinterpret_cast<byte*>(cur_quick_frame) + offset;
- return *reinterpret_cast<uint32_t*>(vreg_addr);
+ return reinterpret_cast<uint32_t*>(vreg_addr);
}
uintptr_t GetReturnPc() const;
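Note: turning GetVReg into GetVRegAddr is the same address-not-value move as GetFieldObjectAddr: the stack walker can now read a vreg and, if the GC moved the referent, write the forwarded pointer back into the frame slot, as the ReferenceMapVisitor hunk later in thread.cc does. The read-modify-write shape, sketched with hypothetical types (this 32-bit-era code stores a reference per uint32_t slot):

    #include <cstdint>

    struct Obj;
    using Visitor = Obj* (*)(Obj*, void*);

    // Visit the reference held in a quick-frame slot and write back the
    // forwarded address if the visitor moved it.
    void VisitSlot(uint32_t* slot, Visitor visit, void* arg) {
      Obj* ref = reinterpret_cast<Obj*>(static_cast<uintptr_t>(*slot));
      if (ref != nullptr) {
        Obj* new_ref = visit(ref, arg);
        if (new_ref != ref) {
          *slot = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(new_ref));
        }
      }
    }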
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index c22f2cd921..4552062319 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -19,11 +19,24 @@
#include "thread.h"
+#include <pthread.h>
+
#include "base/mutex-inl.h"
#include "cutils/atomic-inline.h"
namespace art {
+inline Thread* Thread::Current() {
+ // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
+ // that we can replace this with a direct %fs access on x86.
+ if (!is_started_) {
+ return NULL;
+ } else {
+ void* thread = pthread_getspecific(Thread::pthread_key_self_);
+ return reinterpret_cast<Thread*>(thread);
+ }
+}
+
inline ThreadState Thread::SetState(ThreadState new_state) {
// Cannot use this code to change into Runnable as changing to Runnable should fail if
// old_state_and_flags.suspend_request is true.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e8326ea982..7040337f04 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -108,6 +108,12 @@ void Thread::InitTlsEntryPoints() {
&quick_entrypoints_);
}
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+
+void Thread::ResetQuickAllocEntryPointsForThread() {
+ ResetQuickAllocEntryPoints(&quick_entrypoints_);
+}
+
void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
deoptimization_shadow_frame_ = sf;
}
@@ -1012,9 +1018,10 @@ void Thread::AssertNoPendingException() const {
}
}
-static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
+static mirror::Object* MonitorExitVisitor(mirror::Object* object, void* arg)
+ NO_THREAD_SAFETY_ANALYSIS {
Thread* self = reinterpret_cast<Thread*>(arg);
- mirror::Object* entered_monitor = const_cast<mirror::Object*>(object);
+ mirror::Object* entered_monitor = object;
if (self->HoldsLock(entered_monitor)) {
LOG(WARNING) << "Calling MonitorExit on object "
<< object << " (" << PrettyTypeOf(object) << ")"
@@ -1022,6 +1029,7 @@ static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREA
<< *Thread::Current() << " which is detaching";
entered_monitor->MonitorExit(self);
}
+ return object;
}
void Thread::Destroy() {
@@ -1151,8 +1159,12 @@ void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) {
size_t num_refs = cur->NumberOfReferences();
for (size_t j = 0; j < num_refs; j++) {
mirror::Object* object = cur->GetReference(j);
- if (object != NULL) {
- visitor(object, arg);
+ if (object != nullptr) {
+ const mirror::Object* new_obj = visitor(object, arg);
+ DCHECK(new_obj != nullptr);
+ if (new_obj != object) {
+ cur->SetReference(j, const_cast<mirror::Object*>(new_obj));
+ }
}
}
}
@@ -1381,24 +1393,23 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
// Transition into runnable state to work on Object*/Array*
ScopedObjectAccess soa(env);
// Decode the internal stack trace into the depth, method trace and PC trace
- mirror::ObjectArray<mirror::Object>* method_trace =
- soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
- int32_t depth = method_trace->GetLength() - 1;
- mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
+ int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
jobjectArray result;
- mirror::ObjectArray<mirror::StackTraceElement>* java_traces;
+
if (output_array != NULL) {
// Reuse the array we were given.
result = output_array;
- java_traces = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(output_array);
// ...adjusting the number of frames we'll write to not exceed the array length.
- depth = std::min(depth, java_traces->GetLength());
+ const int32_t traces_length =
+ soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
+ depth = std::min(depth, traces_length);
} else {
// Create java_trace array and place in local reference table
- java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth);
+ mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
+ class_linker->AllocStackTraceElementArray(soa.Self(), depth);
if (java_traces == NULL) {
return NULL;
}
@@ -1411,9 +1422,12 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
MethodHelper mh;
for (int32_t i = 0; i < depth; ++i) {
+ mirror::ObjectArray<mirror::Object>* method_trace =
+ soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
// Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
mh.ChangeMethod(method);
+ mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
uint32_t dex_pc = pc_trace->Get(i);
int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
// Allocate element, potentially triggering GC
@@ -1436,8 +1450,9 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
return NULL;
}
const char* source_file = mh.GetDeclaringClassSourceFile();
- SirtRef<mirror::String> source_name_object(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
- source_file));
+ SirtRef<mirror::String> source_name_object(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(),
+ source_file));
mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(),
class_name_object.get(),
method_name_object.get(),
@@ -1446,13 +1461,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
if (obj == NULL) {
return NULL;
}
-#ifdef MOVING_GARBAGE_COLLECTOR
- // Re-read after potential GC
- java_traces = Decode<ObjectArray<Object>*>(soa.Env(), result);
- method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(soa.Env(), internal));
- pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
-#endif
- java_traces->Set(i, obj);
+ soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set(i, obj);
}
return result;
}
@@ -2016,8 +2025,11 @@ class ReferenceMapVisitor : public StackVisitor {
// SIRT for JNI or References for interpreter.
for (size_t reg = 0; reg < num_regs; ++reg) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != NULL) {
- visitor_(ref, reg, this);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
+ }
}
}
} else {
@@ -2037,8 +2049,11 @@ class ReferenceMapVisitor : public StackVisitor {
for (size_t reg = 0; reg < num_regs; ++reg) {
if (TestBitmap(reg, reg_bitmap)) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != NULL) {
- visitor_(ref, reg, this);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
+ }
}
}
}
@@ -2069,19 +2084,25 @@ class ReferenceMapVisitor : public StackVisitor {
// Does this register hold a reference?
if (TestBitmap(reg, reg_bitmap)) {
uint32_t vmap_offset;
- mirror::Object* ref;
if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
- uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
- kReferenceVReg));
- ref = reinterpret_cast<mirror::Object*>(val);
+ int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(GetGPR(vmap_reg));
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (ref != new_ref) {
+ SetGPR(vmap_reg, reinterpret_cast<uintptr_t>(new_ref));
+ }
+ }
} else {
- ref = reinterpret_cast<mirror::Object*>(GetVReg(cur_quick_frame, code_item,
- core_spills, fp_spills, frame_size,
- reg));
- }
-
- if (ref != NULL) {
- visitor_(ref, reg, this);
+ uint32_t* reg_addr =
+ GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size, reg);
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(*reg_addr);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = visitor_(ref, reg, this);
+ if (ref != new_ref) {
+ *reg_addr = reinterpret_cast<uint32_t>(new_ref);
+ }
+ }
}
}
}
@@ -2107,8 +2128,8 @@ class RootCallbackVisitor {
public:
RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
- void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
- visitor_(obj, arg_);
+ mirror::Object* operator()(mirror::Object* obj, size_t, const StackVisitor*) const {
+ return visitor_(obj, arg_);
}
private:
@@ -2132,67 +2153,17 @@ class VerifyCallbackVisitor {
void* const arg_;
};
-struct VerifyRootWrapperArg {
- VerifyRootVisitor* visitor;
- void* arg;
-};
-
-static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) {
- VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
- wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
-}
-
-void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) {
- // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
- // don't have.
- VerifyRootWrapperArg wrapperArg;
- wrapperArg.arg = arg;
- wrapperArg.visitor = visitor;
-
- if (opeer_ != NULL) {
- VerifyRootWrapperCallback(opeer_, &wrapperArg);
- }
- if (exception_ != NULL) {
- VerifyRootWrapperCallback(exception_, &wrapperArg);
- }
- throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
- if (class_loader_override_ != NULL) {
- VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
- }
- jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
- jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
-
- SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
-
- // Visit roots on this thread's stack
- Context* context = GetLongJumpContext();
- VerifyCallbackVisitor visitorToCallback(visitor, arg);
- ReferenceMapVisitor<VerifyCallbackVisitor> mapper(this, context, visitorToCallback);
- mapper.WalkStack();
- ReleaseLongJumpContext(context);
-
- std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
- typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
- for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
- mirror::Object* this_object = (*it).this_object_;
- if (this_object != NULL) {
- VerifyRootWrapperCallback(this_object, &wrapperArg);
- }
- mirror::ArtMethod* method = (*it).method_;
- VerifyRootWrapperCallback(method, &wrapperArg);
- }
-}
-
void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
- if (opeer_ != NULL) {
- visitor(opeer_, arg);
+ if (opeer_ != nullptr) {
+ opeer_ = visitor(opeer_, arg);
}
- if (exception_ != NULL) {
- visitor(exception_, arg);
+ if (exception_ != nullptr) {
+ exception_ = reinterpret_cast<mirror::Throwable*>(visitor(exception_, arg));
}
throw_location_.VisitRoots(visitor, arg);
- if (class_loader_override_ != NULL) {
- visitor(class_loader_override_, arg);
+ if (class_loader_override_ != nullptr) {
+ class_loader_override_ = reinterpret_cast<mirror::ClassLoader*>(
+ visitor(class_loader_override_, arg));
}
jni_env_->locals.VisitRoots(visitor, arg);
jni_env_->monitors.VisitRoots(visitor, arg);
@@ -2206,24 +2177,26 @@ void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
mapper.WalkStack();
ReleaseLongJumpContext(context);
- for (const instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
- mirror::Object* this_object = frame.this_object_;
- if (this_object != NULL) {
- visitor(this_object, arg);
+ for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
+ if (frame.this_object_ != nullptr) {
+ frame.this_object_ = visitor(frame.this_object_, arg);
+ DCHECK(frame.this_object_ != nullptr);
}
- mirror::ArtMethod* method = frame.method_;
- visitor(method, arg);
+ frame.method_ = reinterpret_cast<mirror::ArtMethod*>(visitor(frame.method_, arg));
+ DCHECK(frame.method_ != nullptr);
}
}
-static void VerifyObject(const mirror::Object* root, void* arg) {
- gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
- heap->VerifyObject(root);
+static mirror::Object* VerifyRoot(mirror::Object* root, void* arg) {
+ DCHECK(root != nullptr);
+ DCHECK(arg != nullptr);
+ reinterpret_cast<gc::Heap*>(arg)->VerifyObject(root);
+ return root;
}
void Thread::VerifyStackImpl() {
UniquePtr<Context> context(Context::Create());
- RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
+ RootCallbackVisitor visitorToCallback(VerifyRoot, Runtime::Current()->GetHeap());
ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
mapper.WalkStack();
}
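
The thread.cc hunk above changes the root-visiting contract: a RootVisitor now returns the (possibly relocated) object, and every caller must store that return value back into the root it passed in, which is what lets a moving collector forward roots as it copies objects. A minimal sketch of the contract, using stand-in types rather than the real mirror:: declarations:

#include <cassert>

struct Object {};  // stand-in for mirror::Object

typedef Object* (*RootVisitor)(Object* root, void* arg);

struct ThreadRoots {  // hypothetical holder, not the real art::Thread
  Object* opeer_ = nullptr;

  void VisitRoots(RootVisitor visitor, void* arg) {
    if (opeer_ != nullptr) {
      // Write the result back: after a moving GC the object may live at a
      // new address, and the old pointer would dangle.
      opeer_ = visitor(opeer_, arg);
      assert(opeer_ != nullptr);
    }
  }
};
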
diff --git a/runtime/thread.h b/runtime/thread.h
index 40e3f5fbb2..2d9e0097d0 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -17,8 +17,6 @@
#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_
-#include <pthread.h>
-
#include <bitset>
#include <deque>
#include <iosfwd>
@@ -104,16 +102,7 @@ class PACKED(4) Thread {
// Reset internal state of child thread after fork.
void InitAfterFork();
- static Thread* Current() {
- // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
- // that we can replace this with a direct %fs access on x86.
- if (!is_started_) {
- return NULL;
- } else {
- void* thread = pthread_getspecific(Thread::pthread_key_self_);
- return reinterpret_cast<Thread*>(thread);
- }
- }
+ static Thread* Current();
static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts,
mirror::Object* thread_peer)
@@ -406,9 +395,6 @@ class PACKED(4) Thread {
void VisitRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VerifyRoots(VerifyRootVisitor* visitor, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
//
@@ -590,6 +576,8 @@ class PACKED(4) Thread {
void AtomicClearFlag(ThreadFlag flag);
+ void ResetQuickAllocEntryPointsForThread();
+
private:
// We have no control over the size of 'bool', but want our boolean fields
// to be 4-byte quantities.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 671924a620..44cf810178 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -21,6 +21,7 @@
#include <unistd.h>
#include "base/mutex.h"
+#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "debugger.h"
#include "thread.h"
@@ -568,10 +569,24 @@ void ThreadList::VisitRoots(RootVisitor* visitor, void* arg) const {
}
}
+struct VerifyRootWrapperArg {
+ VerifyRootVisitor* visitor;
+ void* arg;
+};
+
+static mirror::Object* VerifyRootWrapperCallback(mirror::Object* root, void* arg) {
+ VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
+ wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
+ return root;
+}
+
void ThreadList::VerifyRoots(VerifyRootVisitor* visitor, void* arg) const {
+ VerifyRootWrapperArg wrapper;
+ wrapper.visitor = visitor;
+ wrapper.arg = arg;
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
for (const auto& thread : list_) {
- thread->VerifyRoots(visitor, arg);
+ thread->VisitRoots(VerifyRootWrapperCallback, &wrapper);
}
}
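
thread_list.cc keeps the old VerifyRoots entry point alive by adapting the legacy verify-only callback to the new updating signature: verification never relocates anything, so the wrapper just forwards the call and hands the same pointer back. A sketch of the shape, with the four-argument VerifyRootVisitor signature inferred from the forwarding call in the hunk above:

#include <cstddef>

struct Object;

// Assumed signatures, mirroring the wrapper above.
typedef void VerifyRootVisitor(Object* root, void* arg, size_t vreg,
                               const void* stack_visitor);

struct VerifyRootWrapperArg {
  VerifyRootVisitor* visitor;
  void* arg;
};

static Object* VerifyRootWrapperCallback(Object* root, void* arg) {
  VerifyRootWrapperArg* wrapper = static_cast<VerifyRootWrapperArg*>(arg);
  wrapper->visitor(root, wrapper->arg, 0, nullptr);  // verify only
  return root;  // nothing moved, so the root is returned unchanged
}
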
diff --git a/runtime/throw_location.cc b/runtime/throw_location.cc
index e428511d48..01497ef0e5 100644
--- a/runtime/throw_location.cc
+++ b/runtime/throw_location.cc
@@ -34,11 +34,14 @@ std::string ThrowLocation::Dump() const {
}
void ThrowLocation::VisitRoots(RootVisitor* visitor, void* arg) {
- if (this_object_ != NULL) {
- visitor(this_object_, arg);
+ if (this_object_ != nullptr) {
+ this_object_ = const_cast<mirror::Object*>(visitor(this_object_, arg));
+ DCHECK(this_object_ != nullptr);
}
- if (method_ != NULL) {
- visitor(method_, arg);
+ if (method_ != nullptr) {
+ method_ = const_cast<mirror::ArtMethod*>(
+ reinterpret_cast<const mirror::ArtMethod*>(visitor(method_, arg)));
+ DCHECK(method_ != nullptr);
}
}
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 7b25306177..ec95a87146 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -667,7 +667,7 @@ void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>&
mh.ChangeMethod(method);
os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
PrettyDescriptor(mh.GetDeclaringClassDescriptor()).c_str(), mh.GetName(),
- mh.GetSignature().c_str(), mh.GetDeclaringClassSourceFile());
+ mh.GetSignature().ToString().c_str(), mh.GetDeclaringClassSourceFile());
}
}
diff --git a/runtime/utf-inl.h b/runtime/utf-inl.h
new file mode 100644
index 0000000000..d8c258b5d9
--- /dev/null
+++ b/runtime/utf-inl.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_UTF_INL_H_
+#define ART_RUNTIME_UTF_INL_H_
+
+#include "utf.h"
+
+namespace art {
+
+inline uint16_t GetUtf16FromUtf8(const char** utf8_data_in) {
+ uint8_t one = *(*utf8_data_in)++;
+ if ((one & 0x80) == 0) {
+ // one-byte encoding
+ return one;
+ }
+ // two- or three-byte encoding
+ uint8_t two = *(*utf8_data_in)++;
+ if ((one & 0x20) == 0) {
+ // two-byte encoding
+ return ((one & 0x1f) << 6) | (two & 0x3f);
+ }
+ // three-byte encoding
+ uint8_t three = *(*utf8_data_in)++;
+ return ((one & 0x0f) << 12) | ((two & 0x3f) << 6) | (three & 0x3f);
+}
+
+inline int CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(const char* utf8_1,
+ const char* utf8_2) {
+ for (;;) {
+ if (*utf8_1 == '\0') {
+ return (*utf8_2 == '\0') ? 0 : -1;
+ } else if (*utf8_2 == '\0') {
+ return 1;
+ }
+
+ int c1 = GetUtf16FromUtf8(&utf8_1);
+ int c2 = GetUtf16FromUtf8(&utf8_2);
+
+ if (c1 != c2) {
+ return c1 > c2 ? 1 : -1;
+ }
+ }
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_UTF_INL_H_
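
The new inline decoder handles the one-, two- and three-byte forms of Modified UTF-8 and advances the caller's cursor as a side effect. Worked example: 'é' (U+00E9) is encoded as C3 A9, and ((0xC3 & 0x1F) << 6) | (0xA9 & 0x3F) = 0xC0 | 0x29 = 0x00E9. A usage sketch, assuming the header above is on the include path:

#include <cassert>
#include <cstdint>
#include "utf-inl.h"

void DecodeExample() {
  const char data[] = "\xC3\xA9";        // "é" in (Modified) UTF-8
  const char* p = data;
  uint16_t cu = art::GetUtf16FromUtf8(&p);
  assert(cu == 0x00E9);                  // one UTF-16 code unit produced
  assert(p == data + 2);                 // cursor advanced past both bytes
}
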
diff --git a/runtime/utf.cc b/runtime/utf.cc
index 1add7d9a68..5ec2ea1c36 100644
--- a/runtime/utf.cc
+++ b/runtime/utf.cc
@@ -19,6 +19,7 @@
#include "base/logging.h"
#include "mirror/array.h"
#include "mirror/object-inl.h"
+#include "utf-inl.h"
namespace art {
@@ -84,41 +85,6 @@ int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count) {
return hash;
}
-
-uint16_t GetUtf16FromUtf8(const char** utf8_data_in) {
- uint8_t one = *(*utf8_data_in)++;
- if ((one & 0x80) == 0) {
- // one-byte encoding
- return one;
- }
- // two- or three-byte encoding
- uint8_t two = *(*utf8_data_in)++;
- if ((one & 0x20) == 0) {
- // two-byte encoding
- return ((one & 0x1f) << 6) | (two & 0x3f);
- }
- // three-byte encoding
- uint8_t three = *(*utf8_data_in)++;
- return ((one & 0x0f) << 12) | ((two & 0x3f) << 6) | (three & 0x3f);
-}
-
-int CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(const char* utf8_1, const char* utf8_2) {
- for (;;) {
- if (*utf8_1 == '\0') {
- return (*utf8_2 == '\0') ? 0 : -1;
- } else if (*utf8_2 == '\0') {
- return 1;
- }
-
- int c1 = GetUtf16FromUtf8(&utf8_1);
- int c2 = GetUtf16FromUtf8(&utf8_2);
-
- if (c1 != c2) {
- return c1 > c2 ? 1 : -1;
- }
- }
-}
-
int CompareModifiedUtf8ToUtf16AsCodePointValues(const char* utf8_1, const uint16_t* utf8_2) {
for (;;) {
if (*utf8_1 == '\0') {
diff --git a/runtime/utf.h b/runtime/utf.h
index 4c9a1d959e..cc5e6d48c2 100644
--- a/runtime/utf.h
+++ b/runtime/utf.h
@@ -29,9 +29,10 @@
* See http://en.wikipedia.org/wiki/UTF-8#Modified_UTF-8 for the details.
*/
namespace art {
+
namespace mirror {
-template<class T> class PrimitiveArray;
-typedef PrimitiveArray<uint16_t> CharArray;
+ template<class T> class PrimitiveArray;
+ typedef PrimitiveArray<uint16_t> CharArray;
} // namespace mirror
/*
diff --git a/runtime/utils.cc b/runtime/utils.cc
index ae10c766d1..b97239fd18 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -34,7 +34,7 @@
#include "mirror/string.h"
#include "object_utils.h"
#include "os.h"
-#include "utf.h"
+#include "utf-inl.h"
#if !defined(HAVE_POSIX_CLOCKS)
#include <sys/time.h>
@@ -367,11 +367,13 @@ std::string PrettyMethod(const mirror::ArtMethod* m, bool with_signature) {
result += '.';
result += mh.GetName();
if (with_signature) {
- std::string signature(mh.GetSignature());
- if (signature == "<no signature>") {
- return result + signature;
+ const Signature signature = mh.GetSignature();
+ std::string sig_as_string(signature.ToString());
+ if (signature == Signature::NoSignature()) {
+ return result + sig_as_string;
}
- result = PrettyReturnType(signature.c_str()) + " " + result + PrettyArguments(signature.c_str());
+ result = PrettyReturnType(sig_as_string.c_str()) + " " + result +
+ PrettyArguments(sig_as_string.c_str());
}
return result;
}
@@ -385,11 +387,13 @@ std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with
result += '.';
result += dex_file.GetMethodName(method_id);
if (with_signature) {
- std::string signature(dex_file.GetMethodSignature(method_id));
- if (signature == "<no signature>") {
- return result + signature;
+ const Signature signature = dex_file.GetMethodSignature(method_id);
+ std::string sig_as_string(signature.ToString());
+ if (signature == Signature::NoSignature()) {
+ return result + sig_as_string;
}
- result = PrettyReturnType(signature.c_str()) + " " + result + PrettyArguments(signature.c_str());
+ result = PrettyReturnType(sig_as_string.c_str()) + " " + result +
+ PrettyArguments(sig_as_string.c_str());
}
return result;
}
@@ -641,7 +645,7 @@ std::string JniLongName(const mirror::ArtMethod* m) {
long_name += JniShortName(m);
long_name += "__";
- std::string signature(MethodHelper(m).GetSignature());
+ std::string signature(MethodHelper(m).GetSignature().ToString());
signature.erase(0, 1);
signature.erase(signature.begin() + signature.find(')'), signature.end());
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 924a1bb377..36b409d142 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -51,7 +51,8 @@ void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* fl
uint32_t insns_size, uint16_t registers_size,
MethodVerifier* verifier) {
DCHECK_GT(insns_size, 0U);
-
+ register_lines_.reset(new RegisterLine*[insns_size]());
+ size_ = insns_size;
for (uint32_t i = 0; i < insns_size; i++) {
bool interesting = false;
switch (mode) {
@@ -68,7 +69,16 @@ void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* fl
break;
}
if (interesting) {
- pc_to_register_line_.Put(i, new RegisterLine(registers_size, verifier));
+ register_lines_[i] = RegisterLine::Create(registers_size, verifier);
+ }
+ }
+}
+
+PcToRegisterLineTable::~PcToRegisterLineTable() {
+ for (size_t i = 0; i < size_; i++) {
+ delete register_lines_[i];
+ if (kIsDebugBuild) {
+ register_lines_[i] = nullptr;
}
}
}
@@ -80,7 +90,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const mirror::Class* kla
return kNoFailure;
}
mirror::Class* super = klass->GetSuperClass();
- if (super == NULL && StringPiece(ClassHelper(klass).GetDescriptor()) != "Ljava/lang/Object;") {
+ if (super == NULL && ClassHelper(klass).GetDescriptorAsStringPiece() != "Ljava/lang/Object;") {
*error = "Verifier rejected class ";
*error += PrettyDescriptor(klass);
*error += " that has no super class";
@@ -293,6 +303,7 @@ MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_ca
dex_method_idx_(dex_method_idx),
mirror_method_(method),
method_access_flags_(method_access_flags),
+ return_type_(nullptr),
dex_file_(dex_file),
dex_cache_(dex_cache),
class_loader_(class_loader),
@@ -300,7 +311,7 @@ MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_ca
code_item_(code_item),
declaring_class_(NULL),
interesting_dex_pc_(-1),
- monitor_enter_dex_pcs_(NULL),
+ monitor_enter_dex_pcs_(nullptr),
have_pending_hard_failure_(false),
have_pending_runtime_throw_failure_(false),
new_instance_count_(0),
@@ -309,7 +320,7 @@ MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_ca
allow_soft_failures_(allow_soft_failures),
has_check_casts_(false),
has_virtual_or_interface_invokes_(false) {
- DCHECK(class_def != NULL);
+ DCHECK(class_def != nullptr);
}
void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
@@ -1034,8 +1045,8 @@ bool MethodVerifier::VerifyCodeFlow() {
this);
- work_line_.reset(new RegisterLine(registers_size, this));
- saved_line_.reset(new RegisterLine(registers_size, this));
+ work_line_.reset(RegisterLine::Create(registers_size, this));
+ saved_line_.reset(RegisterLine::Create(registers_size, this));
/* Initialize register types of method arguments. */
if (!SetTypesFromSignature()) {
@@ -1935,7 +1946,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
if (!cast_type.IsUnresolvedTypes() && !orig_type.IsUnresolvedTypes() &&
!cast_type.GetClass()->IsInterface() && !cast_type.IsAssignableFrom(orig_type)) {
- RegisterLine* update_line = new RegisterLine(code_item_->registers_size_, this);
+ RegisterLine* update_line = RegisterLine::Create(code_item_->registers_size_, this);
if (inst->Opcode() == Instruction::IF_EQZ) {
fallthrough_line.reset(update_line);
} else {
@@ -2131,20 +2142,30 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL,
is_range, is_super);
- const char* descriptor;
- if (called_method == NULL) {
+ const RegType* return_type = nullptr;
+ if (called_method != nullptr) {
+ MethodHelper mh(called_method);
+ mirror::Class* return_type_class = mh.GetReturnType();
+ if (return_type_class != nullptr) {
+ return_type = &reg_types_.FromClass(mh.GetReturnTypeDescriptor(), return_type_class,
+ return_type_class->CannotBeAssignedFromOtherTypes());
+ } else {
+ Thread* self = Thread::Current();
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ }
+ }
+ if (return_type == nullptr) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
- descriptor = dex_file_->StringByTypeIdx(return_type_idx);
- } else {
- descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
+ const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ return_type = &reg_types_.FromDescriptor(class_loader_, descriptor, false);
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
- if (!return_type.IsLowHalf()) {
- work_line_->SetResultRegisterType(return_type);
+ if (!return_type->IsLowHalf()) {
+ work_line_->SetResultRegisterType(*return_type);
} else {
- work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
+ work_line_->SetResultRegisterTypeWide(*return_type, return_type->HighHalf(&reg_types_));
}
just_set_result = true;
break;
@@ -2159,7 +2180,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
if (called_method == NULL) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
- is_constructor = StringPiece(dex_file_->GetMethodName(method_id)) == "<init>";
+ is_constructor = dex_file_->StringDataAsStringPieceByIdx(method_id.name_idx_) == "<init>";
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
return_type_descriptor = dex_file_->StringByTypeIdx(return_type_idx);
} else {
@@ -2889,7 +2910,7 @@ const RegType& MethodVerifier::GetCaughtExceptionType() {
}
mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx,
- MethodType method_type) {
+ MethodType method_type) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
if (klass_type.IsConflict()) {
@@ -2906,7 +2927,7 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth
mirror::ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx);
if (res_method == NULL) {
const char* name = dex_file_->GetMethodName(method_id);
- std::string signature(dex_file_->CreateMethodSignature(method_id.proto_idx_, NULL));
+ const Signature signature = dex_file_->GetMethodSignature(method_id);
if (method_type == METHOD_DIRECT || method_type == METHOD_STATIC) {
res_method = klass->FindDirectMethod(name, signature);
@@ -3497,22 +3518,26 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_ty
const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- const char* descriptor;
- mirror::ClassLoader* loader;
+ const RegType* field_type = nullptr;
if (field != NULL) {
- descriptor = FieldHelper(field).GetTypeDescriptor();
- loader = field->GetDeclaringClass()->GetClassLoader();
- } else {
+ FieldHelper fh(field);
+ mirror::Class* field_type_class = fh.GetType(false);
+ if (field_type_class != nullptr) {
+ field_type = &reg_types_.FromClass(fh.GetTypeDescriptor(), field_type_class,
+ field_type_class->CannotBeAssignedFromOtherTypes());
+ }
+ }
+ if (field_type == nullptr) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
- descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
- loader = class_loader_;
+ const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
+ mirror::ClassLoader* loader = class_loader_;
+ field_type = &reg_types_.FromDescriptor(loader, descriptor, false);
}
- const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
if (is_primitive) {
- if (field_type.Equals(insn_type) ||
- (field_type.IsFloat() && insn_type.IsInteger()) ||
- (field_type.IsDouble() && insn_type.IsLong())) {
+ if (field_type->Equals(insn_type) ||
+ (field_type->IsFloat() && insn_type.IsInteger()) ||
+ (field_type->IsDouble() && insn_type.IsLong())) {
// expected that read is of the correct primitive type or that int reads are reading
// floats or long reads are reading doubles
} else {
@@ -3525,7 +3550,7 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_ty
return;
}
} else {
- if (!insn_type.IsAssignableFrom(field_type)) {
+ if (!insn_type.IsAssignableFrom(*field_type)) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
<< " to be compatible with type '" << insn_type
<< "' but found type '" << field_type
@@ -3534,10 +3559,10 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_ty
return;
}
}
- if (!field_type.IsLowHalf()) {
- work_line_->SetRegisterType(vregA, field_type);
+ if (!field_type->IsLowHalf()) {
+ work_line_->SetRegisterType(vregA, *field_type);
} else {
- work_line_->SetRegisterTypeWide(vregA, field_type, field_type.HighHalf(&reg_types_));
+ work_line_->SetRegisterTypeWide(vregA, *field_type, field_type->HighHalf(&reg_types_));
}
}
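
This hunk introduces a pattern that recurs through the rest of the verifier changes: when the field (or method) resolved, build the register type straight from the resolved class, which yields a precise type; only when resolution failed fall back to the descriptor-based lookup. Abstracted to a sketch with assumed helper names:

struct Class;
struct RegType;

const RegType& FromClass(Class* klass);        // precise; assumed helper
const RegType& FromDescriptor(const char* d);  // possibly unresolved; assumed

const RegType& TypeFor(Class* resolved, const char* descriptor) {
  if (resolved != nullptr) {
    return FromClass(resolved);       // precise: checks can use the real
  }                                   // class hierarchy
  return FromDescriptor(descriptor);  // sound fallback with weaker info
}
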
@@ -3551,36 +3576,38 @@ void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_ty
const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- const char* descriptor;
- mirror::ClassLoader* loader;
- if (field != NULL) {
- descriptor = FieldHelper(field).GetTypeDescriptor();
- loader = field->GetDeclaringClass()->GetClassLoader();
- } else {
- const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
- descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
- loader = class_loader_;
- }
- const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ const RegType* field_type = nullptr;
if (field != NULL) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
<< " from other class " << GetDeclaringClass();
return;
}
+ FieldHelper fh(field);
+ mirror::Class* field_type_class = fh.GetType(false);
+ if (field_type_class != nullptr) {
+ field_type = &reg_types_.FromClass(fh.GetTypeDescriptor(), field_type_class,
+ field_type_class->CannotBeAssignedFromOtherTypes());
+ }
+ }
+ if (field_type == nullptr) {
+ const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+ const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
+ mirror::ClassLoader* loader = class_loader_;
+ field_type = &reg_types_.FromDescriptor(loader, descriptor, false);
}
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
if (is_primitive) {
- VerifyPrimitivePut(field_type, insn_type, vregA);
+ VerifyPrimitivePut(*field_type, insn_type, vregA);
} else {
- if (!insn_type.IsAssignableFrom(field_type)) {
+ if (!insn_type.IsAssignableFrom(*field_type)) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
<< " to be compatible with type '" << insn_type
<< "' but found type '" << field_type
<< "' in put-object";
return;
}
- work_line_->VerifyRegisterType(vregA, field_type);
+ work_line_->VerifyRegisterType(vregA, *field_type);
}
}
@@ -3648,14 +3675,21 @@ void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& ins
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
return;
}
- const char* descriptor = FieldHelper(field).GetTypeDescriptor();
- mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
- const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ FieldHelper fh(field);
+ mirror::Class* field_type_class = fh.GetType(false);
+ const RegType* field_type;
+ if (field_type_class != nullptr) {
+ field_type = &reg_types_.FromClass(fh.GetTypeDescriptor(), field_type_class,
+ field_type_class->CannotBeAssignedFromOtherTypes());
+ } else {
+ field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
+ fh.GetTypeDescriptor(), false);
+ }
const uint32_t vregA = inst->VRegA_22c();
if (is_primitive) {
- if (field_type.Equals(insn_type) ||
- (field_type.IsFloat() && insn_type.IsIntegralTypes()) ||
- (field_type.IsDouble() && insn_type.IsLongTypes())) {
+ if (field_type->Equals(insn_type) ||
+ (field_type->IsFloat() && insn_type.IsIntegralTypes()) ||
+ (field_type->IsDouble() && insn_type.IsLongTypes())) {
// expected that read is of the correct primitive type or that int reads are reading
// floats or long reads are reading doubles
} else {
@@ -3668,7 +3702,7 @@ void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& ins
return;
}
} else {
- if (!insn_type.IsAssignableFrom(field_type)) {
+ if (!insn_type.IsAssignableFrom(*field_type)) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
<< " to be compatible with type '" << insn_type
<< "' but found type '" << field_type
@@ -3677,10 +3711,10 @@ void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& ins
return;
}
}
- if (!field_type.IsLowHalf()) {
- work_line_->SetRegisterType(vregA, field_type);
+ if (!field_type->IsLowHalf()) {
+ work_line_->SetRegisterType(vregA, *field_type);
} else {
- work_line_->SetRegisterTypeWide(vregA, field_type, field_type.HighHalf(&reg_types_));
+ work_line_->SetRegisterTypeWide(vregA, *field_type, field_type->HighHalf(&reg_types_));
}
}
@@ -3794,7 +3828,7 @@ bool MethodVerifier::UpdateRegisters(uint32_t next_insn, const RegisterLine* mer
}
} else {
UniquePtr<RegisterLine> copy(gDebugVerify ?
- new RegisterLine(target_line->NumRegs(), this) :
+ RegisterLine::Create(target_line->NumRegs(), this) :
NULL);
if (gDebugVerify) {
copy->CopyFromLine(target_line);
@@ -3822,11 +3856,28 @@ InstructionFlags* MethodVerifier::CurrentInsnFlags() {
}
const RegType& MethodVerifier::GetMethodReturnType() {
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
- const DexFile::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
- uint16_t return_type_idx = proto_id.return_type_idx_;
- const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(return_type_idx));
- return reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ if (return_type_ == nullptr) {
+ if (mirror_method_ != NULL) {
+ MethodHelper mh(mirror_method_);
+ mirror::Class* return_type_class = mh.GetReturnType();
+ if (return_type_class != nullptr) {
+ return_type_ = &reg_types_.FromClass(mh.GetReturnTypeDescriptor(), return_type_class,
+ return_type_class->CannotBeAssignedFromOtherTypes());
+ } else {
+ Thread* self = Thread::Current();
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ }
+ }
+ if (return_type_ == nullptr) {
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const DexFile::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
+ uint16_t return_type_idx = proto_id.return_type_idx_;
+ const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(return_type_idx));
+ return_type_ = &reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ }
+ }
+ return *return_type_;
}
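
GetMethodReturnType() now memoizes into the new return_type_ member: the first call does the (potentially class-loading) lookup, later calls return the cached reference. The shape, reduced to a self-contained sketch:

class LazyReturnType {  // hypothetical; mirrors the return_type_ handling
 public:
  const int& Get() {
    if (value_ == nullptr) {
      static const int computed = 42;  // stand-in for the expensive lookup
      value_ = &computed;              // at most one slow path per verifier
    }
    return *value_;
  }
 private:
  const int* value_ = nullptr;
};
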
const RegType& MethodVerifier::GetDeclaringClass() {
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 073a2f76be..7f337419a4 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -110,10 +110,8 @@ enum RegisterTrackingMode {
// execution of that instruction.
class PcToRegisterLineTable {
public:
- PcToRegisterLineTable() {}
- ~PcToRegisterLineTable() {
- STLDeleteValues(&pc_to_register_line_);
- }
+ PcToRegisterLineTable() : size_(0) {}
+ ~PcToRegisterLineTable();
// Initialize the RegisterTable. Every instruction address can have a different set of information
// about what's in which register, but for verification purposes we only need to store it at
@@ -122,17 +120,13 @@ class PcToRegisterLineTable {
uint16_t registers_size, MethodVerifier* verifier);
RegisterLine* GetLine(size_t idx) {
- auto result = pc_to_register_line_.find(idx);
- if (result == pc_to_register_line_.end()) {
- return NULL;
- } else {
- return result->second;
- }
+ DCHECK_LT(idx, size_);
+ return register_lines_[idx];
}
private:
- typedef SafeMap<int32_t, RegisterLine*> Table;
- Table pc_to_register_line_;
+ UniquePtr<RegisterLine*[]> register_lines_;
+ size_t size_;
};
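
PcToRegisterLineTable drops the SafeMap in favour of a flat array indexed by dex pc: 'new RegisterLine*[insns_size]()' value-initializes every slot to null, so addresses that never got a line read back as nullptr exactly like the old map's miss case, while hits become a bounds-checked array load. The change in miniature, with std::unique_ptr standing in for ART's UniquePtr:

#include <cstddef>
#include <memory>

struct RegisterLine;

class FlatLineTable {  // hypothetical stand-in for PcToRegisterLineTable
 public:
  void Init(size_t insns_size) {
    lines_.reset(new RegisterLine*[insns_size]());  // all slots start null
    size_ = insns_size;
  }
  RegisterLine* GetLine(size_t idx) const {
    return idx < size_ ? lines_[idx] : nullptr;
  }
 private:
  std::unique_ptr<RegisterLine*[]> lines_;
  size_t size_ = 0;
};
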
// The verifier
@@ -688,6 +682,7 @@ class MethodVerifier {
// Its object representation if known.
mirror::ArtMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
const uint32_t method_access_flags_; // Method's access flags.
+ const RegType* return_type_; // Lazily computed return type of the method.
const DexFile* const dex_file_; // The dex file containing the method.
// The dex_cache for the declaring class of the method.
mirror::DexCache* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 25f840cc56..50d1583bbb 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -99,7 +99,7 @@ std::string PreciseConstType::Dump() const {
}
std::string BooleanType::Dump() const {
- return "boolean";
+ return "Boolean";
}
std::string ConflictType::Dump() const {
@@ -111,7 +111,7 @@ std::string ByteType::Dump() const {
}
std::string ShortType::Dump() const {
- return "short";
+ return "Short";
}
std::string CharType::Dump() const {
@@ -119,15 +119,15 @@ std::string CharType::Dump() const {
}
std::string FloatType::Dump() const {
- return "float";
+ return "Float";
}
std::string LongLoType::Dump() const {
- return "long (Low Half)";
+ return "Long (Low Half)";
}
std::string LongHiType::Dump() const {
- return "long (High Half)";
+ return "Long (High Half)";
}
std::string DoubleLoType::Dump() const {
@@ -461,7 +461,6 @@ std::string ImpreciseConstType::Dump() const {
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
- CHECK(IsPreciseConstant());
result << "Zero/null";
} else {
result << "Imprecise ";
@@ -762,11 +761,6 @@ bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
return AssignableFrom(*this, src, true);
}
-int32_t ConstantType::ConstantValue() const {
- DCHECK(IsConstantTypes());
- return constant_;
-}
-
int32_t ConstantType::ConstantValueLo() const {
DCHECK(IsConstantLo());
return constant_;
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 865ba20d44..f3717330eb 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -574,9 +574,12 @@ class ConstantType : public RegType {
// If this is a 32-bit constant, what is the value? This value may be imprecise in which case
// the value represents part of the integer range of values that may be held in the register.
- virtual int32_t ConstantValue() const;
- virtual int32_t ConstantValueLo() const;
- virtual int32_t ConstantValueHi() const;
+ int32_t ConstantValue() const {
+ DCHECK(IsConstantTypes());
+ return constant_;
+ }
+ int32_t ConstantValueLo() const;
+ int32_t ConstantValueHi() const;
bool IsZero() const {
return IsPreciseConstant() && ConstantValue() == 0;
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 295e27198d..fc9e5c98f7 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -23,17 +23,6 @@
namespace art {
namespace verifier {
-template <class Type>
-Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
- mirror::Class* klass = NULL;
- // Try loading the class from linker.
- if (!descriptor.empty()) {
- klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(descriptor.c_str());
- }
- Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
- RegTypeCache::primitive_count_++;
- return entry;
-}
inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
@@ -41,6 +30,16 @@ inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const
DCHECK(result != NULL);
return *result;
}
+
+inline const ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+ // We only expect 0 to be a precise constant.
+ DCHECK(value != 0 || precise);
+ if (precise && (value >= kMinSmallConstant) && (value <= kMaxSmallConstant)) {
+ return *small_precise_constants_[value - kMinSmallConstant];
+ }
+ return FromCat1NonSmallConstant(value, precise);
+}
+
} // namespace verifier
} // namespace art
#endif // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
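
The inlined FromCat1Const fast path keys off a tiny window of interned constants: values in [kMinSmallConstant, kMaxSmallConstant] = [-1, 4] are presumably the most common constants the verifier sees (array indices, booleans, small loop bounds), so each gets one global PreciseConstType and a hit collapses to an array read with no locking and no allocation. The fast path in isolation, under assumed names:

#include <cstdint>

struct PreciseConst;  // stand-in for PreciseConstType

constexpr int32_t kMin = -1;             // mirrors kMinSmallConstant
constexpr int32_t kMax = 4;              // mirrors kMaxSmallConstant
PreciseConst* g_small[kMax - kMin + 1];  // filled once at startup

const PreciseConst* LookupSmall(int32_t value) {
  if (value >= kMin && value <= kMax) {
    return g_small[value - kMin];  // O(1) hit on the shared instances
  }
  return nullptr;  // caller falls back to FromCat1NonSmallConstant
}
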
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 2c18132c0b..fd7030011d 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -26,8 +26,8 @@ namespace art {
namespace verifier {
bool RegTypeCache::primitive_initialized_ = false;
-uint16_t RegTypeCache::primitive_start_ = 0;
uint16_t RegTypeCache::primitive_count_ = 0;
+PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
static bool MatchingPrecisionForClass(RegType* entry, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -44,7 +44,7 @@ static bool MatchingPrecisionForClass(RegType* entry, bool precise)
}
}
-void RegTypeCache::FillPrimitiveTypes() {
+void RegTypeCache::FillPrimitiveAndSmallConstantTypes() {
entries_.push_back(UndefinedType::GetInstance());
entries_.push_back(ConflictType::GetInstance());
entries_.push_back(BooleanType::GetInstance());
@@ -57,6 +57,11 @@ void RegTypeCache::FillPrimitiveTypes() {
entries_.push_back(FloatType::GetInstance());
entries_.push_back(DoubleLoType::GetInstance());
entries_.push_back(DoubleHiType::GetInstance());
+ for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
+ int32_t i = value - kMinSmallConstant;
+ DCHECK_EQ(entries_.size(), small_precise_constants_[i]->GetId());
+ entries_.push_back(small_precise_constants_[i]);
+ }
DCHECK_EQ(entries_.size(), primitive_count_);
}
@@ -205,6 +210,7 @@ const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descr
}
const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+ DCHECK(klass != nullptr);
if (klass->IsPrimitive()) {
// Note: precise isn't used for primitive classes. A char is assignable to an int. All
// primitive classes are final.
@@ -232,12 +238,12 @@ const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* kl
RegTypeCache::~RegTypeCache() {
CHECK_LE(primitive_count_, entries_.size());
// Delete only the non primitive types.
- if (entries_.size() == kNumPrimitives) {
- // All entries are primitive, nothing to delete.
+ if (entries_.size() == kNumPrimitivesAndSmallConstants) {
+ // All entries are from the global pool, nothing to delete.
return;
}
std::vector<RegType*>::iterator non_primitive_begin = entries_.begin();
- std::advance(non_primitive_begin, kNumPrimitives);
+ std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
STLDeleteContainerPointers(non_primitive_begin, entries_.end());
}
@@ -255,12 +261,29 @@ void RegTypeCache::ShutDown() {
FloatType::Destroy();
DoubleLoType::Destroy();
DoubleHiType::Destroy();
+ for (uint16_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
+ PreciseConstType* type = small_precise_constants_[value - kMinSmallConstant];
+ delete type;
+ }
+
RegTypeCache::primitive_initialized_ = false;
RegTypeCache::primitive_count_ = 0;
}
}
-void RegTypeCache::CreatePrimitiveTypes() {
+template <class Type>
+Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
+ mirror::Class* klass = NULL;
+ // Try loading the class from linker.
+ if (!descriptor.empty()) {
+ klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(descriptor.c_str());
+ }
+ Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
+ RegTypeCache::primitive_count_++;
+ return entry;
+}
+
+void RegTypeCache::CreatePrimitiveAndSmallConstantTypes() {
CreatePrimitiveTypeInstance<UndefinedType>("");
CreatePrimitiveTypeInstance<ConflictType>("");
CreatePrimitiveTypeInstance<BooleanType>("Z");
@@ -273,6 +296,11 @@ void RegTypeCache::CreatePrimitiveTypes() {
CreatePrimitiveTypeInstance<FloatType>("F");
CreatePrimitiveTypeInstance<DoubleLoType>("D");
CreatePrimitiveTypeInstance<DoubleHiType>("D");
+ for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
+ PreciseConstType* type = new PreciseConstType(value, primitive_count_);
+ small_precise_constants_[value - kMinSmallConstant] = type;
+ primitive_count_++;
+ }
}
const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
@@ -331,29 +359,28 @@ const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
return *entry;
}
-const RegType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
- RegType* entry = NULL;
- RegType* cur_entry = NULL;
+const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
+ UninitializedType* entry = NULL;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
- cur_entry = entries_[i];
+ RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedAndUninitializedReference() &&
down_cast<UnresolvedUninitializedRefType*>(cur_entry)->GetAllocationPc() == allocation_pc &&
(cur_entry->GetDescriptor() == descriptor)) {
- return *cur_entry;
+ return *down_cast<UnresolvedUninitializedRefType*>(cur_entry);
}
}
entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
- cur_entry = entries_[i];
+ RegType* cur_entry = entries_[i];
if (cur_entry->IsUninitializedReference() &&
down_cast<UninitializedReferenceType*>(cur_entry)
->GetAllocationPc() == allocation_pc &&
cur_entry->GetClass() == klass) {
- return *cur_entry;
+ return *down_cast<UninitializedReferenceType*>(cur_entry);
}
}
entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
@@ -404,27 +431,33 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
return *entry;
}
-const RegType& RegTypeCache::ByteConstant() {
- return FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+const ImpreciseConstType& RegTypeCache::ByteConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+ DCHECK(result.IsImpreciseConstant());
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-const RegType& RegTypeCache::ShortConstant() {
- return FromCat1Const(std::numeric_limits<jshort>::min(), false);
+const ImpreciseConstType& RegTypeCache::ShortConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
+ DCHECK(result.IsImpreciseConstant());
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-const RegType& RegTypeCache::IntConstant() {
- return FromCat1Const(std::numeric_limits<jint>::max(), false);
+const ImpreciseConstType& RegTypeCache::IntConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+ DCHECK(result.IsImpreciseConstant());
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
- RegType* entry;
+const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
+ UninitializedType* entry;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedAndUninitializedThisReference() &&
cur_entry->GetDescriptor() == descriptor) {
- return *cur_entry;
+ return *down_cast<UninitializedType*>(cur_entry);
}
}
entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
@@ -433,7 +466,7 @@ const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsUninitializedThisReference() && cur_entry->GetClass() == klass) {
- return *cur_entry;
+ return *down_cast<UninitializedType*>(cur_entry);
}
}
entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
@@ -442,16 +475,16 @@ const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
return *entry;
}
-const RegType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->klass_ == NULL && cur_entry->IsConstant() &&
cur_entry->IsPreciseConstant() == precise &&
(down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
- return *cur_entry;
+ return *down_cast<ConstantType*>(cur_entry);
}
}
- RegType* entry;
+ ConstantType* entry;
if (precise) {
entry = new PreciseConstType(value, entries_.size());
} else {
@@ -461,15 +494,15 @@ const RegType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
return *entry;
}
-const RegType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
+const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
(down_cast<ConstantType*>(cur_entry))->ConstantValueLo() == value) {
- return *cur_entry;
+ return *down_cast<ConstantType*>(cur_entry);
}
}
- RegType* entry;
+ ConstantType* entry;
if (precise) {
entry = new PreciseConstLoType(value, entries_.size());
} else {
@@ -479,15 +512,15 @@ const RegType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
return *entry;
}
-const RegType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
+const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
(down_cast<ConstantType*>(cur_entry))->ConstantValueHi() == value) {
- return *cur_entry;
+ return *down_cast<ConstantType*>(cur_entry);
}
}
- RegType* entry;
+ ConstantType* entry;
if (precise) {
entry = new PreciseConstHiType(value, entries_.size());
} else {
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 77f58934da..a9f8bff784 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -35,19 +35,18 @@ namespace verifier {
class RegType;
-const size_t kNumPrimitives = 12;
class RegTypeCache {
public:
explicit RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
entries_.reserve(64);
- FillPrimitiveTypes();
+ FillPrimitiveAndSmallConstantTypes();
}
~RegTypeCache();
static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
CHECK_EQ(RegTypeCache::primitive_count_, 0);
- CreatePrimitiveTypes();
- CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitives);
+ CreatePrimitiveAndSmallConstantTypes();
+ CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitivesAndSmallConstants);
RegTypeCache::primitive_initialized_ = true;
}
}
@@ -55,17 +54,13 @@ class RegTypeCache {
const art::verifier::RegType& GetFromId(uint16_t id) const;
const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template <class Type>
- static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FillPrimitiveTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromCat1Const(int32_t value, bool precise)
+ const ConstantType& FromCat1Const(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromCat2ConstLo(int32_t value, bool precise)
+ const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromCat2ConstHi(int32_t value, bool precise)
+ const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -129,34 +124,56 @@ class RegTypeCache {
const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Object;", precise);
}
- const RegType& Uninitialized(const RegType& type, uint32_t allocation_pc)
+ const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Create an uninitialized 'this' argument for the given type.
- const RegType& UninitializedThisArgument(const RegType& type)
+ const UninitializedType& UninitializedThisArgument(const RegType& type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& FromUninitialized(const RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
private:
- std::vector<RegType*> entries_;
- static bool primitive_initialized_;
- static uint16_t primitive_start_;
- static uint16_t primitive_count_;
- static void CreatePrimitiveTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Whether or not we're allowed to load classes.
- const bool can_load_classes_;
+ void FillPrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ClearException();
bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <class Type>
+ static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void CreatePrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // The actual storage for the RegTypes.
+ std::vector<RegType*> entries_;
+
+ // A quick lookup table for popular small constants.
+ static constexpr int32_t kMinSmallConstant = -1;
+ static constexpr int32_t kMaxSmallConstant = 4;
+ static PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+
+ static constexpr size_t kNumPrimitivesAndSmallConstants =
+ 12 + (kMaxSmallConstant - kMinSmallConstant + 1);
+
+ // Have the well-known global primitives been created?
+ static bool primitive_initialized_;
+
+ // Number of well-known primitives that will be copied into a RegTypeCache upon construction.
+ static uint16_t primitive_count_;
+
+ // Whether or not we're allowed to load classes.
+ const bool can_load_classes_;
+
DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
};
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index a615cc1273..1a41657264 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -456,8 +456,7 @@ bool RegisterLine::VerifyMonitorStackEmpty() const {
bool RegisterLine::MergeRegisters(const RegisterLine* incoming_line) {
bool changed = false;
- CHECK(NULL != incoming_line);
- CHECK(NULL != line_.get());
+ DCHECK(incoming_line != nullptr);
for (size_t idx = 0; idx < num_regs_; idx++) {
if (line_[idx] != incoming_line->line_[idx]) {
const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index f19dccac17..8b2dadb119 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -17,7 +17,6 @@
#ifndef ART_RUNTIME_VERIFIER_REGISTER_LINE_H_
#define ART_RUNTIME_VERIFIER_REGISTER_LINE_H_
-#include <deque>
#include <vector>
#include "dex_instruction.h"
@@ -51,12 +50,10 @@ enum TypeCategory {
// stack of entered monitors (identified by code unit offset).
class RegisterLine {
public:
- RegisterLine(size_t num_regs, MethodVerifier* verifier)
- : line_(new uint16_t[num_regs]),
- verifier_(verifier),
- num_regs_(num_regs) {
- memset(line_.get(), 0, num_regs_ * sizeof(uint16_t));
- SetResultTypeToUnknown();
+ static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier) {
+ uint8_t* memory = new uint8_t[sizeof(RegisterLine) + (num_regs * sizeof(uint16_t))];
+ RegisterLine* rl = new (memory) RegisterLine(num_regs, verifier);
+ return rl;
}
// Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
@@ -108,7 +105,7 @@ class RegisterLine {
void CopyFromLine(const RegisterLine* src) {
DCHECK_EQ(num_regs_, src->num_regs_);
- memcpy(line_.get(), src->line_.get(), num_regs_ * sizeof(uint16_t));
+ memcpy(&line_, &src->line_, num_regs_ * sizeof(uint16_t));
monitors_ = src->monitors_;
reg_to_lock_depths_ = src->reg_to_lock_depths_;
}
@@ -116,7 +113,7 @@ class RegisterLine {
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FillWithGarbage() {
- memset(line_.get(), 0xf1, num_regs_ * sizeof(uint16_t));
+ memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
while (!monitors_.empty()) {
monitors_.pop_back();
}
@@ -161,7 +158,7 @@ class RegisterLine {
int CompareLine(const RegisterLine* line2) const {
DCHECK(monitors_ == line2->monitors_);
// TODO: DCHECK(reg_to_lock_depths_ == line2->reg_to_lock_depths_);
- return memcmp(line_.get(), line2->line_.get(), num_regs_ * sizeof(uint16_t));
+ return memcmp(&line_, &line2->line_, num_regs_ * sizeof(uint16_t));
}
size_t NumRegs() const {
@@ -339,23 +336,30 @@ class RegisterLine {
reg_to_lock_depths_.erase(reg);
}
+ RegisterLine(size_t num_regs, MethodVerifier* verifier)
+ : verifier_(verifier),
+ num_regs_(num_regs) {
+ memset(&line_, 0, num_regs_ * sizeof(uint16_t));
+ SetResultTypeToUnknown();
+ }
+
// Storage for the result register's type, valid after an invocation
uint16_t result_[2];
- // An array of RegType Ids associated with each dex register
- UniquePtr<uint16_t[]> line_;
-
// Back link to the verifier
MethodVerifier* verifier_;
// Length of reg_types_
const uint32_t num_regs_;
// A stack of monitor enter locations
- std::deque<uint32_t> monitors_;
+ std::vector<uint32_t> monitors_;
// A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
// stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
// monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5
SafeMap<uint32_t, uint32_t> reg_to_lock_depths_;
+
+ // An array of RegType Ids associated with each dex register.
+ uint16_t line_[0];
};
std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs);
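
RegisterLine's biggest change is the allocation layout: the per-register id array moves from a separately heap-allocated UniquePtr<uint16_t[]> into a zero-length trailing array ('uint16_t line_[0]', a GCC/Clang extension), and Create() buys the header plus payload in one contiguous block, constructing the object into it with placement new. One allocation instead of two, and better locality when the verifier walks the line. A stand-alone version of the idiom, including the matching teardown the trick requires:

#include <cstdint>
#include <cstring>
#include <new>

class Line {
 public:
  static Line* Create(size_t n) {
    uint8_t* memory = new uint8_t[sizeof(Line) + n * sizeof(uint16_t)];
    return new (memory) Line(n);  // header first, regs_ directly behind it
  }
  static void Destroy(Line* line) {
    line->~Line();
    delete[] reinterpret_cast<uint8_t*>(line);  // must match the new[] above
  }
  uint16_t& Reg(size_t i) { return regs_[i]; }

 private:
  explicit Line(size_t n) : num_(n) {
    memset(regs_, 0, n * sizeof(uint16_t));  // as the real ctor does
  }
  size_t num_;
  uint16_t regs_[0];  // trailing array: storage lives just past the object
};
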
diff --git a/test/Android.mk b/test/Android.mk
index 6f498e8c02..08ec03a01a 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -23,8 +23,8 @@ include art/build/Android.common.mk
TEST_DEX_DIRECTORIES := \
AbstractMethod \
AllFields \
- CreateMethodSignature \
ExceptionHandle \
+ GetMethodSignature \
Interfaces \
Main \
MyClass \
diff --git a/test/CreateMethodSignature/CreateMethodSignature.java b/test/GetMethodSignature/GetMethodSignature.java
index f6cd6ae6fd..c2ba948d60 100644
--- a/test/CreateMethodSignature/CreateMethodSignature.java
+++ b/test/GetMethodSignature/GetMethodSignature.java
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-class CreateMethodSignature {
+class GetMethodSignature {
Float m1(int a, double b, long c, Object d) { return null; }
- CreateMethodSignature m2(boolean x, short y, char z) { return null; }
+ GetMethodSignature m2(boolean x, short y, char z) { return null; }
}
diff --git a/test/run-test b/test/run-test
index 11dcfc57de..c449e84c35 100755
--- a/test/run-test
+++ b/test/run-test
@@ -269,7 +269,7 @@ elif [ "$build_only" = "yes" ]; then
fi
fi
# Clean up extraneous files that are not used by tests.
- find $tmp_dir -mindepth 1 ! -regex ".*/\(.*jar\|$build_output\|$expected\)" | xargs rm -rf
+ find $tmp_dir -mindepth 1 ! -regex ".*/\(.*jar\|$output\|$expected\)" | xargs rm -rf
exit 0
else
"./${build}" >"$build_output" 2>&1