summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIan Rogers <irogers@google.com>2013-08-09 22:05:32 -0700
committerIan Rogers <irogers@google.com>2013-08-12 06:16:03 +0000
commit96faf5b363d922ae91cf25404dee0e87c740c7c5 (patch)
treed397fd63cde72e897490e21b3af3c355db7a36d4
parent49bded21270e8087e11d933d7b19aee22c0d8649 (diff)
downloadart-96faf5b363d922ae91cf25404dee0e87c740c7c5.tar.gz
art-96faf5b363d922ae91cf25404dee0e87c740c7c5.tar.bz2
art-96faf5b363d922ae91cf25404dee0e87c740c7c5.zip
Uleb128 compression of vmap and mapping table.
Bug 9437697. Change-Id: I30bcb97d12cd8b46d3b2cdcbdd358f08fbb9947a (cherry picked from commit 1809a72a66d245ae598582d658b93a24ac3bf01e)
-rw-r--r--compiler/dex/quick/codegen_util.cc54
-rw-r--r--compiler/dex/quick/mir_to_lir.h4
-rw-r--r--compiler/image_writer.cc4
-rw-r--r--compiler/leb128_encoder.h63
-rw-r--r--compiler/oat_writer.cc16
-rw-r--r--compiler/oat_writer.h4
-rw-r--r--oatdump/oatdump.cc144
-rw-r--r--runtime/base/macros.h2
-rw-r--r--runtime/common_test.h4
-rw-r--r--runtime/compiled_method.cc28
-rw-r--r--runtime/compiled_method.h17
-rw-r--r--runtime/exception_test.cc29
-rw-r--r--runtime/leb128.h26
-rw-r--r--runtime/mapping_table.h196
-rw-r--r--runtime/mirror/abstract_method-inl.h8
-rw-r--r--runtime/mirror/abstract_method.cc70
-rw-r--r--runtime/mirror/abstract_method.h65
-rw-r--r--runtime/oat_file.cc8
-rw-r--r--runtime/oat_file.h8
-rw-r--r--runtime/stack.cc9
-rw-r--r--runtime/stack.h72
-rw-r--r--runtime/thread.cc5
-rw-r--r--runtime/vmap_table.h115
-rw-r--r--test/ReferenceMap/stack_walk_refmap_jni.cc34
24 files changed, 609 insertions, 376 deletions
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index c9780fa537..5f6f3d51e9 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -17,6 +17,7 @@
#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
+#include "mapping_table.h"
#include "mir_to_lir-inl.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
@@ -515,15 +516,35 @@ void Mir2Lir::CreateMappingTables() {
}
}
if (kIsDebugBuild) {
- DCHECK(VerifyCatchEntries());
+ CHECK(VerifyCatchEntries());
+ }
+ CHECK_EQ(pc2dex_mapping_table_.size() & 1, 0U);
+ CHECK_EQ(dex2pc_mapping_table_.size() & 1, 0U);
+ uint32_t total_entries = (pc2dex_mapping_table_.size() + dex2pc_mapping_table_.size()) / 2;
+ uint32_t pc2dex_entries = pc2dex_mapping_table_.size() / 2;
+ encoded_mapping_table_.PushBack(total_entries);
+ encoded_mapping_table_.PushBack(pc2dex_entries);
+ encoded_mapping_table_.InsertBack(pc2dex_mapping_table_.begin(), pc2dex_mapping_table_.end());
+ encoded_mapping_table_.InsertBack(dex2pc_mapping_table_.begin(), dex2pc_mapping_table_.end());
+ if (kIsDebugBuild) {
+ // Verify the encoded table holds the expected data.
+ MappingTable table(&encoded_mapping_table_.GetData()[0]);
+ CHECK_EQ(table.TotalSize(), total_entries);
+ CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
+ CHECK_EQ(table.DexToPcSize(), dex2pc_mapping_table_.size() / 2);
+ MappingTable::PcToDexIterator it = table.PcToDexBegin();
+ for (uint32_t i = 0; i < pc2dex_mapping_table_.size(); ++i, ++it) {
+ CHECK_EQ(pc2dex_mapping_table_.at(i), it.NativePcOffset());
+ ++i;
+ CHECK_EQ(pc2dex_mapping_table_.at(i), it.DexPc());
+ }
+ MappingTable::DexToPcIterator it2 = table.DexToPcBegin();
+ for (uint32_t i = 0; i < dex2pc_mapping_table_.size(); ++i, ++it2) {
+ CHECK_EQ(dex2pc_mapping_table_.at(i), it2.NativePcOffset());
+ ++i;
+ CHECK_EQ(dex2pc_mapping_table_.at(i), it2.DexPc());
+ }
}
- combined_mapping_table_.push_back(pc2dex_mapping_table_.size() +
- dex2pc_mapping_table_.size());
- combined_mapping_table_.push_back(pc2dex_mapping_table_.size());
- combined_mapping_table_.insert(combined_mapping_table_.end(), pc2dex_mapping_table_.begin(),
- pc2dex_mapping_table_.end());
- combined_mapping_table_.insert(combined_mapping_table_.end(), dex2pc_mapping_table_.begin(),
- dex2pc_mapping_table_.end());
}
class NativePcToReferenceMapBuilder {
@@ -980,28 +1001,35 @@ void Mir2Lir::Materialize() {
CompiledMethod* Mir2Lir::GetCompiledMethod() {
// Combine vmap tables - core regs, then fp regs - into vmap_table
- std::vector<uint16_t> vmap_table;
+ std::vector<uint16_t> raw_vmap_table;
// Core regs may have been inserted out of order - sort first
std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
for (size_t i = 0 ; i < core_vmap_table_.size(); i++) {
// Copy, stripping out the phys register sort key
- vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
+ raw_vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
}
// If we have a frame, push a marker to take place of lr
if (frame_size_ > 0) {
- vmap_table.push_back(INVALID_VREG);
+ raw_vmap_table.push_back(INVALID_VREG);
} else {
DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
}
// Combine vmap tables - core regs, then fp regs. fp regs already sorted
for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
- vmap_table.push_back(fp_vmap_table_[i]);
+ raw_vmap_table.push_back(fp_vmap_table_[i]);
+ }
+ UnsignedLeb128EncodingVector vmap_encoder;
+ // Prefix the encoded data with its size.
+ vmap_encoder.PushBack(raw_vmap_table.size());
+ typedef std::vector<uint16_t>::const_iterator It;
+ for (It cur = raw_vmap_table.begin(), end = raw_vmap_table.end(); cur != end; ++cur) {
+ vmap_encoder.PushBack(*cur);
}
CompiledMethod* result =
new CompiledMethod(cu_->instruction_set, code_buffer_,
frame_size_, core_spill_mask_, fp_spill_mask_,
- combined_mapping_table_, vmap_table, native_gc_map_);
+ encoded_mapping_table_.GetData(), vmap_encoder.GetData(), native_gc_map_);
return result;
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 2794bf5e5b..517fc66a00 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -25,6 +25,7 @@
#include "dex/growable_array.h"
#include "dex/arena_allocator.h"
#include "driver/compiler_driver.h"
+#include "leb128_encoder.h"
#include "safe_map.h"
namespace art {
@@ -760,7 +761,8 @@ class Mir2Lir : public Backend {
*/
int live_sreg_;
CodeBuffer code_buffer_;
- std::vector<uint32_t> combined_mapping_table_;
+ // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
+ UnsignedLeb128EncodingVector encoded_mapping_table_;
std::vector<uint32_t> core_vmap_table_;
std::vector<uint32_t> fp_vmap_table_;
std::vector<uint8_t> native_gc_map_;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 550d642753..3432c8cbee 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -547,11 +547,11 @@ void ImageWriter::FixupMethod(const AbstractMethod* orig, AbstractMethod* copy)
// Normal (non-abstract non-native) methods have various tables to relocate.
uint32_t mapping_table_off = orig->GetOatMappingTableOffset();
const byte* mapping_table = GetOatAddress(mapping_table_off);
- copy->SetMappingTable(reinterpret_cast<const uint32_t*>(mapping_table));
+ copy->SetMappingTable(mapping_table);
uint32_t vmap_table_offset = orig->GetOatVmapTableOffset();
const byte* vmap_table = GetOatAddress(vmap_table_offset);
- copy->SetVmapTable(reinterpret_cast<const uint16_t*>(vmap_table));
+ copy->SetVmapTable(vmap_table);
uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
diff --git a/compiler/leb128_encoder.h b/compiler/leb128_encoder.h
new file mode 100644
index 0000000000..e9a1c32a33
--- /dev/null
+++ b/compiler/leb128_encoder.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LEB128_ENCODER_H_
+#define ART_COMPILER_LEB128_ENCODER_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
+class UnsignedLeb128EncodingVector {
+ public:
+ UnsignedLeb128EncodingVector() {
+ }
+
+ void PushBack(uint32_t value) {
+ bool done = false;
+ do {
+ uint8_t out = value & 0x7f;
+ if (out != value) {
+ data_.push_back(out | 0x80);
+ value >>= 7;
+ } else {
+ data_.push_back(out);
+ done = true;
+ }
+ } while (!done);
+ }
+
+ template<typename It>
+ void InsertBack(It cur, It end) {
+ for (; cur != end; ++cur) {
+ PushBack(*cur);
+ }
+ }
+
+ const std::vector<uint8_t>& GetData() const {
+ return data_;
+ }
+
+ private:
+ std::vector<uint8_t> data_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnsignedLeb128EncodingVector);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_LEB128_ENCODER_H_
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 21c5317b69..ce88cf6dd6 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -322,12 +322,12 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
core_spill_mask = compiled_method->GetCoreSpillMask();
fp_spill_mask = compiled_method->GetFpSpillMask();
- const std::vector<uint32_t>& mapping_table = compiled_method->GetMappingTable();
+ const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
size_t mapping_table_size = mapping_table.size() * sizeof(mapping_table[0]);
mapping_table_offset = (mapping_table_size == 0) ? 0 : offset;
// Deduplicate mapping tables
- SafeMap<const std::vector<uint32_t>*, uint32_t>::iterator mapping_iter =
+ SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator mapping_iter =
mapping_table_offsets_.find(&mapping_table);
if (mapping_iter != mapping_table_offsets_.end()) {
mapping_table_offset = mapping_iter->second;
@@ -337,12 +337,12 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
oat_header_->UpdateChecksum(&mapping_table[0], mapping_table_size);
}
- const std::vector<uint16_t>& vmap_table = compiled_method->GetVmapTable();
+ const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
size_t vmap_table_size = vmap_table.size() * sizeof(vmap_table[0]);
vmap_table_offset = (vmap_table_size == 0) ? 0 : offset;
// Deduplicate vmap tables
- SafeMap<const std::vector<uint16_t>*, uint32_t>::iterator vmap_iter =
+ SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator vmap_iter =
vmap_table_offsets_.find(&vmap_table);
if (vmap_iter != vmap_table_offsets_.end()) {
vmap_table_offset = vmap_iter->second;
@@ -717,11 +717,11 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
DCHECK_OFFSET();
#endif
- const std::vector<uint32_t>& mapping_table = compiled_method->GetMappingTable();
+ const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
size_t mapping_table_size = mapping_table.size() * sizeof(mapping_table[0]);
// Deduplicate mapping tables
- SafeMap<const std::vector<uint32_t>*, uint32_t>::iterator mapping_iter =
+ SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator mapping_iter =
mapping_table_offsets_.find(&mapping_table);
if (mapping_iter != mapping_table_offsets_.end() &&
relative_offset != method_offsets.mapping_table_offset_) {
@@ -741,11 +741,11 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
}
DCHECK_OFFSET();
- const std::vector<uint16_t>& vmap_table = compiled_method->GetVmapTable();
+ const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
size_t vmap_table_size = vmap_table.size() * sizeof(vmap_table[0]);
// Deduplicate vmap tables
- SafeMap<const std::vector<uint16_t>*, uint32_t>::iterator vmap_iter =
+ SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator vmap_iter =
vmap_table_offsets_.find(&vmap_table);
if (vmap_iter != vmap_table_offsets_.end() &&
relative_offset != method_offsets.vmap_table_offset_) {
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index e6cc0bce80..f7801f5d7f 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -226,8 +226,8 @@ class OatWriter {
// code mappings for deduplication
SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > code_offsets_;
- SafeMap<const std::vector<uint16_t>*, uint32_t, MapCompare<std::vector<uint16_t> > > vmap_table_offsets_;
- SafeMap<const std::vector<uint32_t>*, uint32_t, MapCompare<std::vector<uint32_t> > > mapping_table_offsets_;
+ SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > vmap_table_offsets_;
+ SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > mapping_table_offsets_;
SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > gc_map_offsets_;
DISALLOW_COPY_AND_ASSIGN(OatWriter);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 0a34686ce7..a717f192b9 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -35,6 +35,7 @@
#include "gc/space/space-inl.h"
#include "image.h"
#include "indenter.h"
+#include "mapping_table.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
@@ -48,6 +49,7 @@
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "verifier/method_verifier.h"
+#include "vmap_table.h"
namespace art {
@@ -390,40 +392,39 @@ class OatDumper {
}
void DumpVmap(std::ostream& os, const OatFile::OatMethod& oat_method) {
- const uint16_t* raw_table = oat_method.GetVmapTable();
- if (raw_table == NULL) {
- return;
- }
- const VmapTable vmap_table(raw_table);
- bool first = true;
- bool processing_fp = false;
- uint32_t spill_mask = oat_method.GetCoreSpillMask();
- for (size_t i = 0; i < vmap_table.size(); i++) {
- uint16_t dex_reg = vmap_table[i];
- uint32_t cpu_reg = vmap_table.ComputeRegister(spill_mask, i,
- processing_fp ? kFloatVReg : kIntVReg);
- os << (first ? "v" : ", v") << dex_reg;
- if (!processing_fp) {
- os << "/r" << cpu_reg;
- } else {
- os << "/fr" << cpu_reg;
- }
- first = false;
- if (!processing_fp && dex_reg == 0xFFFF) {
- processing_fp = true;
- spill_mask = oat_method.GetFpSpillMask();
+ const uint8_t* raw_table = oat_method.GetVmapTable();
+ if (raw_table != NULL) {
+ const VmapTable vmap_table(raw_table);
+ bool first = true;
+ bool processing_fp = false;
+ uint32_t spill_mask = oat_method.GetCoreSpillMask();
+ for (size_t i = 0; i < vmap_table.Size(); i++) {
+ uint16_t dex_reg = vmap_table[i];
+ uint32_t cpu_reg = vmap_table.ComputeRegister(spill_mask, i,
+ processing_fp ? kFloatVReg : kIntVReg);
+ os << (first ? "v" : ", v") << dex_reg;
+ if (!processing_fp) {
+ os << "/r" << cpu_reg;
+ } else {
+ os << "/fr" << cpu_reg;
+ }
+ first = false;
+ if (!processing_fp && dex_reg == 0xFFFF) {
+ processing_fp = true;
+ spill_mask = oat_method.GetFpSpillMask();
+ }
}
+ os << "\n";
}
- os << "\n";
}
void DescribeVReg(std::ostream& os, const OatFile::OatMethod& oat_method,
const DexFile::CodeItem* code_item, size_t reg, VRegKind kind) {
- const uint16_t* raw_table = oat_method.GetVmapTable();
+ const uint8_t* raw_table = oat_method.GetVmapTable();
if (raw_table != NULL) {
const VmapTable vmap_table(raw_table);
uint32_t vmap_offset;
- if (vmap_table.IsInContext(reg, vmap_offset, kind)) {
+ if (vmap_table.IsInContext(reg, kind, &vmap_offset)) {
bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
uint32_t spill_mask = is_float ? oat_method.GetFpSpillMask()
: oat_method.GetCoreSpillMask();
@@ -471,67 +472,50 @@ class OatDumper {
}
void DumpMappingTable(std::ostream& os, const OatFile::OatMethod& oat_method) {
- const uint32_t* raw_table = oat_method.GetMappingTable();
const void* code = oat_method.GetCode();
- if (raw_table == NULL || code == NULL) {
- return;
- }
-
- ++raw_table;
- uint32_t length = *raw_table;
- ++raw_table;
- if (length == 0) {
+ if (code == NULL) {
return;
}
- uint32_t pc_to_dex_entries = *raw_table;
- ++raw_table;
- if (pc_to_dex_entries != 0) {
- os << "suspend point mappings {\n";
- } else {
- os << "catch entry mappings {\n";
- }
- Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
- std::ostream indent_os(&indent_filter);
- for (size_t i = 0; i < length; i += 2) {
- const uint8_t* native_pc = reinterpret_cast<const uint8_t*>(code) + raw_table[i];
- uint32_t dex_pc = raw_table[i + 1];
- indent_os << StringPrintf("%p -> 0x%04x\n", native_pc, dex_pc);
- if (i + 2 == pc_to_dex_entries && pc_to_dex_entries != length) {
- // Separate the pc -> dex from dex -> pc sections
- indent_os << std::flush;
- os << "}\ncatch entry mappings {\n";
+ MappingTable table(oat_method.GetMappingTable());
+ if (table.TotalSize() != 0) {
+ Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
+ std::ostream indent_os(&indent_filter);
+ if (table.PcToDexSize() != 0) {
+ typedef MappingTable::PcToDexIterator It;
+ os << "suspend point mappings {\n";
+ for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ indent_os << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
+ }
+ os << "}\n";
+ }
+ if (table.DexToPcSize() != 0) {
+ typedef MappingTable::DexToPcIterator It;
+ os << "catch entry mappings {\n";
+ for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ indent_os << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
+ }
+ os << "}\n";
}
}
- os << "}\n";
}
- uint32_t DumpMappingAtOffset(std::ostream& os, const OatFile::OatMethod& oat_method, size_t offset,
- bool suspend_point_mapping) {
- const uint32_t* raw_table = oat_method.GetMappingTable();
- if (raw_table != NULL) {
- ++raw_table;
- uint32_t length = *raw_table;
- ++raw_table;
- uint32_t pc_to_dex_entries = *raw_table;
- ++raw_table;
- size_t start, end;
- if (suspend_point_mapping) {
- start = 0;
- end = pc_to_dex_entries;
- } else {
- start = pc_to_dex_entries;
- end = length;
+ uint32_t DumpMappingAtOffset(std::ostream& os, const OatFile::OatMethod& oat_method,
+ size_t offset, bool suspend_point_mapping) {
+ MappingTable table(oat_method.GetMappingTable());
+ if (suspend_point_mapping && table.PcToDexSize() > 0) {
+ typedef MappingTable::PcToDexIterator It;
+ for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (offset == cur.NativePcOffset()) {
+ os << "suspend point dex PC: 0x" << cur.DexPc() << "\n";
+ return cur.DexPc();
+ }
}
- for (size_t i = start; i < end; i += 2) {
- if (offset == raw_table[i]) {
- uint32_t dex_pc = raw_table[i + 1];
- if (suspend_point_mapping) {
- os << "suspend point dex PC: 0x";
- } else {
- os << "catch entry dex PC: 0x";
- }
- os << std::hex << dex_pc << std::dec << "\n";
- return dex_pc;
+ } else if (!suspend_point_mapping && table.DexToPcSize() > 0) {
+ typedef MappingTable::DexToPcIterator It;
+ for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (offset == cur.NativePcOffset()) {
+ os << "catch entry dex PC: 0x" << cur.DexPc() << "\n";
+ return cur.DexPc();
}
}
}
@@ -1019,13 +1003,13 @@ class ImageDumper {
}
size_t pc_mapping_table_bytes =
- state->ComputeOatSize(method->GetMappingTableRaw(), &first_occurrence);
+ state->ComputeOatSize(method->GetMappingTable(), &first_occurrence);
if (first_occurrence) {
state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
}
size_t vmap_table_bytes =
- state->ComputeOatSize(method->GetVmapTableRaw(), &first_occurrence);
+ state->ComputeOatSize(method->GetVmapTable(), &first_occurrence);
if (first_occurrence) {
state->stats_.vmap_table_bytes += vmap_table_bytes;
}
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 879c10c1ae..6531858f9d 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -142,6 +142,8 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#define HOT_ATTR __attribute__ ((hot))
#endif
+#define PURE __attribute__ ((__pure__))
+
// bionic and glibc both have TEMP_FAILURE_RETRY, but Mac OS' libc doesn't.
#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(exp) ({ \
diff --git a/runtime/common_test.h b/runtime/common_test.h
index a54361706a..7110e117f3 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -168,8 +168,8 @@ class CommonTest : public testing::Test {
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const uint32_t* mapping_table,
- const uint16_t* vmap_table,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
const uint8_t* gc_map) {
return OatFile::OatMethod(NULL,
reinterpret_cast<uint32_t>(code),
diff --git a/runtime/compiled_method.cc b/runtime/compiled_method.cc
index c64c71ed3f..4631cb5db4 100644
--- a/runtime/compiled_method.cc
+++ b/runtime/compiled_method.cc
@@ -112,35 +112,13 @@ CompiledMethod::CompiledMethod(InstructionSet instruction_set,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const std::vector<uint32_t>& mapping_table,
- const std::vector<uint16_t>& vmap_table,
+ const std::vector<uint8_t>& mapping_table,
+ const std::vector<uint8_t>& vmap_table,
const std::vector<uint8_t>& native_gc_map)
: CompiledCode(instruction_set, code), frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
+ mapping_table_(mapping_table), vmap_table_(vmap_table),
gc_map_(native_gc_map) {
- DCHECK_EQ(vmap_table.size(),
- static_cast<uint32_t>(__builtin_popcount(core_spill_mask)
- + __builtin_popcount(fp_spill_mask)));
- CHECK_LE(vmap_table.size(), (1U << 16) - 1); // length must fit in 2^16-1
-
- std::vector<uint32_t> length_prefixed_mapping_table;
- length_prefixed_mapping_table.push_back(mapping_table.size());
- length_prefixed_mapping_table.insert(length_prefixed_mapping_table.end(),
- mapping_table.begin(),
- mapping_table.end());
- DCHECK_EQ(mapping_table.size() + 1, length_prefixed_mapping_table.size());
-
- std::vector<uint16_t> length_prefixed_vmap_table;
- length_prefixed_vmap_table.push_back(vmap_table.size());
- length_prefixed_vmap_table.insert(length_prefixed_vmap_table.end(),
- vmap_table.begin(),
- vmap_table.end());
- DCHECK_EQ(vmap_table.size() + 1, length_prefixed_vmap_table.size());
- DCHECK_EQ(vmap_table.size(), length_prefixed_vmap_table[0]);
-
- mapping_table_ = length_prefixed_mapping_table;
- vmap_table_ = length_prefixed_vmap_table;
- DCHECK_EQ(vmap_table_[0], static_cast<uint32_t>(__builtin_popcount(core_spill_mask) + __builtin_popcount(fp_spill_mask)));
}
CompiledMethod::CompiledMethod(InstructionSet instruction_set,
diff --git a/runtime/compiled_method.h b/runtime/compiled_method.h
index 800dde2208..b3bb20fac4 100644
--- a/runtime/compiled_method.h
+++ b/runtime/compiled_method.h
@@ -103,8 +103,8 @@ class CompiledMethod : public CompiledCode {
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const std::vector<uint32_t>& mapping_table,
- const std::vector<uint16_t>& vmap_table,
+ const std::vector<uint8_t>& mapping_table,
+ const std::vector<uint8_t>& vmap_table,
const std::vector<uint8_t>& native_gc_map);
// Constructs a CompiledMethod for the JniCompiler.
@@ -147,11 +147,11 @@ class CompiledMethod : public CompiledCode {
return fp_spill_mask_;
}
- const std::vector<uint32_t>& GetMappingTable() const {
+ const std::vector<uint8_t>& GetMappingTable() const {
return mapping_table_;
}
- const std::vector<uint16_t>& GetVmapTable() const {
+ const std::vector<uint8_t>& GetVmapTable() const {
return vmap_table_;
}
@@ -166,10 +166,11 @@ class CompiledMethod : public CompiledCode {
const uint32_t core_spill_mask_;
// For quick code, a bit mask describing spilled FPR callee-save registers.
const uint32_t fp_spill_mask_;
- // For quick code, a map from native PC offset to dex PC.
- std::vector<uint32_t> mapping_table_;
- // For quick code, a map from GPR/FPR register to dex register.
- std::vector<uint16_t> vmap_table_;
+ // For quick code, a uleb128 encoded map from native PC offset to dex PC aswell as dex PC to
+ // native PC offset. Size prefixed.
+ std::vector<uint8_t> mapping_table_;
+ // For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed.
+ std::vector<uint8_t> vmap_table_;
// For quick code, a map keyed by native PC indices to bitmaps describing what dalvik registers
// are live. For portable code, the key is a dalvik PC.
std::vector<uint8_t> gc_map_;
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index a7a6d46c98..933b74ace1 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -18,6 +18,7 @@
#include "common_test.h"
#include "dex_file.h"
#include "gtest/gtest.h"
+#include "leb128_encoder.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
@@ -53,17 +54,17 @@ class ExceptionTest : public CommonTest {
fake_code_.push_back(0x70 | i);
}
- fake_mapping_data_.push_back(4); // first element is count
- fake_mapping_data_.push_back(4); // total (non-length) elements
- fake_mapping_data_.push_back(2); // count of pc to dex elements
+ fake_mapping_data_.PushBack(4); // first element is count
+ fake_mapping_data_.PushBack(4); // total (non-length) elements
+ fake_mapping_data_.PushBack(2); // count of pc to dex elements
// --- pc to dex table
- fake_mapping_data_.push_back(3); // offset 3
- fake_mapping_data_.push_back(3); // maps to dex offset 3
+ fake_mapping_data_.PushBack(3); // offset 3
+ fake_mapping_data_.PushBack(3); // maps to dex offset 3
// --- dex to pc table
- fake_mapping_data_.push_back(3); // offset 3
- fake_mapping_data_.push_back(3); // maps to dex offset 3
+ fake_mapping_data_.PushBack(3); // offset 3
+ fake_mapping_data_.PushBack(3); // maps to dex offset 3
- fake_vmap_table_data_.push_back(0);
+ fake_vmap_table_data_.PushBack(0);
fake_gc_map_.push_back(0); // 0 bytes to encode references and native pc offsets.
fake_gc_map_.push_back(0);
@@ -74,24 +75,24 @@ class ExceptionTest : public CommonTest {
ASSERT_TRUE(method_f_ != NULL);
method_f_->SetFrameSizeInBytes(kStackAlignment);
method_f_->SetEntryPointFromCompiledCode(CompiledMethod::CodePointer(&fake_code_[sizeof(code_size)], kThumb2));
- method_f_->SetMappingTable(&fake_mapping_data_[0]);
- method_f_->SetVmapTable(&fake_vmap_table_data_[0]);
+ method_f_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
+ method_f_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
method_f_->SetNativeGcMap(&fake_gc_map_[0]);
method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
ASSERT_TRUE(method_g_ != NULL);
method_g_->SetFrameSizeInBytes(kStackAlignment);
method_g_->SetEntryPointFromCompiledCode(CompiledMethod::CodePointer(&fake_code_[sizeof(code_size)], kThumb2));
- method_g_->SetMappingTable(&fake_mapping_data_[0]);
- method_g_->SetVmapTable(&fake_vmap_table_data_[0]);
+ method_g_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
+ method_g_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
method_g_->SetNativeGcMap(&fake_gc_map_[0]);
}
const DexFile* dex_;
std::vector<uint8_t> fake_code_;
- std::vector<uint32_t> fake_mapping_data_;
- std::vector<uint16_t> fake_vmap_table_data_;
+ UnsignedLeb128EncodingVector fake_mapping_data_;
+ UnsignedLeb128EncodingVector fake_vmap_table_data_;
std::vector<uint8_t> fake_gc_map_;
mirror::AbstractMethod* method_f_;
diff --git a/runtime/leb128.h b/runtime/leb128.h
index ca955b0921..6041f8c31f 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -24,8 +24,8 @@ namespace art {
// Reads an unsigned LEB128 value, updating the given pointer to point
// just past the end of the read value. This function tolerates
// non-zero high-order bits in the fifth encoded byte.
-static inline uint32_t DecodeUnsignedLeb128(const byte** data) {
- const byte* ptr = *data;
+static inline uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
+ const uint8_t* ptr = *data;
int result = *(ptr++);
if (result > 0x7f) {
int cur = *(ptr++);
@@ -53,15 +53,15 @@ static inline uint32_t DecodeUnsignedLeb128(const byte** data) {
// just past the end of the read value. This function tolerates
// non-zero high-order bits in the fifth encoded byte.
// It is possible for this function to return -1.
-static inline int32_t DecodeUnsignedLeb128P1(const byte** data) {
+static inline int32_t DecodeUnsignedLeb128P1(const uint8_t** data) {
return DecodeUnsignedLeb128(data) - 1;
}
// Reads a signed LEB128 value, updating the given pointer to point
// just past the end of the read value. This function tolerates
// non-zero high-order bits in the fifth encoded byte.
-static inline int32_t DecodeSignedLeb128(const byte** data) {
- const byte* ptr = *data;
+static inline int32_t DecodeSignedLeb128(const uint8_t** data) {
+ const uint8_t* ptr = *data;
int32_t result = *(ptr++);
if (result <= 0x7f) {
result = (result << 25) >> 25;
@@ -103,22 +103,6 @@ static inline uint32_t UnsignedLeb128Size(uint32_t data) {
return count;
}
-// Writes a 32-bit value in unsigned ULEB128 format.
-// Returns the updated pointer.
-static inline uint8_t* WriteUnsignedLeb128(uint8_t* ptr, uint32_t data) {
- while (true) {
- uint8_t out = data & 0x7f;
- if (out != data) {
- *ptr++ = out | 0x80;
- data >>= 7;
- } else {
- *ptr++ = out;
- break;
- }
- }
- return ptr;
-}
-
} // namespace art
#endif // ART_RUNTIME_LEB128_H_
diff --git a/runtime/mapping_table.h b/runtime/mapping_table.h
new file mode 100644
index 0000000000..2162008025
--- /dev/null
+++ b/runtime/mapping_table.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MAPPING_TABLE_H_
+#define ART_RUNTIME_MAPPING_TABLE_H_
+
+#include "base/logging.h"
+#include "leb128.h"
+
+namespace art {
+
+// A utility for processing the raw uleb128 encoded mapping table created by the quick compiler.
+class MappingTable {
+ public:
+ explicit MappingTable(const uint8_t* encoded_map) : encoded_table_(encoded_map) {
+ }
+
+ uint32_t TotalSize() const PURE {
+ const uint8_t* table = encoded_table_;
+ if (table == NULL) {
+ return 0;
+ } else {
+ return DecodeUnsignedLeb128(&table);
+ }
+ }
+
+ uint32_t DexToPcSize() const PURE {
+ const uint8_t* table = encoded_table_;
+ if (table == NULL) {
+ return 0;
+ } else {
+ uint32_t total_size = DecodeUnsignedLeb128(&table);
+ uint32_t pc_to_dex_size = DecodeUnsignedLeb128(&table);
+ return total_size - pc_to_dex_size;
+ }
+ }
+
+ const uint8_t* FirstDexToPcPtr() const {
+ const uint8_t* table = encoded_table_;
+ if (table != NULL) {
+      DecodeUnsignedLeb128(&table);  // total_size, unused.
+ uint32_t pc_to_dex_size = DecodeUnsignedLeb128(&table);
+ for (uint32_t i = 0; i < pc_to_dex_size; ++i) {
+ DecodeUnsignedLeb128(&table); // Move ptr past native PC.
+ DecodeUnsignedLeb128(&table); // Move ptr past dex PC.
+ }
+ }
+ return table;
+ }
+
+ class DexToPcIterator {
+ public:
+ DexToPcIterator(const MappingTable* table, uint32_t element) :
+ table_(table), element_(element), end_(table_->DexToPcSize()), encoded_table_ptr_(NULL),
+ native_pc_offset_(0), dex_pc_(0) {
+ if (element == 0) {
+ encoded_table_ptr_ = table_->FirstDexToPcPtr();
+ native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+ dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+ } else {
+ DCHECK_EQ(table_->DexToPcSize(), element);
+ }
+ }
+ uint32_t NativePcOffset() const {
+ return native_pc_offset_;
+ }
+ uint32_t DexPc() const {
+ return dex_pc_;
+ }
+ void operator++() {
+ ++element_;
+ if (element_ != end_) { // Avoid reading beyond the end of the table.
+ native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+ dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+ }
+ }
+ bool operator==(const DexToPcIterator& rhs) const {
+ CHECK(table_ == rhs.table_);
+ return element_ == rhs.element_;
+ }
+ bool operator!=(const DexToPcIterator& rhs) const {
+ CHECK(table_ == rhs.table_);
+ return element_ != rhs.element_;
+ }
+
+ private:
+ const MappingTable* const table_; // The original table.
+ uint32_t element_; // A value in the range 0 to end_.
+ const uint32_t end_; // Equal to table_->DexToPcSize().
+ const uint8_t* encoded_table_ptr_; // Either NULL or points to encoded data after this entry.
+ uint32_t native_pc_offset_; // The current value of native pc offset.
+ uint32_t dex_pc_; // The current value of dex pc.
+ };
+
+ DexToPcIterator DexToPcBegin() const {
+ return DexToPcIterator(this, 0);
+ }
+
+ DexToPcIterator DexToPcEnd() const {
+ uint32_t size = DexToPcSize();
+ return DexToPcIterator(this, size);
+ }
+
+ uint32_t PcToDexSize() const PURE {
+ const uint8_t* table = encoded_table_;
+ if (table == NULL) {
+ return 0;
+ } else {
+      DecodeUnsignedLeb128(&table);  // total_size, unused.
+ uint32_t pc_to_dex_size = DecodeUnsignedLeb128(&table);
+ return pc_to_dex_size;
+ }
+ }
+
+ const uint8_t* FirstPcToDexPtr() const {
+ const uint8_t* table = encoded_table_;
+ if (table != NULL) {
+      DecodeUnsignedLeb128(&table);  // total_size, unused.
+ DecodeUnsignedLeb128(&table); // PC to Dex size, unused.
+ }
+ return table;
+ }
+
+ class PcToDexIterator {
+ public:
+ PcToDexIterator(const MappingTable* table, uint32_t element) :
+ table_(table), element_(element), end_(table_->PcToDexSize()), encoded_table_ptr_(NULL),
+ native_pc_offset_(0), dex_pc_(0) {
+ if (element == 0) {
+ encoded_table_ptr_ = table_->FirstPcToDexPtr();
+ native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+ dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+ } else {
+ DCHECK_EQ(table_->PcToDexSize(), element);
+ }
+ }
+ uint32_t NativePcOffset() const {
+ return native_pc_offset_;
+ }
+ uint32_t DexPc() const {
+ return dex_pc_;
+ }
+ void operator++() {
+ ++element_;
+ if (element_ != end_) { // Avoid reading beyond the end of the table.
+ native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+ dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+ }
+ }
+ bool operator==(const PcToDexIterator& rhs) const {
+ CHECK(table_ == rhs.table_);
+ return element_ == rhs.element_;
+ }
+ bool operator!=(const PcToDexIterator& rhs) const {
+ CHECK(table_ == rhs.table_);
+ return element_ != rhs.element_;
+ }
+
+ private:
+ const MappingTable* const table_; // The original table.
+ uint32_t element_; // A value in the range 0 to PcToDexSize.
+ const uint32_t end_; // Equal to table_->PcToDexSize().
+ const uint8_t* encoded_table_ptr_; // Either NULL or points to encoded data after this entry.
+ uint32_t native_pc_offset_; // The current value of native pc offset.
+ uint32_t dex_pc_; // The current value of dex pc.
+ };
+
+ PcToDexIterator PcToDexBegin() const {
+ return PcToDexIterator(this, 0);
+ }
+
+ PcToDexIterator PcToDexEnd() const {
+ uint32_t size = PcToDexSize();
+ return PcToDexIterator(this, size);
+ }
+
+ private:
+ const uint8_t* const encoded_table_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_MAPPING_TABLE_H_
diff --git a/runtime/mirror/abstract_method-inl.h b/runtime/mirror/abstract_method-inl.h
index 8fde99be3b..d47b3ebccc 100644
--- a/runtime/mirror/abstract_method-inl.h
+++ b/runtime/mirror/abstract_method-inl.h
@@ -144,22 +144,22 @@ inline void AbstractMethod::SetOatCodeOffset(uint32_t code_offset) {
inline uint32_t AbstractMethod::GetOatMappingTableOffset() const {
DCHECK(!Runtime::Current()->IsStarted());
- return reinterpret_cast<uint32_t>(GetMappingTableRaw());
+ return reinterpret_cast<uint32_t>(GetMappingTable());
}
inline void AbstractMethod::SetOatMappingTableOffset(uint32_t mapping_table_offset) {
DCHECK(!Runtime::Current()->IsStarted());
- SetMappingTable(reinterpret_cast<const uint32_t*>(mapping_table_offset));
+ SetMappingTable(reinterpret_cast<const uint8_t*>(mapping_table_offset));
}
inline uint32_t AbstractMethod::GetOatVmapTableOffset() const {
DCHECK(!Runtime::Current()->IsStarted());
- return reinterpret_cast<uint32_t>(GetVmapTableRaw());
+ return reinterpret_cast<uint32_t>(GetVmapTable());
}
inline void AbstractMethod::SetOatVmapTableOffset(uint32_t vmap_table_offset) {
DCHECK(!Runtime::Current()->IsStarted());
- SetVmapTable(reinterpret_cast<uint16_t*>(vmap_table_offset));
+ SetVmapTable(reinterpret_cast<uint8_t*>(vmap_table_offset));
}
inline void AbstractMethod::SetOatNativeGcMapOffset(uint32_t gc_map_offset) {
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 93065e7f4e..b3db5c2721 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -24,6 +24,7 @@
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "jni_internal.h"
+#include "mapping_table.h"
#include "object-inl.h"
#include "object_array.h"
#include "object_array-inl.h"
@@ -157,43 +158,27 @@ uintptr_t AbstractMethod::NativePcOffset(const uintptr_t pc) const {
return pc - reinterpret_cast<uintptr_t>(code);
}
-// Find the lowest-address native safepoint pc for a given dex pc
-uintptr_t AbstractMethod::ToFirstNativeSafepointPc(const uint32_t dex_pc) const {
-#if !defined(ART_USE_PORTABLE_COMPILER)
- const uint32_t* mapping_table = GetPcToDexMappingTable();
- if (mapping_table == NULL) {
- DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
- return DexFile::kDexNoIndex; // Special no mapping case
- }
- size_t mapping_table_length = GetPcToDexMappingTableLength();
- for (size_t i = 0; i < mapping_table_length; i += 2) {
- if (mapping_table[i + 1] == dex_pc) {
- const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
- return mapping_table[i] + reinterpret_cast<uintptr_t>(code);
- }
- }
- LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
- << " in " << PrettyMethod(this);
- return 0;
-#else
- // Compiler LLVM doesn't use the machine pc, we just use dex pc instead.
- return static_cast<uint32_t>(dex_pc);
-#endif
-}
-
uint32_t AbstractMethod::ToDexPc(const uintptr_t pc) const {
#if !defined(ART_USE_PORTABLE_COMPILER)
- const uint32_t* mapping_table = GetPcToDexMappingTable();
- if (mapping_table == NULL) {
+ MappingTable table(GetMappingTable());
+ if (table.TotalSize() == 0) {
DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
return DexFile::kDexNoIndex; // Special no mapping case
}
- size_t mapping_table_length = GetPcToDexMappingTableLength();
const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(code);
- for (size_t i = 0; i < mapping_table_length; i += 2) {
- if (mapping_table[i] == sought_offset) {
- return mapping_table[i + 1];
+ // Assume the caller wants a pc-to-dex mapping so check here first.
+ typedef MappingTable::PcToDexIterator It;
+ for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
+ }
+ }
+ // Now check dex-to-pc mappings.
+ typedef MappingTable::DexToPcIterator It2;
+ for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
}
}
LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
@@ -207,21 +192,28 @@ uint32_t AbstractMethod::ToDexPc(const uintptr_t pc) const {
}
uintptr_t AbstractMethod::ToNativePc(const uint32_t dex_pc) const {
- const uint32_t* mapping_table = GetDexToPcMappingTable();
- if (mapping_table == NULL) {
+ MappingTable table(GetMappingTable());
+ if (table.TotalSize() == 0) {
DCHECK_EQ(dex_pc, 0U);
return 0; // Special no mapping/pc == 0 case
}
- size_t mapping_table_length = GetDexToPcMappingTableLength();
- for (size_t i = 0; i < mapping_table_length; i += 2) {
- uint32_t map_offset = mapping_table[i];
- uint32_t map_dex_offset = mapping_table[i + 1];
- if (map_dex_offset == dex_pc) {
+ // Assume the caller wants a dex-to-pc mapping so check here first.
+ typedef MappingTable::DexToPcIterator It;
+ for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
- return reinterpret_cast<uintptr_t>(code) + map_offset;
+ return reinterpret_cast<uintptr_t>(code) + cur.NativePcOffset();
}
}
- LOG(FATAL) << "Looking up Dex PC not contained in method, 0x" << std::hex << dex_pc
+ // Now check pc-to-dex mappings.
+ typedef MappingTable::PcToDexIterator It2;
+ for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
+ return reinterpret_cast<uintptr_t>(code) + cur.NativePcOffset();
+ }
+ }
+ LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
<< " in " << PrettyMethod(this);
return 0;
}
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 2e6e262451..5b8c61cd06 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -246,54 +246,13 @@ class MANAGED AbstractMethod : public Object {
return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, entry_point_from_compiled_code_);
}
- const uint32_t* GetMappingTable() const {
- const uint32_t* map = GetMappingTableRaw();
- if (map == NULL) {
- return map;
- }
- return map + 1;
- }
-
- uint32_t GetPcToDexMappingTableLength() const {
- const uint32_t* map = GetMappingTableRaw();
- if (map == NULL) {
- return 0;
- }
- return map[2];
- }
-
- const uint32_t* GetPcToDexMappingTable() const {
- const uint32_t* map = GetMappingTableRaw();
- if (map == NULL) {
- return map;
- }
- return map + 3;
- }
-
-
- uint32_t GetDexToPcMappingTableLength() const {
- const uint32_t* map = GetMappingTableRaw();
- if (map == NULL) {
- return 0;
- }
- return map[1] - map[2];
- }
-
- const uint32_t* GetDexToPcMappingTable() const {
- const uint32_t* map = GetMappingTableRaw();
- if (map == NULL) {
- return map;
- }
- return map + 3 + map[2];
+ // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
+ const uint8_t* GetMappingTable() const {
+ return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_), false);
}
-
- const uint32_t* GetMappingTableRaw() const {
- return GetFieldPtr<const uint32_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_), false);
- }
-
- void SetMappingTable(const uint32_t* mapping_table) {
- SetFieldPtr<const uint32_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_),
+ void SetMappingTable(const uint8_t* mapping_table) {
+ SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_),
mapping_table, false);
}
@@ -301,13 +260,13 @@ class MANAGED AbstractMethod : public Object {
void SetOatMappingTableOffset(uint32_t mapping_table_offset);
- // Callers should wrap the uint16_t* in a VmapTable instance for convenient access.
- const uint16_t* GetVmapTableRaw() const {
- return GetFieldPtr<const uint16_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), false);
+ // Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
+ const uint8_t* GetVmapTable() const {
+ return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), false);
}
- void SetVmapTable(const uint16_t* vmap_table) {
- SetFieldPtr<const uint16_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), vmap_table, false);
+ void SetVmapTable(const uint8_t* vmap_table) {
+ SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), vmap_table, false);
}
uint32_t GetOatVmapTableOffset() const;
@@ -403,10 +362,6 @@ class MANAGED AbstractMethod : public Object {
// Converts a dex PC to a native PC.
uintptr_t ToNativePc(const uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Converts a dex PC to the first corresponding safepoint PC.
- uintptr_t ToFirstNativeSafepointPc(const uint32_t dex_pc)
- const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
// a move-exception instruction is present.
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 7bffc8cd52..93e98ad894 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -28,6 +28,7 @@
#include "mirror/object-inl.h"
#include "os.h"
#include "utils.h"
+#include "vmap_table.h"
namespace art {
@@ -416,9 +417,10 @@ OatFile::OatMethod::OatMethod(const byte* base,
DCHECK_EQ(0U, static_cast<uint32_t>(__builtin_popcount(core_spill_mask_) +
__builtin_popcount(fp_spill_mask_)));
} else {
- const uint16_t* vmap_table_ = reinterpret_cast<const uint16_t*>(begin_ + vmap_table_offset_);
- DCHECK_EQ(vmap_table_[0], static_cast<uint32_t>(__builtin_popcount(core_spill_mask_) +
- __builtin_popcount(fp_spill_mask_)));
+ VmapTable vmap_table(reinterpret_cast<const uint8_t*>(begin_ + vmap_table_offset_));
+
+ DCHECK_EQ(vmap_table.Size(), static_cast<uint32_t>(__builtin_popcount(core_spill_mask_) +
+ __builtin_popcount(fp_spill_mask_)));
}
} else {
DCHECK_EQ(vmap_table_offset_, 0U);
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index fff6c8a1a6..650301465b 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -97,11 +97,11 @@ class OatFile {
const void* GetCode() const;
uint32_t GetCodeSize() const;
- const uint32_t* GetMappingTable() const {
- return GetOatPointer<const uint32_t*>(mapping_table_offset_);
+ const uint8_t* GetMappingTable() const {
+ return GetOatPointer<const uint8_t*>(mapping_table_offset_);
}
- const uint16_t* GetVmapTable() const {
- return GetOatPointer<const uint16_t*>(vmap_table_offset_);
+ const uint8_t* GetVmapTable() const {
+ return GetOatPointer<const uint8_t*>(vmap_table_offset_);
}
const uint8_t* GetNativeGcMap() const {
return GetOatPointer<const uint8_t*>(native_gc_map_offset_);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index d49d6bae95..b07a24ef2a 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -24,6 +24,7 @@
#include "object_utils.h"
#include "thread_list.h"
#include "throw_location.h"
+#include "vmap_table.h"
namespace art {
@@ -135,10 +136,10 @@ uint32_t StackVisitor::GetVReg(mirror::AbstractMethod* m, uint16_t vreg, VRegKin
if (cur_quick_frame_ != NULL) {
DCHECK(context_ != NULL); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- const VmapTable vmap_table(m->GetVmapTableRaw());
+ const VmapTable vmap_table(m->GetVmapTable());
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, vmap_offset, kind)) {
+ if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
uint32_t spill_mask = is_float ? m->GetFpSpillMask()
: m->GetCoreSpillMask();
@@ -160,10 +161,10 @@ void StackVisitor::SetVReg(mirror::AbstractMethod* m, uint16_t vreg, uint32_t ne
if (cur_quick_frame_ != NULL) {
DCHECK(context_ != NULL); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const VmapTable vmap_table(m->GetVmapTableRaw());
+ const VmapTable vmap_table(m->GetVmapTable());
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
- if (vmap_table.IsInContext(vreg, vmap_offset, kind)) {
+ if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
uint32_t spill_mask = is_float ? m->GetFpSpillMask() : m->GetCoreSpillMask();
const uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kReferenceVReg);
diff --git a/runtime/stack.h b/runtime/stack.h
index de93846112..388e4014c1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -571,78 +571,6 @@ class StackVisitor {
Context* const context_;
};
-class VmapTable {
- public:
- explicit VmapTable(const uint16_t* table) : table_(table) {
- }
-
- uint16_t operator[](size_t i) const {
- return table_[i + 1];
- }
-
- size_t size() const {
- return table_[0];
- }
-
- // Is the dex register 'vreg' in the context or on the stack? Should not be called when the
- // 'kind' is unknown or constant.
- bool IsInContext(size_t vreg, uint32_t& vmap_offset, VRegKind kind) const {
- DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
- kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
- kind == kDoubleHiVReg || kind == kImpreciseConstant);
- vmap_offset = 0xEBAD0FF5;
- // TODO: take advantage of the registers being ordered
- // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
- // are never promoted to floating point registers.
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- bool in_floats = false;
- for (size_t i = 0; i < size(); ++i) {
- // Stop if we find what we are are looking for.
- if ((table_[i + 1] == vreg) && (in_floats == is_float)) {
- vmap_offset = i;
- return true;
- }
- // 0xffff is the marker for LR (return PC on x86), following it are spilled float registers.
- if (table_[i + 1] == 0xffff) {
- in_floats = true;
- }
- }
- return false;
- }
-
- // Compute the register number that corresponds to the entry in the vmap (vmap_offset, computed
- // by IsInContext above). If the kind is floating point then the result will be a floating point
- // register number, otherwise it will be an integer register number.
- uint32_t ComputeRegister(uint32_t spill_mask, uint32_t vmap_offset, VRegKind kind) const {
- // Compute the register we need to load from the context.
- DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
- kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
- kind == kDoubleHiVReg || kind == kImpreciseConstant);
- // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
- // are never promoted to floating point registers.
- bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- uint32_t matches = 0;
- if (is_float) {
- while (table_[matches] != 0xffff) {
- matches++;
- }
- }
- CHECK_LT(vmap_offset - matches, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
- uint32_t spill_shifts = 0;
- while (matches != (vmap_offset + 1)) {
- DCHECK_NE(spill_mask, 0u);
- matches += spill_mask & 1; // Add 1 if the low bit is set
- spill_mask >>= 1;
- spill_shifts++;
- }
- spill_shifts--; // wind back one as we want the last match
- return spill_shifts;
- }
-
- private:
- const uint16_t* table_;
-};
-
} // namespace art
#endif // ART_RUNTIME_STACK_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index c79caa21f8..07a003d330 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -66,6 +66,7 @@
#include "utils.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
+#include "vmap_table.h"
#include "well_known_classes.h"
namespace art {
@@ -2043,7 +2044,7 @@ class ReferenceMapVisitor : public StackVisitor {
if (num_regs > 0) {
const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
DCHECK(reg_bitmap != NULL);
- const VmapTable vmap_table(m->GetVmapTableRaw());
+ const VmapTable vmap_table(m->GetVmapTable());
uint32_t core_spills = m->GetCoreSpillMask();
uint32_t fp_spills = m->GetFpSpillMask();
size_t frame_size = m->GetFrameSizeInBytes();
@@ -2055,7 +2056,7 @@ class ReferenceMapVisitor : public StackVisitor {
if (TestBitmap(reg, reg_bitmap)) {
uint32_t vmap_offset;
mirror::Object* ref;
- if (vmap_table.IsInContext(reg, vmap_offset, kReferenceVReg)) {
+ if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
kReferenceVReg));
ref = reinterpret_cast<mirror::Object*>(val);
diff --git a/runtime/vmap_table.h b/runtime/vmap_table.h
new file mode 100644
index 0000000000..abc50b973c
--- /dev/null
+++ b/runtime/vmap_table.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_VMAP_TABLE_H_
+#define ART_RUNTIME_VMAP_TABLE_H_
+
+#include "base/logging.h"
+#include "leb128.h"
+#include "stack.h"
+
+namespace art {
+
+class VmapTable {
+ public:
+ explicit VmapTable(const uint8_t* table) : table_(table) {
+ }
+
+ // Look up nth entry, not called from performance critical code.
+ uint16_t operator[](size_t n) const {
+ const uint8_t* table = table_;
+ size_t size = DecodeUnsignedLeb128(&table);
+ CHECK_LT(n, size);
+ uint16_t entry = DecodeUnsignedLeb128(&table);
+ for (size_t i = 0; i < n; ++i) {
+ entry = DecodeUnsignedLeb128(&table);
+ }
+ return entry;
+ }
+
+ size_t Size() const {
+ const uint8_t* table = table_;
+ return DecodeUnsignedLeb128(&table);
+ }
+
+ // Is the dex register 'vreg' in the context or on the stack? Should not be called when the
+ // 'kind' is unknown or constant.
+ bool IsInContext(size_t vreg, VRegKind kind, uint32_t* vmap_offset) const {
+ DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
+ kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
+ kind == kDoubleHiVReg || kind == kImpreciseConstant);
+ *vmap_offset = 0xEBAD0FF5;
+ // TODO: take advantage of the registers being ordered
+ // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
+ // are never promoted to floating point registers.
+ bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+ bool in_floats = false;
+ const uint8_t* table = table_;
+ size_t end = DecodeUnsignedLeb128(&table);
+ for (size_t i = 0; i < end; ++i) {
+      // Stop if we find what we are looking for.
+ uint16_t entry = DecodeUnsignedLeb128(&table);
+ if ((entry == vreg) && (in_floats == is_float)) {
+ *vmap_offset = i;
+ return true;
+ }
+ // 0xffff is the marker for LR (return PC on x86), following it are spilled float registers.
+ if (entry == 0xffff) {
+ in_floats = true;
+ }
+ }
+ return false;
+ }
+
+ // Compute the register number that corresponds to the entry in the vmap (vmap_offset, computed
+ // by IsInContext above). If the kind is floating point then the result will be a floating point
+ // register number, otherwise it will be an integer register number.
+ uint32_t ComputeRegister(uint32_t spill_mask, uint32_t vmap_offset, VRegKind kind) const {
+ // Compute the register we need to load from the context.
+ DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
+ kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
+ kind == kDoubleHiVReg || kind == kImpreciseConstant);
+ // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
+ // are never promoted to floating point registers.
+ bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+ uint32_t matches = 0;
+ if (UNLIKELY(is_float)) {
+ const uint8_t* table = table_;
+ DecodeUnsignedLeb128(&table); // Skip size.
+ while (DecodeUnsignedLeb128(&table) != 0xffff) {
+ matches++;
+ }
+ matches++;
+ }
+ CHECK_LT(vmap_offset - matches, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
+ uint32_t spill_shifts = 0;
+ while (matches != (vmap_offset + 1)) {
+ DCHECK_NE(spill_mask, 0u);
+ matches += spill_mask & 1; // Add 1 if the low bit is set
+ spill_mask >>= 1;
+ spill_shifts++;
+ }
+ spill_shifts--; // wind back one as we want the last match
+ return spill_shifts;
+ }
+
+ private:
+ const uint8_t* const table_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_VMAP_TABLE_H_
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 3b5d80d981..84f5f2ea29 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -73,31 +73,31 @@ struct ReferenceMap2Visitor : public StackVisitor {
// we know the Dex registers with live reference values. Assert that what we
// find is what is expected.
if (m_name.compare("f") == 0) {
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x03U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x03U)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8); // v8: this
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x06U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x06U)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 1); // v8: this, v1: x
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x08U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x08U)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 3, 1); // v8: this, v3: y, v1: x
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x0cU)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x0cU)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 3, 1); // v8: this, v3: y, v1: x
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x0eU)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x0eU)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 3, 1); // v8: this, v3: y, v1: x
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x10U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x10U)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 3, 1); // v8: this, v3: y, v1: x
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x13U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x13U)));
CHECK(ref_bitmap);
// v2 is added because of the instruction at DexPC 0024. Object merges with 0 is Object. See:
// 0024: move-object v3, v2
@@ -107,45 +107,45 @@ struct ReferenceMap2Visitor : public StackVisitor {
// We eliminate the non-live registers at a return, so only v3 is live:
CHECK_REGS_CONTAIN_REFS(3); // v3: y
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x18U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x18U)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x1aU)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1aU)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x1dU)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1dU)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x1fU)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1fU)));
CHECK(ref_bitmap);
// v5 is removed from the root set because there is a "merge" operation.
// See 0015: if-nez v2, 001f.
CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x21U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x21U)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x27U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x27U)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x29U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x29U)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x2cU)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x2cU)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x2fU)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x2fU)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 4, 3, 2, 1); // v8: this, v4: ex, v3: y, v2: y, v1: x
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x32U)));
+ ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x32U)));
CHECK(ref_bitmap);
CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1, 0); // v8: this, v3: y, v2: y, v1: x, v0: ex
}