-rw-r--r--  .gitignore | 1
-rw-r--r--  build/Android.common_build.mk | 12
-rw-r--r--  cmdline/cmdline_parser_test.cc | 25
-rw-r--r--  cmdline/cmdline_types.h | 2
-rw-r--r--  compiler/Android.mk | 4
-rw-r--r--  compiler/common_compiler_test.cc | 16
-rw-r--r--  compiler/compiled_method.cc | 75
-rw-r--r--  compiler/compiled_method.h | 24
-rw-r--r--  compiler/dex/global_value_numbering_test.cc | 2
-rw-r--r--  compiler/dex/gvn_dead_code_elimination_test.cc | 2
-rw-r--r--  compiler/dex/local_value_numbering.h | 2
-rw-r--r--  compiler/dex/local_value_numbering_test.cc | 2
-rw-r--r--  compiler/dex/mir_analysis.cc | 166
-rw-r--r--  compiler/dex/mir_dataflow.cc | 112
-rw-r--r--  compiler/dex/mir_field_info.cc | 30
-rw-r--r--  compiler/dex/mir_field_info.h | 34
-rw-r--r--  compiler/dex/mir_graph.cc | 16
-rw-r--r--  compiler/dex/mir_graph.h | 4
-rw-r--r--  compiler/dex/mir_method_info.cc | 82
-rw-r--r--  compiler/dex/mir_method_info.h | 32
-rw-r--r--  compiler/dex/mir_optimization.cc | 2
-rw-r--r--  compiler/dex/mir_optimization_test.cc | 4
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc | 2
-rw-r--r--  compiler/dex/quick/gen_common.cc | 17
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc | 19
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 18
-rw-r--r--  compiler/dex/quick/quick_compiler.cc | 5
-rw-r--r--  compiler/dex/verification_results.cc | 6
-rw-r--r--  compiler/dex/verified_method.cc | 31
-rw-r--r--  compiler/dex/verified_method.h | 16
-rw-r--r--  compiler/dex/vreg_analysis.cc | 5
-rw-r--r--  compiler/driver/compiler_driver-inl.h | 43
-rw-r--r--  compiler/driver/compiler_driver.cc | 123
-rw-r--r--  compiler/driver/compiler_driver.h | 42
-rw-r--r--  compiler/jit/jit_compiler.cc | 244
-rw-r--r--  compiler/jit/jit_compiler.h | 76
-rw-r--r--  compiler/oat_writer.cc | 2
-rw-r--r--  compiler/oat_writer.h | 4
-rw-r--r--  compiler/utils/dex_instruction_utils.h (renamed from runtime/dex_instruction_utils.h) | 39
-rw-r--r--  runtime/Android.mk | 4
-rw-r--r--  runtime/base/logging.h | 1
-rw-r--r--  runtime/base/mutex.h | 1
-rw-r--r--  runtime/class_linker.cc | 102
-rw-r--r--  runtime/debugger.cc | 7
-rw-r--r--  runtime/dex_file.h | 7
-rw-r--r--  runtime/gc/heap.cc | 2
-rw-r--r--  runtime/gc/space/image_space.cc | 2
-rw-r--r--  runtime/instrumentation.cc | 27
-rw-r--r--  runtime/instrumentation.h | 37
-rw-r--r--  runtime/interpreter/interpreter_goto_table_impl.cc | 25
-rw-r--r--  runtime/java_vm_ext.cc | 3
-rw-r--r--  runtime/jit/jit.cc | 160
-rw-r--r--  runtime/jit/jit.h | 102
-rw-r--r--  runtime/jit/jit_code_cache.cc | 121
-rw-r--r--  runtime/jit/jit_code_cache.h | 118
-rw-r--r--  runtime/jit/jit_instrumentation.cc | 117
-rw-r--r--  runtime/jit/jit_instrumentation.h | 107
-rw-r--r--  runtime/mirror/art_field.cc | 2
-rw-r--r--  runtime/mirror/art_method-inl.h | 5
-rw-r--r--  runtime/mirror/art_method.cc | 17
-rw-r--r--  runtime/mirror/art_method.h | 31
-rw-r--r--  runtime/oat_file.cc | 8
-rw-r--r--  runtime/oat_file.h | 4
-rw-r--r--  runtime/object_lock.cc | 1
-rw-r--r--  runtime/parsed_options.cc | 46
-rw-r--r--  runtime/profiler.cc | 2
-rw-r--r--  runtime/quick/inline_method_analyser.cc | 12
-rw-r--r--  runtime/runtime.cc | 101
-rw-r--r--  runtime/runtime.h | 31
-rw-r--r--  runtime/runtime_options.def | 3
-rw-r--r--  runtime/runtime_options.h | 2
-rw-r--r--  runtime/signal_catcher.cc | 2
-rw-r--r--  runtime/thread.cc | 2
-rw-r--r--  runtime/trace.cc | 8
-rw-r--r--  runtime/trace.h | 3
-rw-r--r--  runtime/transaction.cc | 2
-rw-r--r--  runtime/utils.cc | 6
-rw-r--r--  runtime/verifier/method_verifier.cc | 47
-rw-r--r--  runtime/verifier/method_verifier.h | 18
-rw-r--r--  test/Android.run-test.mk | 10
-rwxr-xr-x  test/etc/run-test-jar | 14
-rwxr-xr-x  test/run-all-tests | 5
-rwxr-xr-x  test/run-test | 4
83 files changed, 449 insertions(+), 2221 deletions(-)
diff --git a/.gitignore b/.gitignore
index 3d1658d84a..1cdfed9231 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,3 @@ USE_LLVM_COMPILER
USE_PORTABLE_COMPILER
SMALL_ART
SEA_IR_ART
-JIT_ART
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index f05ca87f24..3000cdf087 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -48,18 +48,6 @@ $(info Disabling ART_BUILD_HOST_DEBUG)
endif
#
-# Used to enable JIT
-#
-ART_JIT := false
-ifneq ($(wildcard art/JIT_ART),)
-$(info Enabling ART_JIT because of existence of art/JIT_ART)
-ART_JIT := true
-endif
-ifeq ($(WITH_ART_JIT), true)
-ART_JIT := true
-endif
-
-#
# Used to enable smart mode
#
ART_SMALL_MODE := false
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 9c248d47af..288f7ac013 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -410,26 +410,6 @@ TEST_F(CmdlineParserTest, TestCompilerOption) {
} // TEST_F
/*
-* -Xjit, -Xnojit, -Xjitcodecachesize, Xjitcompilethreshold
-*/
-TEST_F(CmdlineParserTest, TestJitOptions) {
- /*
- * Test successes
- */
- {
- EXPECT_SINGLE_PARSE_VALUE(true, "-Xjit", M::UseJIT);
- EXPECT_SINGLE_PARSE_VALUE(false, "-Xnojit", M::UseJIT);
- }
- {
- EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(16 * KB), "-Xjitcodecachesize:16K", M::JITCodeCacheCapacity);
- EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(16 * MB), "-Xjitcodecachesize:16M", M::JITCodeCacheCapacity);
- }
- {
- EXPECT_SINGLE_PARSE_VALUE(12345u, "-Xjitthreshold:12345", M::JITCompileThreshold);
- }
-} // TEST_F
-
-/*
* -X-profile-*
*/
TEST_F(CmdlineParserTest, TestProfilerOptions) {
@@ -514,8 +494,9 @@ TEST_F(CmdlineParserTest, TestIgnoredArguments) {
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:abdef",
"-Xdexopt:foobar", "-Xnoquithandler", "-Xjnigreflimit:ixnay", "-Xgenregmap", "-Xnogenregmap",
"-Xverifyopt:never", "-Xcheckdexsum", "-Xincludeselectedop", "-Xjitop:noop",
- "-Xincludeselectedmethod", "-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:nosuchluck",
- "-Xjitoffset:none", "-Xjitconfig:yes", "-Xjitcheckcg", "-Xjitverbose", "-Xjitprofile",
+ "-Xincludeselectedmethod", "-Xjitthreshold:123", "-Xjitcodecachesize:12345",
+ "-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:nosuchluck", "-Xjitoffset:none",
+ "-Xjitconfig:yes", "-Xjitcheckcg", "-Xjitverbose", "-Xjitprofile",
"-Xjitdisableopt", "-Xjitsuspendpoll", "-XX:mainThreadStackSize=1337"
};
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 04ea368576..180baeced0 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -585,8 +585,6 @@ struct CmdlineType<LogVerbosity> : CmdlineTypeParser<LogVerbosity> {
log_verbosity.heap = true;
} else if (verbose_options[j] == "jdwp") {
log_verbosity.jdwp = true;
- } else if (verbose_options[j] == "jit") {
- log_verbosity.jit = true;
} else if (verbose_options[j] == "jni") {
log_verbosity.jni = true;
} else if (verbose_options[j] == "monitor") {
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 86a27c1b57..beb34dce37 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -79,7 +79,6 @@ LIBART_COMPILER_SRC_FILES := \
driver/compiler_driver.cc \
driver/compiler_options.cc \
driver/dex_compilation_unit.cc \
- jit/jit_compiler.cc \
jni/quick/arm/calling_convention_arm.cc \
jni/quick/arm64/calling_convention_arm64.cc \
jni/quick/mips/calling_convention_mips.cc \
@@ -162,7 +161,8 @@ LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
driver/compiler_options.h \
image_writer.h \
optimizing/locations.h \
- utils/arm/constants_arm.h
+ utils/arm/constants_arm.h \
+ utils/dex_instruction_utils.h
# $(1): target or host
# $(2): ndebug or debug
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index e8354b2a1b..1cd78f8168 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -52,19 +52,19 @@ void CommonCompilerTest::MakeExecutable(mirror::ArtMethod* method) {
const SwapVector<uint8_t>* code = compiled_method->GetQuickCode();
uint32_t code_size = code->size();
CHECK_NE(0u, code_size);
- const SwapVector<uint8_t>* vmap_table = compiled_method->GetVmapTable();
- uint32_t vmap_table_offset = vmap_table->empty() ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table->size();
+ const SwapVector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
+ uint32_t vmap_table_offset = vmap_table.empty() ? 0u
+ : sizeof(OatQuickMethodHeader) + vmap_table.size();
const SwapVector<uint8_t>* mapping_table = compiled_method->GetMappingTable();
bool mapping_table_used = mapping_table != nullptr && !mapping_table->empty();
size_t mapping_table_size = mapping_table_used ? mapping_table->size() : 0U;
uint32_t mapping_table_offset = !mapping_table_used ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size;
+ : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size;
const SwapVector<uint8_t>* gc_map = compiled_method->GetGcMap();
bool gc_map_used = gc_map != nullptr && !gc_map->empty();
size_t gc_map_size = gc_map_used ? gc_map->size() : 0U;
uint32_t gc_map_offset = !gc_map_used ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size + gc_map_size;
+ : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size + gc_map_size;
OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset,
compiled_method->GetFrameSizeInBytes(),
compiled_method->GetCoreSpillMask(),
@@ -72,14 +72,14 @@ void CommonCompilerTest::MakeExecutable(mirror::ArtMethod* method) {
header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
- size_t size = sizeof(method_header) + code_size + vmap_table->size() + mapping_table_size +
+ size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table_size +
gc_map_size;
size_t code_offset = compiled_method->AlignCode(size - code_size);
size_t padding = code_offset - (size - code_size);
chunk->reserve(padding + size);
chunk->resize(sizeof(method_header));
memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
- chunk->insert(chunk->begin(), vmap_table->begin(), vmap_table->end());
+ chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
if (mapping_table_used) {
chunk->insert(chunk->begin(), mapping_table->begin(), mapping_table->end());
}
@@ -212,7 +212,7 @@ void CommonCompilerTest::CompileMethod(mirror::ArtMethod* method) {
CHECK(method != nullptr);
TimingLogger timings("CommonTest::CompileMethod", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
- compiler_driver_->CompileOne(Thread::Current(), method, &timings);
+ compiler_driver_->CompileOne(method, &timings);
TimingLogger::ScopedTiming t2("MakeExecutable", &timings);
MakeExecutable(method);
}
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 1849e7ef64..22be28c4d9 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -20,29 +20,16 @@
namespace art {
CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code, bool owns_code_array)
+ const ArrayRef<const uint8_t>& quick_code)
: compiler_driver_(compiler_driver), instruction_set_(instruction_set),
- owns_code_array_(owns_code_array), quick_code_(nullptr) {
+ quick_code_(nullptr) {
SetCode(&quick_code);
}
void CompiledCode::SetCode(const ArrayRef<const uint8_t>* quick_code) {
if (quick_code != nullptr) {
CHECK(!quick_code->empty());
- if (owns_code_array_) {
- // If we are supposed to own the code, don't deduplicate it.
- CHECK(quick_code_ == nullptr);
- quick_code_ = new SwapVector<uint8_t>(quick_code->begin(), quick_code->end(),
- compiler_driver_->GetSwapSpaceAllocator());
- } else {
- quick_code_ = compiler_driver_->DeduplicateCode(*quick_code);
- }
- }
-}
-
-CompiledCode::~CompiledCode() {
- if (owns_code_array_) {
- delete quick_code_;
+ quick_code_ = compiler_driver_->DeduplicateCode(*quick_code);
}
}
@@ -59,11 +46,11 @@ bool CompiledCode::operator==(const CompiledCode& rhs) const {
return (rhs.quick_code_ == nullptr);
}
-size_t CompiledCode::AlignCode(size_t offset) const {
+uint32_t CompiledCode::AlignCode(uint32_t offset) const {
return AlignCode(offset, instruction_set_);
}
-size_t CompiledCode::AlignCode(size_t offset, InstructionSet instruction_set) {
+uint32_t CompiledCode::AlignCode(uint32_t offset, InstructionSet instruction_set) {
return RoundUp(offset, GetInstructionSetAlignment(instruction_set));
}
@@ -133,39 +120,17 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<LinkerPatch>& patches)
- : CompiledCode(driver, instruction_set, quick_code, !driver->DedupeEnabled()),
- owns_arrays_(!driver->DedupeEnabled()),
- frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask),
- fp_spill_mask_(fp_spill_mask),
+ : CompiledCode(driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes),
+ core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
+ src_mapping_table_(src_mapping_table == nullptr ?
+ driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>()) :
+ driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>(src_mapping_table->Arrange()))),
+ mapping_table_(mapping_table.data() == nullptr ?
+ nullptr : driver->DeduplicateMappingTable(mapping_table)),
+ vmap_table_(driver->DeduplicateVMapTable(vmap_table)),
+ gc_map_(native_gc_map.data() == nullptr ? nullptr : driver->DeduplicateGCMap(native_gc_map)),
+ cfi_info_(cfi_info.data() == nullptr ? nullptr : driver->DeduplicateCFIInfo(cfi_info)),
patches_(patches.begin(), patches.end(), driver->GetSwapSpaceAllocator()) {
- if (owns_arrays_) {
- if (src_mapping_table == nullptr) {
- src_mapping_table_ = new SwapSrcMap(driver->GetSwapSpaceAllocator());
- } else {
- src_mapping_table->Arrange();
- src_mapping_table_ = new SwapSrcMap(src_mapping_table->begin(), src_mapping_table->end(),
- driver->GetSwapSpaceAllocator());
- }
- mapping_table_ = mapping_table.empty() ?
- nullptr : new SwapVector<uint8_t>(mapping_table.begin(), mapping_table.end(),
- driver->GetSwapSpaceAllocator());
- vmap_table_ = new SwapVector<uint8_t>(vmap_table.begin(), vmap_table.end(),
- driver->GetSwapSpaceAllocator());
- gc_map_ = native_gc_map.empty() ? nullptr :
- new SwapVector<uint8_t>(native_gc_map.begin(), native_gc_map.end(),
- driver->GetSwapSpaceAllocator());
- cfi_info_ = cfi_info.empty() ? nullptr :
- new SwapVector<uint8_t>(cfi_info.begin(), cfi_info.end(), driver->GetSwapSpaceAllocator());
- } else {
- src_mapping_table_ = src_mapping_table == nullptr ?
- driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>()) :
- driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>(src_mapping_table->Arrange()));
- mapping_table_ = mapping_table.empty() ?
- nullptr : driver->DeduplicateMappingTable(mapping_table);
- vmap_table_ = driver->DeduplicateVMapTable(vmap_table);
- gc_map_ = native_gc_map.empty() ? nullptr : driver->DeduplicateGCMap(native_gc_map);
- cfi_info_ = cfi_info.empty() ? nullptr : driver->DeduplicateCFIInfo(cfi_info);
- }
}
CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
@@ -229,14 +194,4 @@ void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver,
alloc.deallocate(m, 1);
}
-CompiledMethod::~CompiledMethod() {
- if (owns_arrays_) {
- delete src_mapping_table_;
- delete mapping_table_;
- delete vmap_table_;
- delete gc_map_;
- delete cfi_info_;
- }
-}
-
} // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index d6a07f6226..6013507ac4 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -27,6 +27,10 @@
#include "utils/array_ref.h"
#include "utils/swap_space.h"
+namespace llvm {
+ class Function;
+} // namespace llvm
+
namespace art {
class CompilerDriver;
@@ -35,9 +39,7 @@ class CompiledCode {
public:
// For Quick to supply an code blob
CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code, bool owns_code_array);
-
- virtual ~CompiledCode();
+ const ArrayRef<const uint8_t>& quick_code);
InstructionSet GetInstructionSet() const {
return instruction_set_;
@@ -54,8 +56,8 @@ class CompiledCode {
// To align an offset from a page-aligned value to make it suitable
// for code storage. For example on ARM, to ensure that PC relative
// valu computations work out as expected.
- size_t AlignCode(size_t offset) const;
- static size_t AlignCode(size_t offset, InstructionSet instruction_set);
+ uint32_t AlignCode(uint32_t offset) const;
+ static uint32_t AlignCode(uint32_t offset, InstructionSet instruction_set);
// returns the difference between the code address and a usable PC.
// mainly to cope with kThumb2 where the lower bit must be set.
@@ -76,9 +78,6 @@ class CompiledCode {
const InstructionSet instruction_set_;
- // If we own the code array (means that we free in destructor).
- const bool owns_code_array_;
-
// Used to store the PIC code for Quick.
SwapVector<uint8_t>* quick_code_;
@@ -123,7 +122,6 @@ class SrcMap FINAL : public std::vector<SrcMapElem, Allocator> {
using std::vector<SrcMapElem, Allocator>::size;
explicit SrcMap() {}
- explicit SrcMap(const Allocator& alloc) : std::vector<SrcMapElem, Allocator>(alloc) {}
template <class InputIt>
SrcMap(InputIt first, InputIt last, const Allocator& alloc)
@@ -293,7 +291,7 @@ class CompiledMethod FINAL : public CompiledCode {
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<LinkerPatch>& patches = ArrayRef<LinkerPatch>());
- virtual ~CompiledMethod();
+ ~CompiledMethod() {}
static CompiledMethod* SwapAllocCompiledMethod(
CompilerDriver* driver,
@@ -349,9 +347,9 @@ class CompiledMethod FINAL : public CompiledCode {
return mapping_table_;
}
- const SwapVector<uint8_t>* GetVmapTable() const {
+ const SwapVector<uint8_t>& GetVmapTable() const {
DCHECK(vmap_table_ != nullptr);
- return vmap_table_;
+ return *vmap_table_;
}
SwapVector<uint8_t> const* GetGcMap() const {
@@ -367,8 +365,6 @@ class CompiledMethod FINAL : public CompiledCode {
}
private:
- // Whether or not the arrays are owned by the compiled method or dedupe sets.
- const bool owns_arrays_;
// For quick code, the size of the activation used by the code.
const size_t frame_size_in_bytes_;
// For quick code, a bit mask describing spilled GPR callee-save registers.
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index b91c3cac8f..54e34eaa81 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -142,7 +142,7 @@ class GlobalValueNumberingTest : public testing::Test {
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index 4d2b8b319f..954e9f1d37 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -143,7 +143,7 @@ class GvnDeadCodeEliminationTest : public testing::Test {
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 379c952fe8..97ea05a914 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -21,8 +21,8 @@
#include "base/arena_object.h"
#include "base/logging.h"
-#include "dex_instruction_utils.h"
#include "global_value_numbering.h"
+#include "utils/dex_instruction_utils.h"
namespace art {
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 566527ad42..d1c3a6b4ba 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -96,7 +96,7 @@ class LocalValueNumberingTest : public testing::Test {
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index a89b2508d4..31dbc60594 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -416,8 +416,8 @@ static const uint16_t kAnalysisAttributes[kMirOpLast] = {
// 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
kAnInvoke | kAnHeavyWeight,
- // 73 RETURN_VOID_BARRIER
- kAnBranch,
+ // 73 UNUSED_73
+ kAnNone,
// 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
kAnInvoke | kAnHeavyWeight,
@@ -752,88 +752,88 @@ static const uint16_t kAnalysisAttributes[kMirOpLast] = {
// E2 USHR_INT_LIT8 vAA, vBB, #+CC
kAnMath | kAnInt,
- // E3 IGET_QUICK
+ // E3 IGET_VOLATILE
kAnNone,
- // E4 IGET_WIDE_QUICK
+ // E4 IPUT_VOLATILE
kAnNone,
- // E5 IGET_OBJECT_QUICK
+ // E5 SGET_VOLATILE
kAnNone,
- // E6 IPUT_QUICK
+ // E6 SPUT_VOLATILE
kAnNone,
- // E7 IPUT_WIDE_QUICK
+ // E7 IGET_OBJECT_VOLATILE
kAnNone,
- // E8 IPUT_OBJECT_QUICK
+ // E8 IGET_WIDE_VOLATILE
kAnNone,
- // E9 INVOKE_VIRTUAL_QUICK
- kAnInvoke | kAnHeavyWeight,
-
- // EA INVOKE_VIRTUAL_RANGE_QUICK
- kAnInvoke | kAnHeavyWeight,
-
- // EB IPUT_BOOLEAN_QUICK
+ // E9 IPUT_WIDE_VOLATILE
kAnNone,
- // EC IPUT_BYTE_QUICK
+ // EA SGET_WIDE_VOLATILE
kAnNone,
- // ED IPUT_CHAR_QUICK
+ // EB SPUT_WIDE_VOLATILE
kAnNone,
- // EE IPUT_SHORT_QUICK
+ // EC BREAKPOINT
kAnNone,
- // EF IGET_BOOLEAN_QUICK
- kAnNone,
+ // ED THROW_VERIFICATION_ERROR
+ kAnHeavyWeight | kAnBranch,
- // F0 IGET_BYTE_QUICK
+ // EE EXECUTE_INLINE
kAnNone,
- // F1 IGET_CHAR_QUICK
+ // EF EXECUTE_INLINE_RANGE
kAnNone,
- // F2 IGET_SHORT_QUICK
- kAnNone,
+ // F0 INVOKE_OBJECT_INIT_RANGE
+ kAnInvoke | kAnHeavyWeight,
- // F3 UNUSED_F3
- kAnNone,
+ // F1 RETURN_VOID_BARRIER
+ kAnBranch,
- // F4 UNUSED_F4
+ // F2 IGET_QUICK
kAnNone,
- // F5 UNUSED_F5
+ // F3 IGET_WIDE_QUICK
kAnNone,
- // F6 UNUSED_F6
+ // F4 IGET_OBJECT_QUICK
kAnNone,
- // F7 UNUSED_F7
+ // F5 IPUT_QUICK
kAnNone,
- // F8 UNUSED_F8
+ // F6 IPUT_WIDE_QUICK
kAnNone,
- // F9 UNUSED_F9
+ // F7 IPUT_OBJECT_QUICK
kAnNone,
- // FA UNUSED_FA
- kAnNone,
+ // F8 INVOKE_VIRTUAL_QUICK
+ kAnInvoke | kAnHeavyWeight,
- // FB UNUSED_FB
- kAnNone,
+ // F9 INVOKE_VIRTUAL_QUICK_RANGE
+ kAnInvoke | kAnHeavyWeight,
- // FC UNUSED_FC
+ // FA INVOKE_SUPER_QUICK
+ kAnInvoke | kAnHeavyWeight,
+
+ // FB INVOKE_SUPER_QUICK_RANGE
+ kAnInvoke | kAnHeavyWeight,
+
+ // FC IPUT_OBJECT_VOLATILE
kAnNone,
- // FD UNUSED_FD
+ // FD SGET_OBJECT_VOLATILE
kAnNone,
- // FE UNUSED_FE
+ // FE SPUT_OBJECT_VOLATILE
kAnNone,
// FF UNUSED_FF
@@ -1203,13 +1203,12 @@ bool MIRGraph::SkipCompilation(std::string* skip_message) {
}
void MIRGraph::DoCacheFieldLoweringInfo() {
- static constexpr uint32_t kFieldIndexFlagQuickened = 0x80000000;
// All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
ScopedArenaAllocator allocator(&cu_->arena_stack);
- auto* field_idxs = allocator.AllocArray<uint32_t>(max_refs, kArenaAllocMisc);
- DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(
- max_refs, kArenaAllocMisc);
+ uint16_t* field_idxs = allocator.AllocArray<uint16_t>(max_refs, kArenaAllocMisc);
+ DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(max_refs, kArenaAllocMisc);
+
// Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
size_t ifield_pos = 0u;
size_t sfield_pos = max_refs;
@@ -1222,36 +1221,23 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
// Get field index and try to find it among existing indexes. If found, it's usually among
// the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
// is a linear search, it actually performs much better than map based approach.
- const bool is_iget_or_iput = IsInstructionIGetOrIPut(mir->dalvikInsn.opcode);
- const bool is_iget_or_iput_quick = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode);
- if (is_iget_or_iput || is_iget_or_iput_quick) {
- uint32_t field_idx;
- DexMemAccessType access_type;
- if (is_iget_or_iput) {
- field_idx = mir->dalvikInsn.vC;
- access_type = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
- } else {
- DCHECK(is_iget_or_iput_quick);
- // Set kFieldIndexFlagQuickened so that we don't deduplicate against non quickened field
- // indexes.
- field_idx = mir->offset | kFieldIndexFlagQuickened;
- access_type = IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode);
- }
+ if (IsInstructionIGetOrIPut(mir->dalvikInsn.opcode)) {
+ uint16_t field_idx = mir->dalvikInsn.vC;
size_t i = ifield_pos;
while (i != 0u && field_idxs[i - 1] != field_idx) {
--i;
}
if (i != 0u) {
mir->meta.ifield_lowering_info = i - 1;
- DCHECK_EQ(field_types[i - 1], access_type);
+ DCHECK_EQ(field_types[i - 1], IGetOrIPutMemAccessType(mir->dalvikInsn.opcode));
} else {
mir->meta.ifield_lowering_info = ifield_pos;
field_idxs[ifield_pos] = field_idx;
- field_types[ifield_pos] = access_type;
+ field_types[ifield_pos] = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
++ifield_pos;
}
} else if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
- auto field_idx = mir->dalvikInsn.vB;
+ uint16_t field_idx = mir->dalvikInsn.vB;
size_t i = sfield_pos;
while (i != max_refs && field_idxs[i] != field_idx) {
++i;
@@ -1275,12 +1261,7 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
ifield_lowering_infos_.reserve(ifield_pos);
for (size_t pos = 0u; pos != ifield_pos; ++pos) {
- const uint32_t field_idx = field_idxs[pos];
- const bool is_quickened = (field_idx & kFieldIndexFlagQuickened) != 0;
- const uint32_t masked_field_idx = field_idx & ~kFieldIndexFlagQuickened;
- CHECK_LT(masked_field_idx, 1u << 16);
- ifield_lowering_infos_.push_back(
- MirIFieldLoweringInfo(masked_field_idx, field_types[pos], is_quickened));
+ ifield_lowering_infos_.push_back(MirIFieldLoweringInfo(field_idxs[pos], field_types[pos]));
}
MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
ifield_lowering_infos_.data(), ifield_pos);
@@ -1301,19 +1282,18 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
void MIRGraph::DoCacheMethodLoweringInfo() {
static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
- static constexpr uint32_t kMethodIdxFlagQuickened = 0x80000000;
// Embed the map value in the entry to avoid extra padding in 64-bit builds.
struct MapEntry {
// Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
const MethodReference* devirt_target;
- uint32_t target_method_idx;
- uint32_t vtable_idx;
+ uint16_t target_method_idx;
uint16_t invoke_type;
// Map value.
uint32_t lowering_info_index;
};
+ // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
struct MapEntryComparator {
bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
if (lhs.target_method_idx != rhs.target_method_idx) {
@@ -1322,9 +1302,6 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
if (lhs.invoke_type != rhs.invoke_type) {
return lhs.invoke_type < rhs.invoke_type;
}
- if (lhs.vtable_idx != rhs.vtable_idx) {
- return lhs.vtable_idx < rhs.vtable_idx;
- }
if (lhs.devirt_target != rhs.devirt_target) {
if (lhs.devirt_target == nullptr) {
return true;
@@ -1342,7 +1319,7 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
ScopedArenaAllocator allocator(&cu_->arena_stack);
// All INVOKE instructions take 3 code units and there must also be a RETURN.
- const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
+ uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
// Map invoke key (see MapEntry) to lowering info index and vice versa.
// The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
@@ -1353,43 +1330,28 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
allocator.AllocArray<const MapEntry*>(max_refs, kArenaAllocMisc);
// Find INVOKE insns and their devirtualization targets.
- const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
if (bb->block_type != kDalvikByteCode) {
continue;
}
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- const bool is_quick_invoke = IsInstructionQuickInvoke(mir->dalvikInsn.opcode);
- const bool is_invoke = IsInstructionInvoke(mir->dalvikInsn.opcode);
- if (is_quick_invoke || is_invoke) {
- uint32_t vtable_index = 0;
- uint32_t target_method_idx = 0;
- uint32_t invoke_type_idx = 0; // Default to virtual (in case of quickened).
- DCHECK_EQ(invoke_types[invoke_type_idx], kVirtual);
- if (is_quick_invoke) {
- // We need to store the vtable index since we can't necessarily recreate it at resolve
- // phase if the dequickening resolved to an interface method.
- vtable_index = mir->dalvikInsn.vB;
- // Fake up the method index by storing the mir offset so that we can read the dequicken
- // info in resolve.
- target_method_idx = mir->offset | kMethodIdxFlagQuickened;
- } else {
- DCHECK(is_invoke);
- // Decode target method index and invoke type.
- invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
- target_method_idx = mir->dalvikInsn.vB;
- }
+ if (IsInstructionInvoke(mir->dalvikInsn.opcode)) {
+ // Decode target method index and invoke type.
+ uint16_t target_method_idx = mir->dalvikInsn.vB;
+ DexInvokeType invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
+
// Find devirtualization target.
// TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
// ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
// and increment it as needed instead of making O(log n) lookups.
+ const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
+
// Try to insert a new entry. If the insertion fails, we will have found an old one.
MapEntry entry = {
devirt_target,
target_method_idx,
- vtable_index,
invoke_types[invoke_type_idx],
static_cast<uint32_t>(invoke_map.size())
};
@@ -1400,24 +1362,22 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
}
}
}
+
if (invoke_map.empty()) {
return;
}
+
// Prepare unique method infos, set method info indexes for their MIRs.
+ DCHECK_EQ(method_lowering_infos_.size(), 0u);
const size_t count = invoke_map.size();
method_lowering_infos_.reserve(count);
for (size_t pos = 0u; pos != count; ++pos) {
const MapEntry* entry = sequential_entries[pos];
- const bool is_quick = (entry->target_method_idx & kMethodIdxFlagQuickened) != 0;
- const uint32_t masked_method_idx = entry->target_method_idx & ~kMethodIdxFlagQuickened;
- MirMethodLoweringInfo method_info(masked_method_idx,
- static_cast<InvokeType>(entry->invoke_type), is_quick);
+ MirMethodLoweringInfo method_info(entry->target_method_idx,
+ static_cast<InvokeType>(entry->invoke_type));
if (entry->devirt_target != nullptr) {
method_info.SetDevirtualizationTarget(*entry->devirt_target);
}
- if (is_quick) {
- method_info.SetVTableIndex(entry->vtable_idx);
- }
method_lowering_infos_.push_back(method_info);
}
MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index dfaff6ce3d..f9f7e22b03 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -374,7 +374,7 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
// 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // 73 RETURN_VOID_BARRIER
+ // 73 UNUSED_73
DF_NOP,
// 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
@@ -710,89 +710,89 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
// E2 USHR_INT_LIT8 vAA, vBB, #+CC
DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
- // E3 IGET_QUICK
+ // E3 IGET_VOLATILE
DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
- // E4 IGET_WIDE_QUICK
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E5 IGET_OBJECT_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // E6 IPUT_QUICK
+ // E4 IPUT_VOLATILE
DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
- // E7 IPUT_WIDE_QUICK
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+ // E5 SGET_VOLATILE
+ DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
- // E8 IPUT_OBJECT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
+ // E6 SPUT_VOLATILE
+ DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
- // E9 INVOKE_VIRTUAL_QUICK
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+ // E7 IGET_OBJECT_VOLATILE
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
- // EA INVOKE_VIRTUAL_RANGE_QUICK
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+ // E8 IGET_WIDE_VOLATILE
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
- // EB IPUT_BOOLEAN_QUICK vA, vB, index
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+ // E9 IPUT_WIDE_VOLATILE
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
- // EC IPUT_BYTE_QUICK vA, vB, index
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+ // EA SGET_WIDE_VOLATILE
+ DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
- // ED IPUT_CHAR_QUICK vA, vB, index
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+ // EB SPUT_WIDE_VOLATILE
+ DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
- // EE IPUT_SHORT_QUICK vA, vB, index
- DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+ // EC BREAKPOINT
+ DF_NOP,
- // EF IGET_BOOLEAN_QUICK vA, vB, index
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+ // ED THROW_VERIFICATION_ERROR
+ DF_NOP | DF_UMS,
- // F0 IGET_BYTE_QUICK vA, vB, index
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+ // EE EXECUTE_INLINE
+ DF_FORMAT_35C,
- // F1 IGET_CHAR_QUICK vA, vB, index
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+ // EF EXECUTE_INLINE_RANGE
+ DF_FORMAT_3RC,
- // F2 IGET_SHORT_QUICK vA, vB, index
- DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
- // F3 UNUSED_F3
+ // F0 INVOKE_OBJECT_INIT_RANGE
DF_NOP,
- // F4 UNUSED_F4
+ // F1 RETURN_VOID_BARRIER
DF_NOP,
- // F5 UNUSED_F5
- DF_NOP,
+ // F2 IGET_QUICK
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
- // F6 UNUSED_F6
- DF_NOP,
+ // F3 IGET_WIDE_QUICK
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
- // F7 UNUSED_F7
- DF_NOP,
+ // F4 IGET_OBJECT_QUICK
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
- // F8 UNUSED_F8
- DF_NOP,
+ // F5 IPUT_QUICK
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
- // F9 UNUSED_F9
- DF_NOP,
+ // F6 IPUT_WIDE_QUICK
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
- // FA UNUSED_FA
- DF_NOP,
+ // F7 IPUT_OBJECT_QUICK
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
- // FB UNUSED_FB
- DF_NOP,
+ // F8 INVOKE_VIRTUAL_QUICK
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // FC UNUSED_FC
- DF_NOP,
+ // F9 INVOKE_VIRTUAL_QUICK_RANGE
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
- // FD UNUSED_FD
- DF_NOP,
+ // FA INVOKE_SUPER_QUICK
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
- // FE UNUSED_FE
- DF_NOP,
+ // FB INVOKE_SUPER_QUICK_RANGE
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // FC IPUT_OBJECT_VOLATILE
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
+
+ // FD SGET_OBJECT_VOLATILE
+ DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
+
+ // FE SPUT_OBJECT_VOLATILE
+ DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
// FF UNUSED_FF
DF_NOP,
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index d2079a254d..53afcad871 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -35,9 +35,8 @@ void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
DCHECK(field_infos != nullptr);
DCHECK_NE(count, 0u);
for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
- MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType(), it->IsQuickened());
- unresolved.field_offset_ = it->field_offset_;
- unresolved.CheckEquals(*it);
+ MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType());
+ DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
}
}
@@ -50,30 +49,13 @@ void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
Handle<mirror::Class> referrer_class(hs.NewHandle(
compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
- const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve fields and record all available info.
+
for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
- uint32_t field_idx;
- mirror::ArtField* resolved_field;
- if (!it->IsQuickened()) {
- field_idx = it->field_idx_;
- resolved_field = compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit,
- field_idx, false);
- } else {
- const auto mir_offset = it->field_idx_;
- // For quickened instructions, it->field_offset_ actually contains the mir offset.
- // We need to use the de-quickening info to get dex file / field idx
- auto* field_idx_ptr = verified_method->GetDequickenIndex(mir_offset);
- CHECK(field_idx_ptr != nullptr);
- field_idx = field_idx_ptr->index;
- StackHandleScope<1> hs2(soa.Self());
- auto h_dex_cache = hs2.NewHandle(compiler_driver->FindDexCache(field_idx_ptr->dex_file));
- resolved_field = compiler_driver->ResolveFieldWithDexFile(
- soa, h_dex_cache, class_loader, field_idx_ptr->dex_file, field_idx, false);
- // Since we don't have a valid field index we can't go slow path later.
- CHECK(resolved_field != nullptr);
- }
+ uint32_t field_idx = it->field_idx_;
+ mirror::ArtField* resolved_field =
+ compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, false);
if (UNLIKELY(resolved_field == nullptr)) {
continue;
}
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index ca5695893e..98b2da8299 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -19,8 +19,8 @@
#include "base/macros.h"
#include "dex_file.h"
-#include "dex_instruction_utils.h"
#include "offsets.h"
+#include "utils/dex_instruction_utils.h"
namespace art {
@@ -39,9 +39,6 @@ class MirFieldInfo {
uint16_t FieldIndex() const {
return field_idx_;
}
- void SetFieldIndex(uint16_t field_idx) {
- field_idx_ = field_idx;
- }
bool IsStatic() const {
return (flags_ & kFlagIsStatic) != 0u;
@@ -54,9 +51,6 @@ class MirFieldInfo {
const DexFile* DeclaringDexFile() const {
return declaring_dex_file_;
}
- void SetDeclaringDexFile(const DexFile* dex_file) {
- declaring_dex_file_ = dex_file;
- }
uint16_t DeclaringClassIndex() const {
return declaring_class_idx_;
@@ -70,35 +64,20 @@ class MirFieldInfo {
return (flags_ & kFlagIsVolatile) != 0u;
}
- // IGET_QUICK, IGET_BYTE_QUICK, ...
- bool IsQuickened() const {
- return (flags_ & kFlagIsQuickened) != 0u;
- }
-
DexMemAccessType MemAccessType() const {
return static_cast<DexMemAccessType>((flags_ >> kBitMemAccessTypeBegin) & kMemAccessTypeMask);
}
- void CheckEquals(const MirFieldInfo& other) const {
- CHECK_EQ(field_idx_, other.field_idx_);
- CHECK_EQ(flags_, other.flags_);
- CHECK_EQ(declaring_field_idx_, other.declaring_field_idx_);
- CHECK_EQ(declaring_class_idx_, other.declaring_class_idx_);
- CHECK_EQ(declaring_dex_file_, other.declaring_dex_file_);
- }
-
protected:
enum {
kBitIsStatic = 0,
kBitIsVolatile,
- kBitIsQuickened,
kBitMemAccessTypeBegin,
kBitMemAccessTypeEnd = kBitMemAccessTypeBegin + 3, // 3 bits for raw type.
kFieldInfoBitEnd = kBitMemAccessTypeEnd
};
static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
- static constexpr uint16_t kFlagIsQuickened = 1u << kBitIsQuickened;
static constexpr uint16_t kMemAccessTypeMask = 7u;
static_assert((1u << (kBitMemAccessTypeEnd - kBitMemAccessTypeBegin)) - 1u == kMemAccessTypeMask,
"Invalid raw type mask");
@@ -138,10 +117,8 @@ class MirIFieldLoweringInfo : public MirFieldInfo {
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Construct an unresolved instance field lowering info.
- explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened)
- : MirFieldInfo(field_idx,
- kFlagIsVolatile | (is_quickened ? kFlagIsQuickened : 0u),
- type), // Without kFlagIsStatic.
+ explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
+ : MirFieldInfo(field_idx, kFlagIsVolatile, type), // Without kFlagIsStatic.
field_offset_(0u) {
}
@@ -157,11 +134,6 @@ class MirIFieldLoweringInfo : public MirFieldInfo {
return field_offset_;
}
- void CheckEquals(const MirIFieldLoweringInfo& other) const {
- MirFieldInfo::CheckEquals(other);
- CHECK_EQ(field_offset_.Uint32Value(), other.field_offset_.Uint32Value());
- }
-
private:
enum {
kBitFastGet = kFieldInfoBitEnd,
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index f354a49111..76b5e44df0 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -1673,6 +1673,12 @@ void MIRGraph::GetBlockName(BasicBlock* bb, char* name) {
}
}
+const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
+ // TODO: for inlining support, use current code unit.
+ const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
+ return cu_->dex_file->GetShorty(method_id.proto_idx_);
+}
+
const char* MIRGraph::GetShortyFromMethodReference(const MethodReference& target_method) {
const DexFile::MethodId& method_id =
target_method.dex_file->GetMethodId(target_method.dex_method_index);
@@ -1718,7 +1724,8 @@ void MIRGraph::DumpMIRGraph() {
* high-word loc for wide arguments. Also pull up any following
* MOVE_RESULT and incorporate it into the invoke.
*/
-CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range) {
+CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
+ bool is_range) {
CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
kArenaAllocMisc));
MIR* move_result_mir = FindMoveResult(bb, mir);
@@ -1737,13 +1744,6 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bo
info->opt_flags = mir->optimization_flags;
info->type = type;
info->is_range = is_range;
- if (IsInstructionQuickInvoke(mir->dalvikInsn.opcode)) {
- const auto& method_info = GetMethodLoweringInfo(mir);
- info->method_ref = method_info.GetTargetMethod();
- } else {
- info->method_ref = MethodReference(GetCurrentDexCompilationUnit()->GetDexFile(),
- mir->dalvikInsn.vB);
- }
info->index = mir->dalvikInsn.vB;
info->offset = mir->offset;
info->mir = mir;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 3dae5b4fa9..e5abd3be51 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -504,7 +504,6 @@ struct CallInfo {
int opt_flags;
InvokeType type;
uint32_t dex_idx;
- MethodReference method_ref;
uint32_t index; // Method idx for invokes, type idx for FilledNewArray.
uintptr_t direct_code;
uintptr_t direct_method;
@@ -688,7 +687,7 @@ class MIRGraph {
void DoCacheMethodLoweringInfo();
- const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
+ const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) {
DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
return method_lowering_infos_[mir->meta.method_lowering_info];
}
@@ -1133,6 +1132,7 @@ class MIRGraph {
std::string GetSSAName(int ssa_reg);
std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
void GetBlockName(BasicBlock* bb, char* name);
+ const char* GetShortyFromTargetIdx(int);
const char* GetShortyFromMethodReference(const MethodReference& target_method);
void DumpMIRGraph();
CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 3d3d9790c3..b234950450 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -33,103 +33,51 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
DCHECK(method_infos != nullptr);
DCHECK_NE(count, 0u);
for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
- MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType(), it->IsQuickened());
- unresolved.declaring_dex_file_ = it->declaring_dex_file_;
- unresolved.vtable_idx_ = it->vtable_idx_;
+ MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType());
if (it->target_dex_file_ != nullptr) {
unresolved.target_dex_file_ = it->target_dex_file_;
unresolved.target_method_idx_ = it->target_method_idx_;
}
- if (kIsDebugBuild) {
- unresolved.CheckEquals(*it);
- }
+ DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
}
}
// We're going to resolve methods and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<4> hs(soa.Self());
+ StackHandleScope<3> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
Handle<mirror::Class> referrer_class(hs.NewHandle(
compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
- auto current_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve methods and record all available info.
- const DexFile* const dex_file = mUnit->GetDexFile();
- const bool use_jit = Runtime::Current()->UseJit();
- const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
- // For quickened invokes, the dex method idx is actually the mir offset.
- if (it->IsQuickened()) {
- const auto* dequicken_ref = verified_method->GetDequickenIndex(it->method_idx_);
- CHECK(dequicken_ref != nullptr);
- it->target_dex_file_ = dequicken_ref->dex_file;
- it->target_method_idx_ = dequicken_ref->index;
- }
// Remember devirtualized invoke target and set the called method to the default.
MethodReference devirt_ref(it->target_dex_file_, it->target_method_idx_);
MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
+ it->target_dex_file_ = mUnit->GetDexFile();
+ it->target_method_idx_ = it->MethodIndex();
+
InvokeType invoke_type = it->GetInvokeType();
- mirror::ArtMethod* resolved_method = nullptr;
- if (!it->IsQuickened()) {
- it->target_dex_file_ = dex_file;
- it->target_method_idx_ = it->MethodIndex();
- current_dex_cache.Assign(dex_cache.Get());
- resolved_method = compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit,
- it->MethodIndex(), invoke_type);
- } else {
- // The method index is actually the dex PC in this case.
- // Calculate the proper dex file and target method idx.
- CHECK(use_jit);
- CHECK_EQ(invoke_type, kVirtual);
- // Don't devirt if we are in a different dex file since we can't have direct invokes in
- // another dex file unless we always put a direct / patch pointer.
- devirt_target = nullptr;
- current_dex_cache.Assign(
- Runtime::Current()->GetClassLinker()->FindDexCache(*it->target_dex_file_));
- CHECK(current_dex_cache.Get() != nullptr);
- DexCompilationUnit cu(
- mUnit->GetCompilationUnit(), mUnit->GetClassLoader(), mUnit->GetClassLinker(),
- *it->target_dex_file_, nullptr /* code_item not used */, 0u /* class_def_idx not used */,
- it->target_method_idx_, 0u /* access_flags not used */,
- nullptr /* verified_method not used */);
- resolved_method = compiler_driver->ResolveMethod(soa, current_dex_cache, class_loader, &cu,
- it->target_method_idx_, invoke_type, false);
- if (resolved_method != nullptr) {
- // Since this was a dequickened virtual, it is guaranteed to be resolved. However, it may be
- // resolved to an interface method. If this is the case then change the invoke type to
- // interface with the assumption that sharp_type will be kVirtual.
- if (resolved_method->GetInvokeType() == kInterface) {
- it->flags_ = (it->flags_ & ~(kInvokeTypeMask << kBitInvokeTypeBegin)) |
- (static_cast<uint16_t>(kInterface) << kBitInvokeTypeBegin);
- }
- }
- }
+ mirror::ArtMethod* resolved_method =
+ compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit, it->MethodIndex(),
+ invoke_type);
if (UNLIKELY(resolved_method == nullptr)) {
continue;
}
compiler_driver->GetResolvedMethodDexFileLocation(resolved_method,
&it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_method_idx_);
- if (!it->IsQuickened()) {
- // For quickened invoke virtuals we may have desharpened to an interface method which
- // wont give us the right method index, in this case blindly dispatch or else we can't
- // compile the method. Converting the invoke to interface dispatch doesn't work since we
- // have no way to get the dex method index for quickened invoke virtuals in the interface
- // trampolines.
- it->vtable_idx_ =
- compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
- }
+ it->vtable_idx_ = compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
- MethodReference target_method(it->target_dex_file_, it->target_method_idx_);
+ MethodReference target_method(mUnit->GetDexFile(), it->MethodIndex());
int fast_path_flags = compiler_driver->IsFastInvoke(
- soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method,
- &invoke_type, &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
- const bool is_referrers_class = referrer_class.Get() == resolved_method->GetDeclaringClass();
- const bool is_class_initialized =
+ soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method, &invoke_type,
+ &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
+ bool is_referrers_class = (referrer_class.Get() == resolved_method->GetDeclaringClass());
+ bool is_class_initialized =
compiler_driver->IsMethodsClassInitialized(referrer_class.Get(), resolved_method);
uint16_t other_flags = it->flags_ &
~(kFlagFastPath | kFlagClassIsInitialized | (kInvokeTypeMask << kBitSharpTypeBegin));
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index e131c96a81..08fb103cf6 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -46,9 +46,6 @@ class MirMethodInfo {
const DexFile* DeclaringDexFile() const {
return declaring_dex_file_;
}
- void SetDeclaringDexFile(const DexFile* dex_file) {
- declaring_dex_file_ = dex_file;
- }
uint16_t DeclaringClassIndex() const {
return declaring_class_idx_;
@@ -101,12 +98,11 @@ class MirMethodLoweringInfo : public MirMethodInfo {
MirMethodLoweringInfo* method_infos, size_t count)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened)
+ MirMethodLoweringInfo(uint16_t method_idx, InvokeType type)
: MirMethodInfo(method_idx,
((type == kStatic) ? kFlagIsStatic : 0u) |
(static_cast<uint16_t>(type) << kBitInvokeTypeBegin) |
- (static_cast<uint16_t>(type) << kBitSharpTypeBegin) |
- (is_quickened ? kFlagQuickened : 0u)),
+ (static_cast<uint16_t>(type) << kBitSharpTypeBegin)),
direct_code_(0u),
direct_method_(0u),
target_dex_file_(nullptr),
@@ -135,11 +131,6 @@ class MirMethodLoweringInfo : public MirMethodInfo {
return (flags_ & kFlagClassIsInitialized) != 0u;
}
- // Returns true iff the method invoke is INVOKE_VIRTUAL_QUICK or INVOKE_VIRTUAL_RANGE_QUICK.
- bool IsQuickened() const {
- return (flags_ & kFlagQuickened) != 0u;
- }
-
InvokeType GetInvokeType() const {
return static_cast<InvokeType>((flags_ >> kBitInvokeTypeBegin) & kInvokeTypeMask);
}
@@ -155,9 +146,6 @@ class MirMethodLoweringInfo : public MirMethodInfo {
uint16_t VTableIndex() const {
return vtable_idx_;
}
- void SetVTableIndex(uint16_t index) {
- vtable_idx_ = index;
- }
uintptr_t DirectCode() const {
return direct_code_;
@@ -171,20 +159,6 @@ class MirMethodLoweringInfo : public MirMethodInfo {
return stats_flags_;
}
- void CheckEquals(const MirMethodLoweringInfo& info) const {
- CHECK_EQ(method_idx_, info.method_idx_);
- CHECK_EQ(flags_, info.flags_);
- CHECK_EQ(declaring_method_idx_, info.declaring_method_idx_);
- CHECK_EQ(declaring_class_idx_, info.declaring_class_idx_);
- CHECK_EQ(declaring_dex_file_, info.declaring_dex_file_);
- CHECK_EQ(direct_code_, info.direct_code_);
- CHECK_EQ(direct_method_, info.direct_method_);
- CHECK_EQ(target_dex_file_, info.target_dex_file_);
- CHECK_EQ(target_method_idx_, info.target_method_idx_);
- CHECK_EQ(vtable_idx_, info.vtable_idx_);
- CHECK_EQ(stats_flags_, info.stats_flags_);
- }
-
private:
enum {
kBitFastPath = kMethodInfoBitEnd,
@@ -194,14 +168,12 @@ class MirMethodLoweringInfo : public MirMethodInfo {
kBitSharpTypeEnd = kBitSharpTypeBegin + 3, // 3 bits for sharp type.
kBitIsReferrersClass = kBitSharpTypeEnd,
kBitClassIsInitialized,
- kBitQuickened,
kMethodLoweringInfoBitEnd
};
static_assert(kMethodLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
- static constexpr uint16_t kFlagQuickened = 1u << kBitQuickened;
static constexpr uint16_t kInvokeTypeMask = 7u;
static_assert((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
"assert invoke type bits failed");
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 93749e4424..fd67d4ebec 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1437,7 +1437,7 @@ void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke,
nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
0u /* access_flags not used */, nullptr /* verified_method not used */);
DexMemAccessType type = IGetOrIPutMemAccessType(iget_or_iput->dalvikInsn.opcode);
- MirIFieldLoweringInfo inlined_field_info(field_idx, type, false);
+ MirIFieldLoweringInfo inlined_field_info(field_idx, type);
MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
DCHECK(inlined_field_info.IsResolved());
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 9ce5ebbc1b..be05b80d83 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -254,7 +254,7 @@ class MirOptimizationTest : public testing::Test {
cu_.mir_graph->method_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const MethodDef* def = &defs[i];
- MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type, false);
+ MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type);
if (def->declaring_dex_file != 0u) {
method_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
method_info.declaring_class_idx_ = def->declaring_class_idx;
@@ -407,7 +407,7 @@ class NullCheckEliminationTest : public MirOptimizationTest {
cu_.mir_graph->ifield_lowering_infos_.reserve(count);
for (size_t i = 0u; i != count; ++i) {
const IFieldDef* def = &defs[i];
- MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
+ MirIFieldLoweringInfo field_info(def->field_idx, def->type);
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_class_idx_ = def->declaring_class_idx;
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index f636e3b880..7245853125 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -427,7 +427,7 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
InlineMethod intrinsic;
{
ReaderMutexLock mu(Thread::Current(), lock_);
- auto it = inline_methods_.find(info->method_ref.dex_method_index);
+ auto it = inline_methods_.find(info->index);
if (it == inline_methods_.end() || (it->second.flags & kInlineIntrinsic) == 0) {
return false;
}
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 6f68d1ac2b..3c9b7a3ed3 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -865,12 +865,7 @@ void Mir2Lir::HandleSlowPaths() {
void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
RegLocation rl_dest, RegLocation rl_obj) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
- if (kIsDebugBuild) {
- auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
- IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
- IGetMemAccessType(mir->dalvikInsn.opcode);
- DCHECK_EQ(mem_access_type, field_info.MemAccessType()) << mir->dalvikInsn.opcode;
- }
+ DCHECK_EQ(IGetMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
if (!ForceSlowFieldPath(cu_) && field_info.FastGet()) {
RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
@@ -895,9 +890,6 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type
StoreValue(rl_dest, rl_result);
}
} else {
- if (field_info.DeclaringDexFile() != nullptr) {
- DCHECK_EQ(field_info.DeclaringDexFile(), cu_->dex_file);
- }
DCHECK(SizeMatchesTypeForEntrypoint(size, type));
QuickEntrypointEnum target;
switch (type) {
@@ -947,12 +939,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type
void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
- if (kIsDebugBuild) {
- auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
- IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
- IPutMemAccessType(mir->dalvikInsn.opcode);
- DCHECK_EQ(mem_access_type, field_info.MemAccessType());
- }
+ DCHECK_EQ(IPutMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
if (!ForceSlowFieldPath(cu_) && field_info.FastPut()) {
RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 040b07cedd..8e3df7c7a2 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -863,12 +863,11 @@ RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
RegLocation res;
if (info->result.location == kLocInvalid) {
// If result is unused, return a sink target based on type of invoke target.
- res = GetReturn(
- ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
+ res = GetReturn(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
} else {
res = info->result;
DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
+ ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
}
return res;
}
@@ -877,12 +876,11 @@ RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
RegLocation res;
if (info->result.location == kLocInvalid) {
// If result is unused, return a sink target based on type of invoke target.
- res = GetReturnWide(ShortyToRegClass(
- mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
+ res = GetReturnWide(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
} else {
res = info->result;
DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
+ ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
}
return res;
}
@@ -1420,8 +1418,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
void Mir2Lir::GenInvoke(CallInfo* info) {
DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
- const DexFile* dex_file = info->method_ref.dex_file;
- if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(dex_file)
+ if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
->GenIntrinsic(this, info)) {
return;
}
@@ -1431,7 +1428,7 @@ void Mir2Lir::GenInvoke(CallInfo* info) {
void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
int call_state = 0;
LIR* null_ck;
- LIR** p_null_ck = nullptr;
+ LIR** p_null_ck = NULL;
NextCallInsn next_call_insn;
FlushAllRegs(); /* Everything to home location */
// Explicit register usage
@@ -1443,7 +1440,6 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
info->type = method_info.GetSharpType();
bool fast_path = method_info.FastPath();
bool skip_this;
-
if (info->type == kInterface) {
next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
skip_this = fast_path;
@@ -1473,8 +1469,7 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
// Finish up any of the call sequence not interleaved in arg loading
while (call_state >= 0) {
call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
- method_info.DirectCode(), method_info.DirectMethod(),
- original_type);
+ method_info.DirectCode(), method_info.DirectMethod(), original_type);
}
LIR* call_insn = GenCallInsn(method_info);
MarkSafepointPC(call_insn);
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 966a92d290..34e5e25efe 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -540,7 +540,6 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenMoveException(rl_dest);
break;
- case Instruction::RETURN_VOID_BARRIER:
case Instruction::RETURN_VOID:
if (((cu_->access_flags & kAccConstructor) != 0) &&
cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
@@ -791,12 +790,10 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
break;
- case Instruction::IGET_OBJECT_QUICK:
case Instruction::IGET_OBJECT:
GenIGet(mir, opt_flags, kReference, Primitive::kPrimNot, rl_dest, rl_src[0]);
break;
- case Instruction::IGET_WIDE_QUICK:
case Instruction::IGET_WIDE:
// kPrimLong and kPrimDouble share the same entrypoints.
if (rl_dest.fp) {
@@ -806,7 +803,6 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
}
break;
- case Instruction::IGET_QUICK:
case Instruction::IGET:
if (rl_dest.fp) {
GenIGet(mir, opt_flags, kSingle, Primitive::kPrimFloat, rl_dest, rl_src[0]);
@@ -815,54 +811,43 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
}
break;
- case Instruction::IGET_CHAR_QUICK:
case Instruction::IGET_CHAR:
GenIGet(mir, opt_flags, kUnsignedHalf, Primitive::kPrimChar, rl_dest, rl_src[0]);
break;
- case Instruction::IGET_SHORT_QUICK:
case Instruction::IGET_SHORT:
GenIGet(mir, opt_flags, kSignedHalf, Primitive::kPrimShort, rl_dest, rl_src[0]);
break;
- case Instruction::IGET_BOOLEAN_QUICK:
case Instruction::IGET_BOOLEAN:
GenIGet(mir, opt_flags, kUnsignedByte, Primitive::kPrimBoolean, rl_dest, rl_src[0]);
break;
- case Instruction::IGET_BYTE_QUICK:
case Instruction::IGET_BYTE:
GenIGet(mir, opt_flags, kSignedByte, Primitive::kPrimByte, rl_dest, rl_src[0]);
break;
- case Instruction::IPUT_WIDE_QUICK:
case Instruction::IPUT_WIDE:
GenIPut(mir, opt_flags, rl_src[0].fp ? kDouble : k64, rl_src[0], rl_src[1]);
break;
- case Instruction::IPUT_OBJECT_QUICK:
case Instruction::IPUT_OBJECT:
GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1]);
break;
- case Instruction::IPUT_QUICK:
case Instruction::IPUT:
GenIPut(mir, opt_flags, rl_src[0].fp ? kSingle : k32, rl_src[0], rl_src[1]);
break;
- case Instruction::IPUT_BYTE_QUICK:
- case Instruction::IPUT_BOOLEAN_QUICK:
case Instruction::IPUT_BYTE:
case Instruction::IPUT_BOOLEAN:
GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1]);
break;
- case Instruction::IPUT_CHAR_QUICK:
case Instruction::IPUT_CHAR:
GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1]);
break;
- case Instruction::IPUT_SHORT_QUICK:
case Instruction::IPUT_SHORT:
GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1]);
break;
@@ -936,12 +921,9 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
break;
- case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
break;
-
- case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
break;
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fcf4716c1a..19c2a5a3a3 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -542,11 +542,6 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
void QuickCompiler::InitCompilationUnit(CompilationUnit& cu) const {
// Disable optimizations according to instruction set.
cu.disable_opt |= kDisabledOptimizationsPerISA[cu.instruction_set];
- if (Runtime::Current()->UseJit()) {
- // Disable these optimizations for JIT until quickened byte codes are done being implemented.
- // TODO: Find a cleaner way to do this.
- cu.disable_opt |= 1u << kLocalValueNumbering;
- }
}
void QuickCompiler::Init() {
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 51a3d84382..4ff173d83a 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -66,10 +66,8 @@ bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
// TODO: Investigate why are we doing the work again for this method and try to avoid it.
LOG(WARNING) << "Method processed more than once: "
<< PrettyMethod(ref.dex_method_index, *ref.dex_file);
- if (!Runtime::Current()->UseJit()) {
- DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
- DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
- }
+ DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
+ DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
DCHECK_EQ(it->second->GetDexGcMap().size(), verified_method->GetDexGcMap().size());
delete it->second;
verified_methods_.erase(it);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 42d66be60d..21e965d932 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -24,7 +24,6 @@
#include "base/stl_util.h"
#include "dex_file.h"
#include "dex_instruction-inl.h"
-#include "dex_instruction_utils.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
@@ -53,11 +52,6 @@ const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_ve
if (method_verifier->HasVirtualOrInterfaceInvokes()) {
verified_method->GenerateDevirtMap(method_verifier);
}
-
- // Only need dequicken info for JIT so far.
- if (Runtime::Current()->UseJit()) {
- verified_method->GenerateDequickenMap(method_verifier);
- }
}
if (method_verifier->HasCheckCasts()) {
@@ -71,12 +65,6 @@ const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const {
return (it != devirt_map_.end()) ? &it->second : nullptr;
}
-const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const {
- DCHECK(Runtime::Current()->UseJit());
- auto it = dequicken_map_.find(dex_pc);
- return (it != dequicken_map_.end()) ? &it->second : nullptr;
-}
-
bool VerifiedMethod::IsSafeCast(uint32_t pc) const {
return std::binary_search(safe_cast_set_.begin(), safe_cast_set_.end(), pc);
}
@@ -194,7 +182,7 @@ void VerifiedMethod::ComputeGcMapSizes(verifier::MethodVerifier* method_verifier
*log2_max_gc_pc = i;
}
-void VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verifier) {
+void VerifiedMethod::GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier) {
if (method_verifier->HasFailures()) {
return;
}
@@ -208,24 +196,13 @@ void VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verif
if (is_virtual_quick || is_range_quick) {
uint32_t dex_pc = inst->GetDexPc(insns);
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- mirror::ArtMethod* method =
- method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick);
+ mirror::ArtMethod* method = method_verifier->GetQuickInvokedMethod(inst, line,
+ is_range_quick);
CHECK(method != nullptr);
// The verifier must know what the type of the object was or else we would have gotten a
// failure. Put the dex method index in the dequicken map since we need this to get number of
// arguments in the compiler.
- dequicken_map_.Put(dex_pc, DexFileReference(method->GetDexFile(),
- method->GetDexMethodIndex()));
- } else if (IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) {
- uint32_t dex_pc = inst->GetDexPc(insns);
- verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- mirror::ArtField* field = method_verifier->GetQuickFieldAccess(inst, line);
- CHECK(field != nullptr);
- // The verifier must know what the type of the field was or else we would have gotten a
- // failure. Put the dex field index in the dequicken map since we need this for lowering
- // in the compiler.
- // TODO: Putting a field index in a method reference is gross.
- dequicken_map_.Put(dex_pc, DexFileReference(field->GetDexFile(), field->GetDexFieldIndex()));
+ dequicken_map_.Put(dex_pc, method->ToMethodReference());
}
}
}
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 748bdcb71c..fe9dfd1cb0 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -20,7 +20,6 @@
#include <vector>
#include "base/mutex.h"
-#include "dex_file.h"
#include "method_reference.h"
#include "safe_map.h"
@@ -40,9 +39,6 @@ class VerifiedMethod {
// Devirtualization map type maps dex offset to concrete method reference.
typedef SafeMap<uint32_t, MethodReference> DevirtualizationMap;
- // Devirtualization map type maps dex offset to field / method idx.
- typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
-
static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
~VerifiedMethod() = default;
@@ -62,10 +58,6 @@ class VerifiedMethod {
// Returns the devirtualization target method, or nullptr if none.
const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
- // Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
- // no entry for that dex pc.
- const DexFileReference* GetDequickenIndex(uint32_t dex_pc) const;
-
// Returns true if the cast can statically be verified to be redundant
// by using the check-cast elision peephole optimization in the verifier.
bool IsSafeCast(uint32_t pc) const;
@@ -94,7 +86,7 @@ class VerifiedMethod {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Generate dequickening map into dequicken_map_.
- void GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
+ void GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Generate safe case set into safe_cast_set_.
@@ -103,9 +95,9 @@ class VerifiedMethod {
std::vector<uint8_t> dex_gc_map_;
DevirtualizationMap devirt_map_;
- // Dequicken map is required for compiling quickened byte codes. The quicken maps from
- // dex PC to dex method index or dex field index based on the instruction.
- DequickenMap dequicken_map_;
+  // The dequicken map is required so the compiler can compile quickened invokes. It lets us
+  // recover the dex method index, which in turn gives the required argument count.
+ DevirtualizationMap dequicken_map_;
SafeCastSet safe_cast_set_;
};
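For context on the comment above: after this change the dequicken map is simply a per-dex-PC table of MethodReference entries, so when the compiler meets a quickened invoke it can look up the callee's dex method index and, from that, the shorty and argument count. A rough stand-alone sketch of that lookup pattern, using std::map in place of ART's SafeMap and stub types for DexFile/MethodReference (illustrative only, not the runtime's API):

#include <cstdint>
#include <map>

struct DexFile {};  // stub; the real type lives in the runtime
struct MethodReference {
  const DexFile* dex_file;
  uint32_t dex_method_index;
};

using DequickenMap = std::map<uint32_t, MethodReference>;  // dex PC -> resolved callee

// Record the resolved target of a quickened invoke at dex_pc (done during verification).
void RecordDequickenEntry(DequickenMap& map, uint32_t dex_pc,
                          const DexFile* dex_file, uint32_t method_idx) {
  map[dex_pc] = MethodReference{dex_file, method_idx};
}

// The compiler asks for the callee at a dex PC; nullptr means no entry, i.e. not a quickened invoke.
const MethodReference* LookupDequickenEntry(const DequickenMap& map, uint32_t dex_pc) {
  auto it = map.find(dex_pc);
  return it != map.end() ? &it->second : nullptr;
}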
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 2b78e38f5a..b620969ae2 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -19,7 +19,6 @@
#include "compiler_ir.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex_flags.h"
-#include "driver/dex_compilation_unit.h"
namespace art {
@@ -260,8 +259,8 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
if ((flags & Instruction::kInvoke) &&
(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
DCHECK_EQ(next, 0);
- const auto& lowering_info = GetMethodLoweringInfo(mir);
- const char* shorty = GetShortyFromMethodReference(lowering_info.GetTargetMethod());
+ int target_idx = mir->dalvikInsn.vB;
+ const char* shorty = GetShortyFromTargetIdx(target_idx);
// Handle result type if floating point
if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
MIR* move_result_mir = FindMoveResult(bb, mir);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 4a35e9fbe7..9948c82663 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -56,13 +56,14 @@ inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
return referrer_class;
}
-inline mirror::ArtField* CompilerDriver::ResolveFieldWithDexFile(
+inline mirror::ArtField* CompilerDriver::ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
+ Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static) {
- DCHECK_EQ(dex_cache->GetDexFile(), dex_file);
- mirror::ArtField* resolved_field = Runtime::Current()->GetClassLinker()->ResolveField(
- *dex_file, field_idx, dex_cache, class_loader, is_static);
+ DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
+ DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ mirror::ArtField* resolved_field = mUnit->GetClassLinker()->ResolveField(
+ *mUnit->GetDexFile(), field_idx, dex_cache, class_loader, is_static);
DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
if (UNLIKELY(resolved_field == nullptr)) {
// Clean up any exception left by type resolution.
@@ -77,19 +78,6 @@ inline mirror::ArtField* CompilerDriver::ResolveFieldWithDexFile(
return resolved_field;
}
-inline mirror::DexCache* CompilerDriver::FindDexCache(const DexFile* dex_file) {
- return Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file);
-}
-
-inline mirror::ArtField* CompilerDriver::ResolveField(
- const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
- uint32_t field_idx, bool is_static) {
- DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- return ResolveFieldWithDexFile(soa, dex_cache, class_loader, mUnit->GetDexFile(), field_idx,
- is_static);
-}
-
inline void CompilerDriver::GetResolvedFieldDexFileLocation(
mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) {
@@ -184,7 +172,7 @@ inline bool CompilerDriver::IsStaticFieldsClassInitialized(mirror::Class* referr
inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
- uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) {
+ uint32_t method_idx, InvokeType invoke_type) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
@@ -196,8 +184,7 @@ inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
soa.Self()->ClearException();
return nullptr;
}
- if (check_incompatible_class_change &&
- UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
+ if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
// Silently return nullptr on incompatible class change.
return nullptr;
}
@@ -240,14 +227,14 @@ inline int CompilerDriver::IsFastInvoke(
target_method->dex_method_index))) {
return 0;
}
+
// Sharpen a virtual call into a direct call when the target is known not to have been
// overridden (ie is final).
- const bool same_dex_file = target_method->dex_file == mUnit->GetDexFile();
- bool can_sharpen_virtual_based_on_type = same_dex_file &&
+ bool can_sharpen_virtual_based_on_type =
(*invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
// For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
// the super class.
- bool can_sharpen_super_based_on_type = same_dex_file && (*invoke_type == kSuper) &&
+ bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
(referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
resolved_method->GetMethodIndex() < methods_class->GetVTableLength() &&
(methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method) &&
@@ -256,10 +243,10 @@ inline int CompilerDriver::IsFastInvoke(
if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
- CHECK_EQ(target_method->dex_file, mUnit->GetDexFile());
- DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index),
- resolved_method) << PrettyMethod(resolved_method);
+ CHECK(target_method->dex_file == mUnit->GetDexFile());
+ DCHECK(dex_cache.Get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
+ resolved_method) << PrettyMethod(resolved_method);
int stats_flags = kFlagMethodResolved;
GetCodeAndMethodForDirectCall(/*out*/invoke_type,
kDirect, // Sharp type
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 15b3d08a37..b8a893649b 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -360,7 +360,6 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
classes_to_compile_(compiled_classes),
thread_count_(thread_count),
stats_(new AOTCompilationStats),
- dedupe_enabled_(true),
dump_stats_(dump_stats),
dump_passes_(dump_passes),
dump_cfg_file_name_(dump_cfg_file_name),
@@ -381,7 +380,12 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
compiler_->Init();
- CHECK_EQ(image_, image_classes_.get() != nullptr);
+ CHECK(!Runtime::Current()->IsStarted());
+ if (image_) {
+ CHECK(image_classes_.get() != nullptr);
+ } else {
+ CHECK(image_classes_.get() == nullptr);
+ }
// Read the profile file if one is provided.
if (!profile_file.empty()) {
@@ -395,32 +399,26 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateCode(const ArrayRef<const uint8_t>& code) {
- DCHECK(dedupe_enabled_);
return dedupe_code_.Add(Thread::Current(), code);
}
SwapSrcMap* CompilerDriver::DeduplicateSrcMappingTable(const ArrayRef<SrcMapElem>& src_map) {
- DCHECK(dedupe_enabled_);
return dedupe_src_mapping_table_.Add(Thread::Current(), src_map);
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateMappingTable(const ArrayRef<const uint8_t>& code) {
- DCHECK(dedupe_enabled_);
return dedupe_mapping_table_.Add(Thread::Current(), code);
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateVMapTable(const ArrayRef<const uint8_t>& code) {
- DCHECK(dedupe_enabled_);
return dedupe_vmap_table_.Add(Thread::Current(), code);
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateGCMap(const ArrayRef<const uint8_t>& code) {
- DCHECK(dedupe_enabled_);
return dedupe_gc_map_.Add(Thread::Current(), code);
}
SwapVector<uint8_t>* CompilerDriver::DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info) {
- DCHECK(dedupe_enabled_);
return dedupe_cfi_info_.Add(Thread::Current(), cfi_info);
}
@@ -493,12 +491,8 @@ void CompilerDriver::CompileAll(jobject class_loader,
static DexToDexCompilationLevel GetDexToDexCompilationlevel(
Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- auto* const runtime = Runtime::Current();
- if (runtime->UseJit()) {
- return kDontDexToDexCompile;
- }
const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ClassLinker* class_linker = runtime->GetClassLinker();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* klass = class_linker->FindClass(self, descriptor, class_loader);
if (klass == nullptr) {
CHECK(self->IsExceptionPending());
@@ -524,8 +518,9 @@ static DexToDexCompilationLevel GetDexToDexCompilationlevel(
}
}
-void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings) {
+void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
+ Thread* self = Thread::Current();
jobject jclass_loader;
const DexFile* dex_file;
uint16_t class_def_idx;
@@ -534,8 +529,9 @@ void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingL
InvokeType invoke_type = method->GetInvokeType();
{
ScopedObjectAccessUnchecked soa(self);
- ScopedLocalRef<jobject> local_class_loader(
- soa.Env(), soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
+ ScopedLocalRef<jobject>
+ local_class_loader(soa.Env(),
+ soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get());
// Find the dex_file
dex_file = method->GetDexFile();
@@ -553,7 +549,7 @@ void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingL
// Can we run DEX-to-DEX compiler on this class ?
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
{
- ScopedObjectAccess soa(self);
+ ScopedObjectAccess soa(Thread::Current());
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
@@ -561,33 +557,12 @@ void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingL
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(self, class_loader, *dex_file,
class_def);
}
- CompileMethod(self, code_item, access_flags, invoke_type, class_def_idx, method_idx,
- jclass_loader, *dex_file, dex_to_dex_compilation_level, true);
+ CompileMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx, jclass_loader,
+ *dex_file, dex_to_dex_compilation_level, true);
self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
- self->TransitionFromSuspendedToRunnable();
-}
-CompiledMethod* CompilerDriver::CompileMethod(Thread* self, mirror::ArtMethod* method) {
- const uint32_t method_idx = method->GetDexMethodIndex();
- const uint32_t access_flags = method->GetAccessFlags();
- const InvokeType invoke_type = method->GetInvokeType();
- StackHandleScope<1> hs(self);
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- method->GetDeclaringClass()->GetClassLoader()));
- jobject jclass_loader = class_loader.ToJObject();
- const DexFile* dex_file = method->GetDexFile();
- const uint16_t class_def_idx = method->GetClassDefIndex();
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
- DexToDexCompilationLevel dex_to_dex_compilation_level =
- GetDexToDexCompilationlevel(self, class_loader, *dex_file, class_def);
- const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
- self->TransitionFromRunnableToSuspended(kNative);
- CompileMethod(self, code_item, access_flags, invoke_type, class_def_idx, method_idx,
- jclass_loader, *dex_file, dex_to_dex_compilation_level, true);
- auto* compiled_method = GetCompiledMethod(MethodReference(dex_file, method_idx));
self->TransitionFromSuspendedToRunnable();
- return compiled_method;
}
void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
@@ -1060,8 +1035,7 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
bool* is_type_initialized, bool* use_direct_type_ptr,
uintptr_t* direct_type_ptr, bool* out_is_finalizable) {
ScopedObjectAccess soa(Thread::Current());
- Runtime* runtime = Runtime::Current();
- mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
return false;
@@ -1071,8 +1045,7 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
return false;
}
*out_is_finalizable = resolved_class->IsFinalizable();
- gc::Heap* heap = runtime->GetHeap();
- const bool compiling_boot = heap->IsCompilingBoot();
+ const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
const bool support_boot_image_fixup = GetSupportBootImageFixup();
if (compiling_boot) {
// boot -> boot class pointers.
@@ -1088,15 +1061,10 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
} else {
return false;
}
- } else if (runtime->UseJit() && !heap->IsMovableObject(resolved_class)) {
- *is_type_initialized = resolved_class->IsInitialized();
- // If the class may move around, then don't embed it as a direct pointer.
- *use_direct_type_ptr = true;
- *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
- return true;
} else {
// True if the class is in the image at app compiling time.
- const bool class_in_image = heap->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
+ const bool class_in_image =
+ Runtime::Current()->GetHeap()->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
if (class_in_image && support_boot_image_fixup) {
// boot -> app class pointers.
*is_type_initialized = resolved_class->IsInitialized();
@@ -1289,10 +1257,8 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
// invoked, so this can be passed to the out-of-line runtime support code.
*direct_code = 0;
*direct_method = 0;
- Runtime* const runtime = Runtime::Current();
- gc::Heap* const heap = runtime->GetHeap();
bool use_dex_cache = GetCompilerOptions().GetCompilePic(); // Off by default
- const bool compiling_boot = heap->IsCompilingBoot();
+ const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
// TODO This is somewhat hacky. We should refactor all of this invoke codepath.
const bool force_relocations = (compiling_boot ||
GetCompilerOptions().GetIncludePatchInformation());
@@ -1301,15 +1267,14 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
}
// TODO: support patching on all architectures.
use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_);
- mirror::Class* declaring_class = method->GetDeclaringClass();
- bool method_code_in_boot = declaring_class->GetClassLoader() == nullptr;
+ bool method_code_in_boot = (method->GetDeclaringClass()->GetClassLoader() == nullptr);
if (!use_dex_cache) {
if (!method_code_in_boot) {
use_dex_cache = true;
} else {
bool has_clinit_trampoline =
- method->IsStatic() && !declaring_class->IsInitialized();
- if (has_clinit_trampoline && declaring_class != referrer_class) {
+ method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
+ if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) {
// Ensure we run the clinit trampoline unless we are invoking a static method in the same
// class.
use_dex_cache = true;
@@ -1337,9 +1302,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
// The method is defined not within this dex file. We need a dex cache slot within the current
// dex file or direct pointers.
bool must_use_direct_pointers = false;
- mirror::DexCache* dex_cache = declaring_class->GetDexCache();
- if (target_method->dex_file == dex_cache->GetDexFile() &&
- !(runtime->UseJit() && dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) == nullptr)) {
+ if (target_method->dex_file == method->GetDeclaringClass()->GetDexCache()->GetDexFile()) {
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
if (no_guarantee_of_dex_cache_entry) {
@@ -1352,7 +1315,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
} else {
if (force_relocations && !use_dex_cache) {
target_method->dex_method_index = method->GetDexMethodIndex();
- target_method->dex_file = dex_cache->GetDexFile();
+ target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
}
must_use_direct_pointers = true;
}
@@ -1367,7 +1330,8 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
*type = sharp_type;
}
} else {
- bool method_in_image = heap->FindSpaceFromObject(method, false)->IsImageSpace();
+ bool method_in_image =
+ Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
if (method_in_image || compiling_boot) {
// We know we must be able to get to the method in the image, so use that pointer.
CHECK(!method->IsAbstract());
@@ -2036,11 +2000,10 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
ClassLinker* class_linker = manager->GetClassLinker();
jobject jclass_loader = manager->GetClassLoader();
- Thread* self = Thread::Current();
{
// Use a scoped object access to perform to the quick SkipClass check.
const char* descriptor = dex_file.GetClassDescriptor(class_def);
- ScopedObjectAccess soa(self);
+ ScopedObjectAccess soa(Thread::Current());
StackHandleScope<3> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
@@ -2067,7 +2030,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
// Can we run DEX-to-DEX compiler on this class ?
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
{
- ScopedObjectAccess soa(self);
+ ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
@@ -2098,7 +2061,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
continue;
}
previous_direct_method_idx = method_idx;
- driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+ driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
compilation_enabled);
@@ -2115,7 +2078,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
continue;
}
previous_virtual_method_idx = method_idx;
- driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+ driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
compilation_enabled);
@@ -2148,10 +2111,10 @@ static bool InstructionSetHasGenericJniStub(InstructionSet isa) {
}
}
-void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_item,
- uint32_t access_flags, InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx,
- jobject class_loader, const DexFile& dex_file,
+void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
+ InvokeType invoke_type, uint16_t class_def_idx,
+ uint32_t method_idx, jobject class_loader,
+ const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level,
bool compilation_enabled) {
CompiledMethod* compiled_method = nullptr;
@@ -2199,6 +2162,7 @@ void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_i
}
}
+ Thread* self = Thread::Current();
if (compiled_method != nullptr) {
// Count non-relative linker patches.
size_t non_relative_linker_patch_count = 0u;
@@ -2230,21 +2194,6 @@ void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_i
}
}
-void CompilerDriver::RemoveCompiledMethod(const MethodReference& method_ref) {
- CompiledMethod* compiled_method = nullptr;
- {
- MutexLock mu(Thread::Current(), compiled_methods_lock_);
- auto it = compiled_methods_.find(method_ref);
- if (it != compiled_methods_.end()) {
- compiled_method = it->second;
- compiled_methods_.erase(it);
- }
- }
- if (compiled_method != nullptr) {
- CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, compiled_method);
- }
-}
-
CompiledClass* CompilerDriver::GetCompiledClass(ClassReference ref) const {
MutexLock mu(Thread::Current(), compiled_classes_lock_);
ClassTable::const_iterator it = compiled_classes_.find(ref);
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 24b6f177da..b7562442d7 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -45,10 +45,6 @@
namespace art {
-namespace mirror {
-class DexCache;
-} // namespace mirror
-
namespace verifier {
class MethodVerifier;
} // namespace verifier
@@ -111,11 +107,8 @@ class CompilerDriver {
TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- CompiledMethod* CompileMethod(Thread* self, mirror::ArtMethod*)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED;
-
// Compile a single Method.
- void CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings)
+ void CompileOne(mirror::ArtMethod* method, TimingLogger* timings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
VerificationResults* GetVerificationResults() const {
@@ -179,9 +172,6 @@ class CompilerDriver {
size_t GetNonRelativeLinkerPatchCount() const
LOCKS_EXCLUDED(compiled_methods_lock_);
- // Remove and delete a compiled method.
- void RemoveCompiledMethod(const MethodReference& method_ref);
-
void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
uint16_t class_def_index);
bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file, uint16_t class_def_index);
@@ -236,13 +226,6 @@ class CompilerDriver {
uint32_t field_idx, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve a field with a given dex file.
- mirror::ArtField* ResolveFieldWithDexFile(
- const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
- uint32_t field_idx, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Get declaration location of a resolved field.
void GetResolvedFieldDexFileLocation(
mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
@@ -252,10 +235,6 @@ class CompilerDriver {
bool IsFieldVolatile(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
MemberOffset GetFieldOffset(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Find a dex cache for a dex file.
- inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
@@ -282,7 +261,7 @@ class CompilerDriver {
mirror::ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
- uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
+ uint32_t method_idx, InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get declaration location of a resolved field.
@@ -316,13 +295,6 @@ class CompilerDriver {
void ProcessedStaticField(bool resolved, bool local);
void ProcessedInvoke(InvokeType invoke_type, int flags);
- void ComputeFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- const ScopedObjectAccess& soa, bool is_static,
- mirror::ArtField** resolved_field,
- mirror::Class** referrer_class,
- mirror::DexCache** dex_cache)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
MemberOffset* field_offset, bool* is_volatile)
@@ -408,13 +380,6 @@ class CompilerDriver {
return timings_logger_;
}
- void SetDedupeEnabled(bool dedupe_enabled) {
- dedupe_enabled_ = dedupe_enabled;
- }
- bool DedupeEnabled() const {
- return dedupe_enabled_;
- }
-
// Checks if class specified by type_idx is one of the image_classes_
bool IsImageClass(const char* descriptor) const;
@@ -519,7 +484,7 @@ class CompilerDriver {
const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- void CompileMethod(Thread* self, const DexFile::CodeItem* code_item, uint32_t access_flags,
+ void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
jobject class_loader, const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level,
@@ -580,7 +545,6 @@ class CompilerDriver {
class AOTCompilationStats;
std::unique_ptr<AOTCompilationStats> stats_;
- bool dedupe_enabled_;
bool dump_stats_;
const bool dump_passes_;
const std::string& dump_cfg_file_name_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
deleted file mode 100644
index b1d972e44e..0000000000
--- a/compiler/jit/jit_compiler.cc
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jit_compiler.h"
-
-#include "arch/instruction_set.h"
-#include "arch/instruction_set_features.h"
-#include "compiler_callbacks.h"
-#include "dex/pass_manager.h"
-#include "dex/quick_compiler_callbacks.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
-#include "mirror/art_method-inl.h"
-#include "oat_file-inl.h"
-#include "object_lock.h"
-#include "thread_list.h"
-#include "verifier/method_verifier-inl.h"
-
-namespace art {
-namespace jit {
-
-JitCompiler* JitCompiler::Create() {
- return new JitCompiler();
-}
-
-extern "C" void* jit_load(CompilerCallbacks** callbacks) {
- VLOG(jit) << "loading jit compiler";
- auto* const jit_compiler = JitCompiler::Create();
- CHECK(jit_compiler != nullptr);
- *callbacks = jit_compiler->GetCompilerCallbacks();
- VLOG(jit) << "Done loading jit compiler";
- return jit_compiler;
-}
-
-extern "C" void jit_unload(void* handle) {
- DCHECK(handle != nullptr);
- delete reinterpret_cast<JitCompiler*>(handle);
-}
-
-extern "C" bool jit_compile_method(void* handle, mirror::ArtMethod* method, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
- DCHECK(jit_compiler != nullptr);
- return jit_compiler->CompileMethod(self, method);
-}
-
-JitCompiler::JitCompiler() : total_time_(0) {
- auto* pass_manager_options = new PassManagerOptions;
- pass_manager_options->SetDisablePassList("GVN,DCE");
- compiler_options_.reset(new CompilerOptions(
- CompilerOptions::kDefaultCompilerFilter,
- CompilerOptions::kDefaultHugeMethodThreshold,
- CompilerOptions::kDefaultLargeMethodThreshold,
- CompilerOptions::kDefaultSmallMethodThreshold,
- CompilerOptions::kDefaultTinyMethodThreshold,
- CompilerOptions::kDefaultNumDexMethodsThreshold,
- false,
- false,
- CompilerOptions::kDefaultTopKProfileThreshold,
- false,
- false,
- false,
- false,
- true, // pic
- nullptr,
- pass_manager_options,
- nullptr));
- const InstructionSet instruction_set = kRuntimeISA;
- instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
- cumulative_logger_.reset(new CumulativeLogger("jit times"));
- verification_results_.reset(new VerificationResults(compiler_options_.get()));
- method_inliner_map_.reset(new DexFileToMethodInlinerMap);
- callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
- method_inliner_map_.get()));
- compiler_driver_.reset(new CompilerDriver(
- compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(),
- Compiler::kQuick, instruction_set, instruction_set_features_.get(), false,
- nullptr, new std::set<std::string>, 1, false, true,
- std::string(), cumulative_logger_.get(), -1, std::string()));
- // Disable dedupe so we can remove compiled methods.
- compiler_driver_->SetDedupeEnabled(false);
- compiler_driver_->SetSupportBootImageFixup(false);
-}
-
-JitCompiler::~JitCompiler() {
-}
-
-bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) {
- uint64_t start_time = NanoTime();
- StackHandleScope<2> hs(self);
- self->AssertNoPendingException();
- Runtime* runtime = Runtime::Current();
- Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
- if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) {
- VLOG(jit) << "Already compiled " << PrettyMethod(method);
- return true; // Already compiled
- }
- Handle<mirror::Class> h_class(hs.NewHandle(h_method->GetDeclaringClass()));
- if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- VLOG(jit) << "JIT failed to initialize " << PrettyMethod(h_method.Get());
- return false;
- }
- const DexFile* dex_file = h_class->GetDexCache()->GetDexFile();
- MethodReference method_ref(dex_file, h_method->GetDexMethodIndex());
- // Only verify if we don't already have verification results.
- if (verification_results_->GetVerifiedMethod(method_ref) == nullptr) {
- std::string error;
- if (verifier::MethodVerifier::VerifyMethod(h_method.Get(), true, &error) ==
- verifier::MethodVerifier::kHardFailure) {
- VLOG(jit) << "Not compile method " << PrettyMethod(h_method.Get())
- << " due to verification failure " << error;
- return false;
- }
- }
- CompiledMethod* compiled_method(compiler_driver_->CompileMethod(self, h_method.Get()));
- if (compiled_method == nullptr) {
- return false;
- }
- total_time_ += NanoTime() - start_time;
- const bool result = MakeExecutable(compiled_method, h_method.Get());
- // Remove the compiled method to save memory.
- compiler_driver_->RemoveCompiledMethod(method_ref);
- return result;
-}
-
-CompilerCallbacks* JitCompiler::GetCompilerCallbacks() const {
- return callbacks_.get();
-}
-
-uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_method,
- uint8_t* reserve_begin, uint8_t* reserve_end,
- const uint8_t* mapping_table,
- const uint8_t* vmap_table,
- const uint8_t* gc_map) {
- reserve_begin += sizeof(OatQuickMethodHeader);
- reserve_begin = reinterpret_cast<uint8_t*>(
- compiled_method->AlignCode(reinterpret_cast<uintptr_t>(reserve_begin)));
- const auto* quick_code = compiled_method->GetQuickCode();
- CHECK_LE(reserve_begin, reserve_end);
- CHECK_LE(quick_code->size(), static_cast<size_t>(reserve_end - reserve_begin));
- auto* code_ptr = reserve_begin;
- OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
- // Construct the header last.
- const auto frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
- const auto core_spill_mask = compiled_method->GetCoreSpillMask();
- const auto fp_spill_mask = compiled_method->GetFpSpillMask();
- const auto code_size = quick_code->size();
- CHECK_NE(code_size, 0U);
- std::copy(quick_code->data(), quick_code->data() + code_size, code_ptr);
- // After we are done writing we need to update the method header.
- // Write out the method header last.
- method_header = new(method_header)OatQuickMethodHeader(
- code_ptr - mapping_table, code_ptr - vmap_table, code_ptr - gc_map, frame_size_in_bytes,
- core_spill_mask, fp_spill_mask, code_size);
- // Return the code ptr.
- return code_ptr;
-}
-
-bool JitCompiler::AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method,
- OatFile::OatMethod* out_method) {
- Runtime* runtime = Runtime::Current();
- JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
- const auto* quick_code = compiled_method->GetQuickCode();
- if (quick_code == nullptr) {
- return false;
- }
- const auto code_size = quick_code->size();
- Thread* const self = Thread::Current();
- const uint8_t* base = code_cache->CodeCachePtr();
- auto* const mapping_table = compiled_method->GetMappingTable();
- auto* const vmap_table = compiled_method->GetVmapTable();
- auto* const gc_map = compiled_method->GetGcMap();
- // Write out pre-header stuff.
- uint8_t* const mapping_table_ptr = code_cache->AddDataArray(
- self, mapping_table->data(), mapping_table->data() + mapping_table->size());
- if (mapping_table == nullptr) {
- return false; // Out of data cache.
- }
- uint8_t* const vmap_table_ptr = code_cache->AddDataArray(
- self, vmap_table->data(), vmap_table->data() + vmap_table->size());
- if (vmap_table == nullptr) {
- return false; // Out of data cache.
- }
- uint8_t* const gc_map_ptr = code_cache->AddDataArray(
- self, gc_map->data(), gc_map->data() + gc_map->size());
- if (gc_map == nullptr) {
- return false; // Out of data cache.
- }
- // Don't touch this until you protect / unprotect the code.
- const size_t reserve_size = sizeof(OatQuickMethodHeader) + quick_code->size() + 32;
- uint8_t* const code_reserve = code_cache->ReserveCode(self, reserve_size);
- if (code_reserve == nullptr) {
- return false;
- }
- auto* code_ptr = WriteMethodHeaderAndCode(
- compiled_method, code_reserve, code_reserve + reserve_size, mapping_table_ptr,
- vmap_table_ptr, gc_map_ptr);
-
- const size_t thumb_offset = compiled_method->CodeDelta();
- const uint32_t code_offset = code_ptr - base + thumb_offset;
- *out_method = OatFile::OatMethod(base, code_offset);
- DCHECK_EQ(out_method->GetGcMap(), gc_map_ptr);
- DCHECK_EQ(out_method->GetMappingTable(), mapping_table_ptr);
- DCHECK_EQ(out_method->GetVmapTable(), vmap_table_ptr);
- DCHECK_EQ(out_method->GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
- DCHECK_EQ(out_method->GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
- DCHECK_EQ(out_method->GetFpSpillMask(), compiled_method->GetFpSpillMask());
- VLOG(jit) << "JIT added " << PrettyMethod(method) << "@" << method << " ccache_size="
- << PrettySize(code_cache->CodeCacheSize()) << ": " << reinterpret_cast<void*>(code_ptr)
- << "," << reinterpret_cast<void*>(code_ptr + code_size);
- return true;
-}
-
-bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method) {
- CHECK(method != nullptr);
- CHECK(compiled_method != nullptr);
- OatFile::OatMethod oat_method(nullptr, 0);
- if (!AddToCodeCache(method, compiled_method, &oat_method)) {
- return false;
- }
- // TODO: Flush instruction cache.
- oat_method.LinkMethod(method);
- CHECK(Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method))
- << PrettyMethod(method);
- return true;
-}
-
-} // namespace jit
-} // namespace art
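One subtlety in the deleted WriteMethodHeaderAndCode above is the memory-layout convention it relies on: the method header sits immediately before the (aligned) code it describes, so the code pointer minus the header size addresses the header, and the header is placement-constructed last, after the code has been copied. A stripped-down sketch of that trick with a generic header struct (not ART's OatQuickMethodHeader; alignment handling omitted):

#include <cstdint>
#include <cstring>
#include <new>
#include <vector>

struct MethodHeader {  // stand-in for the real pre-header
  uint32_t code_size;
  explicit MethodHeader(uint32_t size) : code_size(size) {}
};

// Lay out [header][code] inside a caller-provided buffer; returns a pointer to the code,
// or nullptr if the buffer is too small.
uint8_t* WriteHeaderAndCode(uint8_t* begin, uint8_t* end, const std::vector<uint8_t>& code) {
  if (static_cast<size_t>(end - begin) < sizeof(MethodHeader) + code.size()) {
    return nullptr;
  }
  uint8_t* code_ptr = begin + sizeof(MethodHeader);  // leave room for the header
  std::memcpy(code_ptr, code.data(), code.size());   // write the code first
  // The header lives directly in front of the code, so callers can always find it
  // at code_ptr - sizeof(MethodHeader). Construct it last, once the code is in place.
  new (code_ptr - sizeof(MethodHeader)) MethodHeader(static_cast<uint32_t>(code.size()));
  return code_ptr;
}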
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
deleted file mode 100644
index 08764991ea..0000000000
--- a/compiler/jit/jit_compiler.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_JIT_JIT_COMPILER_H_
-#define ART_COMPILER_JIT_JIT_COMPILER_H_
-
-#include "base/mutex.h"
-#include "compiler_callbacks.h"
-#include "compiled_method.h"
-#include "dex/verification_results.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "oat_file.h"
-
-namespace art {
-
-class InstructionSetFeatures;
-
-namespace mirror {
-class ArtMethod;
-}
-
-namespace jit {
-
-class JitCompiler {
- public:
- static JitCompiler* Create();
- virtual ~JitCompiler();
- bool CompileMethod(Thread* self, mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // This is in the compiler since the runtime doesn't have access to the compiled method
- // structures.
- bool AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method,
- OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- CompilerCallbacks* GetCompilerCallbacks() const;
- size_t GetTotalCompileTime() const {
- return total_time_;
- }
-
- private:
- uint64_t total_time_;
- std::unique_ptr<CompilerOptions> compiler_options_;
- std::unique_ptr<CumulativeLogger> cumulative_logger_;
- std::unique_ptr<VerificationResults> verification_results_;
- std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
- std::unique_ptr<CompilerCallbacks> callbacks_;
- std::unique_ptr<CompilerDriver> compiler_driver_;
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
-
- explicit JitCompiler();
- uint8_t* WriteMethodHeaderAndCode(
- const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
- const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
- bool MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-};
-
-} // namespace jit
-
-} // namespace art
-
-#endif // ART_COMPILER_JIT_JIT_COMPILER_H_
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 841109105d..9c0157e885 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -549,7 +549,7 @@ struct OatWriter::MappingTableDataAccess {
struct OatWriter::VmapTableDataAccess {
static const SwapVector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
- return compiled_method->GetVmapTable();
+ return &compiled_method->GetVmapTable();
}
static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 980611fe96..e020d31d7e 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -341,8 +341,8 @@ class OatWriter {
if (UNLIKELY(lhs->GetMappingTable() != rhs->GetMappingTable())) {
return lhs->GetMappingTable() < rhs->GetMappingTable();
}
- if (UNLIKELY(lhs->GetVmapTable() != rhs->GetVmapTable())) {
- return lhs->GetVmapTable() < rhs->GetVmapTable();
+ if (UNLIKELY(&lhs->GetVmapTable() != &rhs->GetVmapTable())) {
+ return &lhs->GetVmapTable() < &rhs->GetVmapTable();
}
if (UNLIKELY(lhs->GetGcMap() != rhs->GetGcMap())) {
return lhs->GetGcMap() < rhs->GetGcMap();
diff --git a/runtime/dex_instruction_utils.h b/compiler/utils/dex_instruction_utils.h
index 1a671c5043..bb2c592f13 100644
--- a/runtime/dex_instruction_utils.h
+++ b/compiler/utils/dex_instruction_utils.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DEX_INSTRUCTION_UTILS_H_
-#define ART_RUNTIME_DEX_INSTRUCTION_UTILS_H_
+#ifndef ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
+#define ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
#include "dex_instruction.h"
@@ -58,11 +58,6 @@ constexpr bool IsInstructionInvoke(Instruction::Code opcode) {
opcode != Instruction::RETURN_VOID_BARRIER;
}
-constexpr bool IsInstructionQuickInvoke(Instruction::Code opcode) {
- return opcode == Instruction::INVOKE_VIRTUAL_QUICK ||
- opcode == Instruction::INVOKE_VIRTUAL_RANGE_QUICK;
-}
-
constexpr bool IsInstructionInvokeStatic(Instruction::Code opcode) {
return opcode == Instruction::INVOKE_STATIC || opcode == Instruction::INVOKE_STATIC_RANGE;
}
@@ -107,11 +102,6 @@ constexpr bool IsInstructionIGetOrIPut(Instruction::Code code) {
return Instruction::IGET <= code && code <= Instruction::IPUT_SHORT;
}
-constexpr bool IsInstructionIGetQuickOrIPutQuick(Instruction::Code code) {
- return (code >= Instruction::IGET_QUICK && code <= Instruction::IPUT_OBJECT_QUICK) ||
- (code >= Instruction::IPUT_BOOLEAN_QUICK && code <= Instruction::IGET_SHORT_QUICK);
-}
-
constexpr bool IsInstructionSGetOrSPut(Instruction::Code code) {
return Instruction::SGET <= code && code <= Instruction::SPUT_SHORT;
}
@@ -191,29 +181,6 @@ constexpr DexMemAccessType IGetOrIPutMemAccessType(Instruction::Code code) {
return (code >= Instruction::IPUT) ? IPutMemAccessType(code) : IGetMemAccessType(code);
}
-static inline DexMemAccessType IGetQuickOrIPutQuickMemAccessType(Instruction::Code code) {
- DCHECK(IsInstructionIGetQuickOrIPutQuick(code));
- switch (code) {
- case Instruction::IGET_QUICK: case Instruction::IPUT_QUICK:
- return kDexMemAccessWord;
- case Instruction::IGET_WIDE_QUICK: case Instruction::IPUT_WIDE_QUICK:
- return kDexMemAccessWide;
- case Instruction::IGET_OBJECT_QUICK: case Instruction::IPUT_OBJECT_QUICK:
- return kDexMemAccessObject;
- case Instruction::IGET_BOOLEAN_QUICK: case Instruction::IPUT_BOOLEAN_QUICK:
- return kDexMemAccessBoolean;
- case Instruction::IGET_BYTE_QUICK: case Instruction::IPUT_BYTE_QUICK:
- return kDexMemAccessByte;
- case Instruction::IGET_CHAR_QUICK: case Instruction::IPUT_CHAR_QUICK:
- return kDexMemAccessChar;
- case Instruction::IGET_SHORT_QUICK: case Instruction::IPUT_SHORT_QUICK:
- return kDexMemAccessShort;
- default:
- LOG(FATAL) << code;
- UNREACHABLE();
- }
-}
-
constexpr DexMemAccessType SGetOrSPutMemAccessType(Instruction::Code code) {
#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
  DCHECK(IsInstructionSGetOrSPut(code));
@@ -230,4 +197,4 @@ constexpr DexMemAccessType AGetOrAPutMemAccessType(Instruction::Code code) {
} // namespace art
-#endif // ART_RUNTIME_DEX_INSTRUCTION_UTILS_H_
+#endif // ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
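
The guard rename above follows the ART convention that an include guard mirrors the header's path, so moving dex_instruction_utils.h from runtime/ to compiler/utils/ also renames its guard. A generic sketch of the convention, with an illustrative path and name:

// For a header at compiler/utils/foo_bar.h the guard is derived from the path:
#ifndef ART_COMPILER_UTILS_FOO_BAR_H_
#define ART_COMPILER_UTILS_FOO_BAR_H_

// ... declarations ...

#endif  // ART_COMPILER_UTILS_FOO_BAR_H_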
diff --git a/runtime/Android.mk b/runtime/Android.mk
index ab346e3337..c647cc2a3e 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -88,9 +88,6 @@ LIBART_COMMON_SRC_FILES := \
jdwp/jdwp_socket.cc \
jdwp/object_registry.cc \
jni_env_ext.cc \
- jit/jit.cc \
- jit/jit_code_cache.cc \
- jit/jit_instrumentation.cc \
jni_internal.cc \
jobject_comparator.cc \
mem_map.cc \
@@ -301,7 +298,6 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
base/unix_file/fd_file.h \
dex_file.h \
dex_instruction.h \
- dex_instruction_utils.h \
gc_root.h \
gc/allocator/rosalloc.h \
gc/collector/gc_type.h \
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 3d007ba1e5..cc1a4a1e62 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -42,7 +42,6 @@ struct LogVerbosity {
bool gc;
bool heap;
bool jdwp;
- bool jit;
bool jni;
bool monitor;
bool profiler;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 45d2347ee2..745b2099cf 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -67,7 +67,6 @@ enum LockLevel {
kReferenceQueueWeakReferencesLock,
kReferenceQueueClearedReferencesLock,
kReferenceProcessorLock,
- kJitCodeCacheLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 8fe2ec8eb5..ee66b49e7d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -43,8 +43,6 @@
#include "handle_scope.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
-#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
#include "leb128.h"
#include "oat.h"
#include "oat_file.h"
@@ -93,14 +91,15 @@ static void ThrowEarlierClassFailure(mirror::Class* c)
// a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we
// failed in verification, in which case v2 5.4.1 says we need to re-throw
// the previous error.
- Runtime* const runtime = Runtime::Current();
- if (!runtime->IsAotCompiler()) { // Give info if this occurs at runtime.
+ Runtime* runtime = Runtime::Current();
+ bool is_compiler = runtime->IsCompiler();
+ if (!is_compiler) { // Give info if this occurs at runtime.
LOG(INFO) << "Rejecting re-init on previously-failed class " << PrettyClass(c);
}
CHECK(c->IsErroneous()) << PrettyClass(c) << " " << c->GetStatus();
Thread* self = Thread::Current();
- if (runtime->IsAotCompiler()) {
+ if (is_compiler) {
// At compile time, accurate errors and NCDFE are disabled to speed compilation.
mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
self->SetException(ThrowLocation(), pre_allocated);
@@ -429,7 +428,7 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// Set up GenericJNI entrypoint. That is mainly a hack for common_compiler_test.h so that
// we do not need friend classes or a publicly exposed setter.
quick_generic_jni_trampoline_ = GetQuickGenericJniStub();
- if (!runtime->IsAotCompiler()) {
+ if (!runtime->IsCompiler()) {
// We need to set up the generic trampolines since we don't have an image.
quick_resolution_trampoline_ = GetQuickResolutionStub();
quick_imt_conflict_trampoline_ = GetQuickImtConflictStub();
@@ -1033,7 +1032,8 @@ const OatFile* ClassLinker::FindOatFileInOatLocationForDexFile(const char* dex_l
const char* oat_location,
std::string* error_msg) {
std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsAotCompiler(), error_msg));
+ !Runtime::Current()->IsCompiler(),
+ error_msg));
if (oat_file.get() == nullptr) {
*error_msg = StringPrintf("Failed to find existing oat file at %s: %s", oat_location,
error_msg->c_str());
@@ -1104,8 +1104,8 @@ const OatFile* ClassLinker::CreateOatFileForDexLocation(const char* dex_location
return nullptr;
}
std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsAotCompiler(),
- &error_msg));
+ !Runtime::Current()->IsCompiler(),
+ &error_msg));
if (oat_file.get() == nullptr) {
std::string compound_msg = StringPrintf("\nFailed to open generated oat file '%s': %s",
oat_location, error_msg.c_str());
@@ -1345,7 +1345,7 @@ const OatFile* ClassLinker::OpenOatFileFromDexLocation(const std::string& dex_lo
*already_opened = false;
const Runtime* runtime = Runtime::Current();
CHECK(runtime != nullptr);
- bool executable = !runtime->IsAotCompiler();
+ bool executable = !runtime->IsCompiler();
std::string odex_error_msg;
bool should_patch_system = false;
@@ -1513,7 +1513,7 @@ const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
bool success = Exec(argv, error_msg);
if (success) {
std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, nullptr, nullptr,
- !runtime->IsAotCompiler(), error_msg));
+ !runtime->IsCompiler(), error_msg));
bool checksum_verified = false;
if (output.get() != nullptr && CheckOatFile(runtime, output.get(), isa, &checksum_verified,
error_msg)) {
@@ -1527,7 +1527,7 @@ const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
"but was unable to open output file '%s': %s",
input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
}
- } else if (!runtime->IsAotCompiler()) {
+ } else if (!runtime->IsCompiler()) {
// patchoat failed which means we probably don't have enough room to place the output oat file,
// instead of failing we should just run the interpreter from the dex files in the input oat.
LOG(WARNING) << "Patching of oat file '" << input_oat << "' failed. Attempting to use oat file "
@@ -1614,20 +1614,22 @@ const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_lo
if (oat_file != nullptr) {
return oat_file;
}
- return OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsAotCompiler(), error_msg);
+
+ return OatFile::Open(oat_location, oat_location, nullptr, nullptr, !Runtime::Current()->IsCompiler(),
+ error_msg);
}
void ClassLinker::InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg) {
ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
DCHECK(obj != nullptr);
DCHECK(class_linker != nullptr);
+ size_t pointer_size = class_linker->image_pointer_size_;
+
if (obj->IsArtMethod()) {
mirror::ArtMethod* method = obj->AsArtMethod();
if (!method->IsNative()) {
- const size_t pointer_size = class_linker->image_pointer_size_;
method->SetEntryPointFromInterpreterPtrSize(artInterpreterToInterpreterBridge, pointer_size);
- if (!method->IsRuntimeMethod() && method != Runtime::Current()->GetResolutionMethod()) {
+ if (method != Runtime::Current()->GetResolutionMethod()) {
method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(),
pointer_size);
}
@@ -1696,8 +1698,8 @@ void ClassLinker::InitFromImage() {
// bitmap walk.
mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod));
size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize();
- if (!Runtime::Current()->IsAotCompiler()) {
- // Aot compiler supports having an image with a different pointer size than the runtime. This
+ if (!Runtime::Current()->IsCompiler()) {
+ // Compiler supports having an image with a different pointer size than the runtime. This
// happens on the host for compile 32 bit tests since we use a 64 bit libart compiler. We may
// also use 32 bit dex2oat on a system with 64 bit apps.
CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*)))
@@ -1712,7 +1714,7 @@ void ClassLinker::InitFromImage() {
// Set entry point to interpreter if in InterpretOnly mode.
Runtime* runtime = Runtime::Current();
- if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
+ if (!runtime->IsCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
heap->VisitObjects(InitFromImageInterpretOnlyCallback, this);
}
@@ -2515,44 +2517,31 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
return GetQuickProxyInvokeHandler();
}
bool found;
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- auto* code = jit->GetCodeCache()->GetCodeFor(method);
- if (code != nullptr) {
- return code;
- }
- }
OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+ const void* result = nullptr;
if (found) {
- auto* code = oat_method.GetQuickCode();
- if (code != nullptr) {
- return code;
- }
+ result = oat_method.GetQuickCode();
}
- if (method->IsNative()) {
- // No code and native? Use generic trampoline.
- return GetQuickGenericJniStub();
+
+ if (result == nullptr) {
+ if (method->IsNative()) {
+ // No code and native? Use generic trampoline.
+ result = GetQuickGenericJniStub();
+ } else {
+ // No code? You must mean to go into the interpreter.
+ result = GetQuickToInterpreterBridge();
+ }
}
- return GetQuickToInterpreterBridge();
+ return result;
}
const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
return nullptr;
}
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- auto* code = jit->GetCodeCache()->GetCodeFor(method);
- if (code != nullptr) {
- return code;
- }
- }
bool found;
OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
- if (found) {
- return oat_method.GetQuickCode();
- }
- return nullptr;
+ return found ? oat_method.GetQuickCode() : nullptr;
}
const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
@@ -2588,7 +2577,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
}
Runtime* runtime = Runtime::Current();
if (!runtime->IsStarted() || runtime->UseCompileTimeClassPath()) {
- if (runtime->IsAotCompiler() || runtime->GetHeap()->HasImageSpace()) {
+ if (runtime->IsCompiler() || runtime->GetHeap()->HasImageSpace()) {
return; // OAT file unavailable.
}
}
@@ -2641,7 +2630,7 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
const OatFile::OatClass* oat_class,
uint32_t class_def_method_index) {
Runtime* runtime = Runtime::Current();
- if (runtime->IsAotCompiler()) {
+ if (runtime->IsCompiler()) {
// The following code only applies to a non-compiler runtime.
return;
}
@@ -3480,7 +3469,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
EnsurePreverifiedMethods(klass);
return;
}
- if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
+ if (klass->IsCompileTimeVerified() && Runtime::Current()->IsCompiler()) {
return;
}
@@ -3496,7 +3485,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
} else {
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime)
<< PrettyClass(klass.Get());
- CHECK(!Runtime::Current()->IsAotCompiler());
+ CHECK(!Runtime::Current()->IsCompiler());
klass->SetStatus(mirror::Class::kStatusVerifyingAtRuntime, self);
}
@@ -3532,7 +3521,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
self->GetException(nullptr)->SetCause(cause.Get());
}
ClassReference ref(klass->GetDexCache()->GetDexFile(), klass->GetDexClassDefIndex());
- if (Runtime::Current()->IsAotCompiler()) {
+ if (Runtime::Current()->IsCompiler()) {
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -3557,7 +3546,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
std::string error_msg;
if (!preverified) {
verifier_failure = verifier::MethodVerifier::VerifyClass(self, klass.Get(),
- Runtime::Current()->IsAotCompiler(),
+ Runtime::Current()->IsCompiler(),
&error_msg);
}
if (preverified || verifier_failure != verifier::MethodVerifier::kHardFailure) {
@@ -3585,7 +3574,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
// Soft failures at compile time should be retried at runtime. Soft
// failures at runtime will be handled by slow paths in the generated
// code. Set status accordingly.
- if (Runtime::Current()->IsAotCompiler()) {
+ if (Runtime::Current()->IsCompiler()) {
klass->SetStatus(mirror::Class::kStatusRetryVerificationAtRuntime, self);
} else {
klass->SetStatus(mirror::Class::kStatusVerified, self);
@@ -3626,7 +3615,7 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
// we are not compiling the image or if the class we're verifying is not part of
// the app. In other words, we will only check for preverification of bootclasspath
// classes.
- if (Runtime::Current()->IsAotCompiler()) {
+ if (Runtime::Current()->IsCompiler()) {
// Are we compiling the bootclasspath?
if (!Runtime::Current()->UseCompileTimeClassPath()) {
return false;
@@ -3652,7 +3641,7 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
// image (that we just failed loading), and the verifier can't be run on quickened opcodes when
// the runtime isn't started. On the other hand, app classes can be re-verified even if they are
// already pre-opted, as then the runtime is started.
- if (!Runtime::Current()->IsAotCompiler() &&
+ if (!Runtime::Current()->IsCompiler() &&
!Runtime::Current()->GetHeap()->HasImageSpace() &&
klass->GetClassLoader() != nullptr) {
return false;
@@ -4100,7 +4089,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
CHECK(self->IsExceptionPending());
VlogClassInitializationFailure(klass);
} else {
- CHECK(Runtime::Current()->IsAotCompiler());
+ CHECK(Runtime::Current()->IsCompiler());
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
}
return false;
@@ -4281,8 +4270,7 @@ bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* se
if (klass->GetStatus() == mirror::Class::kStatusInitializing) {
continue;
}
- if (klass->GetStatus() == mirror::Class::kStatusVerified &&
- Runtime::Current()->IsAotCompiler()) {
+ if (klass->GetStatus() == mirror::Class::kStatusVerified && Runtime::Current()->IsCompiler()) {
// Compile time initialization failed.
return false;
}
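
The reverted GetQuickOatCodeFor restores a simple fallback chain: use the oat method's code if present, otherwise pick a generic JNI trampoline for native methods or the quick-to-interpreter bridge for everything else. A self-contained sketch of that selection logic, with string constants standing in for the real stubs:

#include <cassert>

static const char kGenericJniStub[] = "generic-jni-trampoline";
static const char kInterpreterBridge[] = "quick-to-interpreter-bridge";

// Prefer compiled oat code, then fall back based on whether the method is native.
const void* SelectEntryPoint(const void* oat_code, bool is_native) {
  if (oat_code != nullptr) {
    return oat_code;
  }
  return is_native ? kGenericJniStub : kInterpreterBridge;
}

int main() {
  assert(SelectEntryPoint(nullptr, true) == kGenericJniStub);
  assert(SelectEntryPoint(nullptr, false) == kInterpreterBridge);
  return 0;
}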
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index f33c0f8bdf..a3d3b470cc 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -287,13 +287,6 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
}
- // We only care about how many backward branches were executed in the Jit.
- void BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method, int32_t dex_pc_offset)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method)
- << " " << dex_pc_offset;
- }
-
private:
DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index c8ede48b7d..e121a08584 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1028,13 +1028,6 @@ class DexFile {
// from an oat file, e.g., directly from an apk.
const OatFile* oat_file_;
};
-
-struct DexFileReference {
- DexFileReference(const DexFile* file, uint32_t idx) : dex_file(file), index(idx) { }
- const DexFile* dex_file;
- uint32_t index;
-};
-
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file);
// Iterate over a dex file's ProtoId's parameters
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9e159c2533..419d5558a8 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -655,7 +655,7 @@ void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
}
bool Heap::IsCompilingBoot() const {
- if (!Runtime::Current()->IsAotCompiler()) {
+ if (!Runtime::Current()->IsCompiler()) {
return false;
}
for (const auto& space : continuous_spaces_) {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 14f770da63..d873e6d994 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -788,7 +788,7 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(),
image_header.GetOatFileBegin(),
- !Runtime::Current()->IsAotCompiler(), error_msg);
+ !Runtime::Current()->IsCompiler(), error_msg);
if (oat_file == NULL) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index a054462b2c..90115c3887 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -31,8 +31,6 @@
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_root-inl.h"
#include "interpreter/interpreter.h"
-#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -94,16 +92,6 @@ void Instrumentation::InstallStubsForClass(mirror::Class* klass) {
static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Runtime* const runtime = Runtime::Current();
- jit::Jit* jit = runtime->GetJit();
- if (jit != nullptr) {
- const void* old_code_ptr = method->GetEntryPointFromQuickCompiledCode();
- jit::JitCodeCache* code_cache = jit->GetCodeCache();
- if (code_cache->ContainsCodePtr(old_code_ptr)) {
- // Save the old compiled code since we need it to implement ClassLinker::GetQuickOatCodeFor.
- code_cache->SaveCompiledCode(method, old_code_ptr);
- }
- }
method->SetEntryPointFromQuickCompiledCode(quick_code);
if (!method->IsResolutionMethod()) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -132,8 +120,7 @@ void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) {
}
const void* new_quick_code;
bool uninstall = !entry_exit_stubs_installed_ && !interpreter_stubs_installed_;
- Runtime* const runtime = Runtime::Current();
- ClassLinker* const class_linker = runtime->GetClassLinker();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
bool is_class_initialized = method->GetDeclaringClass()->IsInitialized();
if (uninstall) {
if ((forced_interpret_only_ || IsDeoptimized(method)) && !method->IsNative()) {
@@ -156,6 +143,7 @@ void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) {
new_quick_code = GetQuickInstrumentationEntryPoint();
} else {
new_quick_code = class_linker->GetQuickOatCodeFor(method);
+ DCHECK(!class_linker->IsQuickToInterpreterBridge(new_quick_code));
}
} else {
new_quick_code = GetQuickResolutionStub();
@@ -409,10 +397,6 @@ void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t ev
method_unwind_listeners_.push_back(listener);
have_method_unwind_listeners_ = true;
}
- if ((events & kBackwardBranch) != 0) {
- backward_branch_listeners_.push_back(listener);
- have_backward_branch_listeners_ = true;
- }
if ((events & kDexPcMoved) != 0) {
std::list<InstrumentationListener*>* modified;
if (have_dex_pc_listeners_) {
@@ -920,13 +904,6 @@ void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_o
}
}
-void Instrumentation::BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method,
- int32_t offset) const {
- for (InstrumentationListener* listener : backward_branch_listeners_) {
- listener->BackwardBranch(thread, method, offset);
- }
-}
-
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field) const {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index b667a40fc8..cea03883dc 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -94,10 +94,6 @@ struct InstrumentationListener {
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
-
- // Call-back for when we get a backward branch.
- virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
};
// Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -107,14 +103,13 @@ struct InstrumentationListener {
class Instrumentation {
public:
enum InstrumentationEvent {
- kMethodEntered = 0x1,
- kMethodExited = 0x2,
- kMethodUnwind = 0x4,
- kDexPcMoved = 0x8,
- kFieldRead = 0x10,
- kFieldWritten = 0x20,
- kExceptionCaught = 0x40,
- kBackwardBranch = 0x80,
+ kMethodEntered = 1, // 1 << 0
+ kMethodExited = 2, // 1 << 1
+ kMethodUnwind = 4, // 1 << 2
+ kDexPcMoved = 8, // 1 << 3
+ kFieldRead = 16, // 1 << 4,
+ kFieldWritten = 32, // 1 << 5
+ kExceptionCaught = 64, // 1 << 6
};
Instrumentation();
@@ -249,10 +244,6 @@ class Instrumentation {
return have_exception_caught_listeners_;
}
- bool HasBackwardBranchListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return have_backward_branch_listeners_;
- }
-
bool IsActive() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
@@ -293,14 +284,6 @@ class Instrumentation {
}
}
- // Inform listeners that a backward branch has been taken (only supported by the interpreter).
- void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t offset) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(HasBackwardBranchListeners())) {
- BackwardBranchImpl(thread, method, offset);
- }
- }
-
// Inform listeners that we read a field (only supported by the interpreter).
void FieldReadEvent(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
@@ -378,8 +361,6 @@ class Instrumentation {
void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method, int32_t offset) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field) const
@@ -448,14 +429,10 @@ class Instrumentation {
// Do we have any exception caught listeners? Short-cut to avoid taking the instrumentation_lock_.
bool have_exception_caught_listeners_ GUARDED_BY(Locks::mutator_lock_);
- // Do we have any backward branch listeners? Short-cut to avoid taking the instrumentation_lock_.
- bool have_backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
-
// The event listeners, written to with the mutator_lock_ exclusively held.
std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::shared_ptr<std::list<InstrumentationListener*>> dex_pc_listeners_
GUARDED_BY(Locks::mutator_lock_);
std::shared_ptr<std::list<InstrumentationListener*>> field_read_listeners_
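
The event kinds above go back to plain decimal constants, but they are still power-of-two bit flags, so a caller can register one listener for several events by OR-ing the values and the runtime can test membership with a mask. A tiny standalone sketch of that usage, with a simplified enum rather than the real AddListener signature:

#include <cstdint>
#include <cstdio>

enum InstrumentationEvent : uint32_t {
  kMethodEntered = 1,  // 1 << 0
  kMethodExited  = 2,  // 1 << 1
  kMethodUnwind  = 4,  // 1 << 2
};

int main() {
  uint32_t events = kMethodEntered | kMethodExited;  // subscribe to two events
  if ((events & kMethodExited) != 0) {
    std::printf("method-exit events requested\n");
  }
  return 0;
}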
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 37324eac82..e4b324792e 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -54,12 +54,6 @@ namespace interpreter {
#define UPDATE_HANDLER_TABLE() \
currentHandlersTable = handlersTable[Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
-#define BACKWARD_BRANCH_INSTRUMENTATION(offset) \
- do { \
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); \
- instrumentation->BackwardBranch(self, shadow_frame.GetMethod(), offset); \
- } while (false)
-
#define UNREACHABLE_CODE_CHECK() \
do { \
if (kIsDebugBuild) { \
@@ -141,7 +135,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
}
};
- constexpr bool do_assignability_check = do_access_check;
+ const bool do_assignability_check = do_access_check;
if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
LOG(FATAL) << "Invalid shadow frame for interpreter use";
return JValue();
@@ -614,7 +608,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(GOTO) {
int8_t offset = inst->VRegA_10t(inst_data);
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -627,7 +620,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(GOTO_16) {
int16_t offset = inst->VRegA_20t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -640,7 +632,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(GOTO_32) {
int32_t offset = inst->VRegA_30t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -653,7 +644,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(PACKED_SWITCH) {
int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -666,7 +656,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(SPARSE_SWITCH) {
int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -769,7 +758,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -786,7 +774,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -803,7 +790,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -820,7 +806,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -837,7 +822,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -854,7 +838,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -871,7 +854,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -888,7 +870,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -905,7 +886,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -922,7 +902,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -939,7 +918,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -956,7 +934,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
- BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
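
With the backward-branch instrumentation macro gone, each branch handler above keeps only the original pattern: backward branches are where the interpreter polls for thread suspension, since any loop must eventually take one. A minimal sketch of that pattern, where IsBackwardBranch and the suspend hook are simplified stand-ins:

#include <cstdint>

inline bool IsBackwardBranch(int32_t offset) { return offset <= 0; }

// Poll for suspension only on back edges; forward branches stay on the fast
// path, yet every loop iteration still reaches a safepoint.
template <typename CheckSuspendFn>
void HandleBranch(int32_t offset, bool flags_raised, CheckSuspendFn check_suspend) {
  if (IsBackwardBranch(offset) && flags_raised) {
    check_suspend();
  }
}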
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 08332d3499..ea7c1927ce 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -337,8 +337,7 @@ class JII {
thread_group = args->group;
}
- if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group,
- !runtime->IsAotCompiler())) {
+ if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group, !runtime->IsCompiler())) {
*p_env = nullptr;
return JNI_ERR;
} else {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
deleted file mode 100644
index 539c181952..0000000000
--- a/runtime/jit/jit.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jit.h"
-
-#include <dlfcn.h>
-
-#include "entrypoints/runtime_asm_entrypoints.h"
-#include "interpreter/interpreter.h"
-#include "jit_code_cache.h"
-#include "jit_instrumentation.h"
-#include "mirror/art_method-inl.h"
-#include "runtime.h"
-#include "runtime_options.h"
-#include "thread_list.h"
-#include "utils.h"
-
-namespace art {
-namespace jit {
-
-JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
- if (!options.GetOrDefault(RuntimeArgumentMap::UseJIT)) {
- return nullptr;
- }
- auto* jit_options = new JitOptions;
- jit_options->code_cache_capacity_ =
- options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheCapacity);
- jit_options->compile_threshold_ =
- options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
- return jit_options;
-}
-
-Jit::Jit()
- : jit_library_handle_(nullptr), jit_compiler_handle_(nullptr), jit_load_(nullptr),
- jit_compile_method_(nullptr) {
-}
-
-Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
- std::unique_ptr<Jit> jit(new Jit);
- if (!jit->LoadCompiler(error_msg)) {
- return nullptr;
- }
- jit->code_cache_.reset(JitCodeCache::Create(options->GetCodeCacheCapacity(), error_msg));
- if (jit->GetCodeCache() == nullptr) {
- return nullptr;
- }
- LOG(INFO) << "JIT created with code_cache_capacity="
- << PrettySize(options->GetCodeCacheCapacity())
- << " compile_threshold=" << options->GetCompileThreshold();
- return jit.release();
-}
-
-bool Jit::LoadCompiler(std::string* error_msg) {
- jit_library_handle_ = dlopen(
- kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
- if (jit_library_handle_ == nullptr) {
- std::ostringstream oss;
- oss << "JIT could not load libart-compiler.so: " << dlerror();
- *error_msg = oss.str();
- return false;
- }
- jit_load_ = reinterpret_cast<void* (*)(CompilerCallbacks**)>(
- dlsym(jit_library_handle_, "jit_load"));
- if (jit_load_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_load entry point";
- return false;
- }
- jit_unload_ = reinterpret_cast<void (*)(void*)>(
- dlsym(jit_library_handle_, "jit_unload"));
- if (jit_unload_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_unload entry point";
- return false;
- }
- jit_compile_method_ = reinterpret_cast<bool (*)(void*, mirror::ArtMethod*, Thread*)>(
- dlsym(jit_library_handle_, "jit_compile_method"));
- if (jit_compile_method_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_compile_method entry point";
- return false;
- }
- CompilerCallbacks* callbacks = nullptr;
- VLOG(jit) << "Calling JitLoad interpreter_only="
- << Runtime::Current()->GetInstrumentation()->InterpretOnly();
- jit_compiler_handle_ = (jit_load_)(&callbacks);
- if (jit_compiler_handle_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't load compiler";
- return false;
- }
- if (callbacks == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT compiler callbacks were not set";
- jit_compiler_handle_ = nullptr;
- return false;
- }
- compiler_callbacks_ = callbacks;
- return true;
-}
-
-bool Jit::CompileMethod(mirror::ArtMethod* method, Thread* self) {
- DCHECK(!method->IsRuntimeMethod());
- const bool result = jit_compile_method_(jit_compiler_handle_, method, self);
- if (result) {
- method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
- }
- return result;
-}
-
-void Jit::CreateThreadPool() {
- CHECK(instrumentation_cache_.get() != nullptr);
- instrumentation_cache_->CreateThreadPool();
-}
-
-void Jit::DeleteThreadPool() {
- if (instrumentation_cache_.get() != nullptr) {
- instrumentation_cache_->DeleteThreadPool();
- }
-}
-
-Jit::~Jit() {
- DeleteThreadPool();
- if (jit_compiler_handle_ != nullptr) {
- jit_unload_(jit_compiler_handle_);
- }
- if (jit_library_handle_ != nullptr) {
- dlclose(jit_library_handle_);
- }
-}
-
-void Jit::CreateInstrumentationCache(size_t compile_threshold) {
- CHECK_GT(compile_threshold, 0U);
- Runtime* const runtime = Runtime::Current();
- runtime->GetThreadList()->SuspendAll();
- // Add Jit interpreter instrumentation, tells the interpreter when to notify the jit to compile
- // something.
- instrumentation_cache_.reset(new jit::JitInstrumentationCache(compile_threshold));
- runtime->GetInstrumentation()->AddListener(
- new jit::JitInstrumentationListener(instrumentation_cache_.get()),
- instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kBackwardBranch);
- runtime->GetThreadList()->ResumeAll();
-}
-
-} // namespace jit
-} // namespace art
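
The removed Jit::LoadCompiler is an instance of the usual runtime-plugin pattern: dlopen a shared library, resolve each required entry point with dlsym, and dlclose on any failure so nothing is left half-loaded. A minimal sketch of that pattern under assumed names (plugin_load is a hypothetical symbol, not an ART export):

#include <dlfcn.h>
#include <string>

using PluginLoadFn = void* (*)();

// Returns the resolved entry point, or nullptr with *error_msg set.
PluginLoadFn LoadPluginEntryPoint(const char* so_name, void** handle_out,
                                  std::string* error_msg) {
  void* handle = dlopen(so_name, RTLD_NOW);
  if (handle == nullptr) {
    *error_msg = std::string("could not load ") + so_name + ": " + dlerror();
    return nullptr;
  }
  auto load = reinterpret_cast<PluginLoadFn>(dlsym(handle, "plugin_load"));
  if (load == nullptr) {
    dlclose(handle);  // roll back: do not keep a library we cannot use
    *error_msg = "missing plugin_load entry point";
    return nullptr;
  }
  *handle_out = handle;
  return load;
}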
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
deleted file mode 100644
index b80015feca..0000000000
--- a/runtime/jit/jit.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_JIT_JIT_H_
-#define ART_RUNTIME_JIT_JIT_H_
-
-#include <unordered_map>
-
-#include "instrumentation.h"
-
-#include "atomic.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "gc_root.h"
-#include "jni.h"
-#include "object_callbacks.h"
-#include "thread_pool.h"
-
-namespace art {
-
-class CompilerCallbacks;
-struct RuntimeArgumentMap;
-
-namespace jit {
-
-class JitCodeCache;
-class JitInstrumentationCache;
-class JitOptions;
-
-class Jit {
- public:
- static constexpr bool kStressMode = kIsDebugBuild;
- static constexpr size_t kDefaultCompileThreshold = kStressMode ? 1 : 1000;
-
- virtual ~Jit();
- static Jit* Create(JitOptions* options, std::string* error_msg);
- bool CompileMethod(mirror::ArtMethod* method, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateInstrumentationCache(size_t compile_threshold);
- void CreateThreadPool();
- CompilerCallbacks* GetCompilerCallbacks() {
- return compiler_callbacks_;
- }
- const JitCodeCache* GetCodeCache() const {
- return code_cache_.get();
- }
- JitCodeCache* GetCodeCache() {
- return code_cache_.get();
- }
- void DeleteThreadPool();
-
- private:
- Jit();
- bool LoadCompiler(std::string* error_msg);
-
- // JIT compiler
- void* jit_library_handle_;
- void* jit_compiler_handle_;
- void* (*jit_load_)(CompilerCallbacks**);
- void (*jit_unload_)(void*);
- bool (*jit_compile_method_)(void*, mirror::ArtMethod*, Thread*);
-
- std::unique_ptr<jit::JitInstrumentationCache> instrumentation_cache_;
- std::unique_ptr<jit::JitCodeCache> code_cache_;
- CompilerCallbacks* compiler_callbacks_; // Owned by the jit compiler.
-};
-
-class JitOptions {
- public:
- static JitOptions* CreateFromRuntimeArguments(const RuntimeArgumentMap& options);
- size_t GetCompileThreshold() const {
- return compile_threshold_;
- }
- size_t GetCodeCacheCapacity() const {
- return code_cache_capacity_;
- }
-
- private:
- size_t code_cache_capacity_;
- size_t compile_threshold_;
-
- JitOptions() : code_cache_capacity_(0), compile_threshold_(0) {
- }
-};
-
-} // namespace jit
-} // namespace art
-
-#endif // ART_RUNTIME_JIT_JIT_H_
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
deleted file mode 100644
index 8d4965e70f..0000000000
--- a/runtime/jit/jit_code_cache.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jit_code_cache.h"
-
-#include <sstream>
-
-#include "mem_map.h"
-#include "mirror/art_method-inl.h"
-#include "oat_file-inl.h"
-
-namespace art {
-namespace jit {
-
-JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
- CHECK_GT(capacity, 0U);
- CHECK_LT(capacity, kMaxCapacity);
- std::string error_str;
- // Map name specific for android_os_Debug.cpp accounting.
- MemMap* map = MemMap::MapAnonymous("jit-code-cache", nullptr, capacity,
- PROT_READ | PROT_WRITE | PROT_EXEC, false, &error_str);
- if (map == nullptr) {
- std::ostringstream oss;
- oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
- *error_msg = oss.str();
- return nullptr;
- }
- return new JitCodeCache(map);
-}
-
-JitCodeCache::JitCodeCache(MemMap* mem_map)
- : lock_("Jit code cache", kJitCodeCacheLock), num_methods_(0) {
- VLOG(jit) << "Created jit code cache size=" << PrettySize(mem_map->Size());
- mem_map_.reset(mem_map);
- uint8_t* divider = mem_map->Begin() + RoundUp(mem_map->Size() / 4, kPageSize);
- // Data cache is 1 / 4 of the map. TODO: Make this variable?
- // Put data at the start.
- data_cache_ptr_ = mem_map->Begin();
- data_cache_end_ = divider;
- data_cache_begin_ = data_cache_ptr_;
- mprotect(data_cache_ptr_, data_cache_end_ - data_cache_begin_, PROT_READ | PROT_WRITE);
- // Code cache after.
- code_cache_begin_ = divider;
- code_cache_ptr_ = divider;
- code_cache_end_ = mem_map->End();
-}
-
-bool JitCodeCache::ContainsMethod(mirror::ArtMethod* method) const {
- return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
-}
-
-bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
- return ptr >= code_cache_begin_ && ptr < code_cache_end_;
-}
-
-void JitCodeCache::FlushInstructionCache() {
- UNIMPLEMENTED(FATAL);
- // TODO: Investigate if we need to do this.
- // __clear_cache(reinterpret_cast<char*>(code_cache_begin_), static_cast<int>(CodeCacheSize()));
-}
-
-uint8_t* JitCodeCache::ReserveCode(Thread* self, size_t size) {
- MutexLock mu(self, lock_);
- if (size > CodeCacheRemain()) {
- return nullptr;
- }
- code_cache_ptr_ += size;
- return code_cache_ptr_ - size;
-}
-
-uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
- MutexLock mu(self, lock_);
- const size_t size = end - begin;
- if (size > DataCacheRemain()) {
- return nullptr; // Out of space in the data cache.
- }
- std::copy(begin, end, data_cache_ptr_);
- data_cache_ptr_ += size;
- return data_cache_ptr_ - size;
-}
-
-const void* JitCodeCache::GetCodeFor(mirror::ArtMethod* method) {
- const void* code = method->GetEntryPointFromQuickCompiledCode();
- if (ContainsCodePtr(code)) {
- return code;
- }
- MutexLock mu(Thread::Current(), lock_);
- auto it = method_code_map_.find(method);
- if (it != method_code_map_.end()) {
- return it->second;
- }
- return nullptr;
-}
-
-void JitCodeCache::SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr) {
- DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
- DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
- << old_code_ptr;
- MutexLock mu(Thread::Current(), lock_);
- auto it = method_code_map_.find(method);
- if (it != method_code_map_.end()) {
- return;
- }
- method_code_map_.Put(method, old_code_ptr);
-}
-
-} // namespace jit
-} // namespace art
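
The deleted code cache carves one anonymous mapping into a data half and an executable half and hands out space with bump-pointer reservations under a lock. The core allocation step, stripped of the MemMap, mprotect, and locking details (names here are illustrative):

#include <cstddef>
#include <cstdint>

// Bump-pointer region: allocations only move ptr_ forward and are never freed
// individually, matching ReserveCode/AddDataArray above.
class BumpRegion {
 public:
  BumpRegion(uint8_t* begin, uint8_t* end) : ptr_(begin), end_(end) {}

  uint8_t* Reserve(size_t size) {
    if (size > static_cast<size_t>(end_ - ptr_)) {
      return nullptr;  // region exhausted
    }
    uint8_t* result = ptr_;
    ptr_ += size;
    return result;
  }

  size_t Remaining() const { return static_cast<size_t>(end_ - ptr_); }

 private:
  uint8_t* ptr_;
  uint8_t* const end_;
};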
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
deleted file mode 100644
index aa8c7173a5..0000000000
--- a/runtime/jit/jit_code_cache.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
-#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
-
-#include "instrumentation.h"
-
-#include "atomic.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "gc_root.h"
-#include "jni.h"
-#include "oat_file.h"
-#include "object_callbacks.h"
-#include "safe_map.h"
-#include "thread_pool.h"
-
-namespace art {
-
-class CompiledMethod;
-class CompilerCallbacks;
-
-namespace mirror {
-class ArtMethod;
-}  // namespace mirror
-
-namespace jit {
-
-class JitInstrumentationCache;
-
-class JitCodeCache {
- public:
- static constexpr size_t kMaxCapacity = 1 * GB;
- static constexpr size_t kDefaultCapacity = 2 * MB;
-
- static JitCodeCache* Create(size_t capacity, std::string* error_msg);
-
- const uint8_t* CodeCachePtr() const {
- return code_cache_ptr_;
- }
- size_t CodeCacheSize() const {
- return code_cache_ptr_ - code_cache_begin_;
- }
- size_t CodeCacheRemain() const {
- return code_cache_end_ - code_cache_ptr_;
- }
- size_t DataCacheSize() const {
- return data_cache_ptr_ - data_cache_begin_;
- }
- size_t DataCacheRemain() const {
- return data_cache_end_ - data_cache_ptr_;
- }
- size_t NumMethods() const {
- return num_methods_;
- }
-
- bool ContainsMethod(mirror::ArtMethod* method) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool ContainsCodePtr(const void* ptr) const;
-
- uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
-
- uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
- LOCKS_EXCLUDED(lock_);
-
- // Get code for a method, returns null if it is not in the jit cache.
- const void* GetCodeFor(mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
-
- void SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
-
- private:
- // Takes ownership of code_mem_map.
- explicit JitCodeCache(MemMap* code_mem_map);
- void FlushInstructionCache();
-
- Mutex lock_;
- // Mem map which holds code and data. We do this since we need to have 32 bit offsets from method
- // headers in code cache which point to things in the data cache. If the maps are more than 4GB
- // apart, having multiple maps wouldn't work.
- std::unique_ptr<MemMap> mem_map_;
- // Code cache section.
- uint8_t* code_cache_ptr_;
- const uint8_t* code_cache_begin_;
- const uint8_t* code_cache_end_;
- // Data cache section.
- uint8_t* data_cache_ptr_;
- const uint8_t* data_cache_begin_;
- const uint8_t* data_cache_end_;
- size_t num_methods_;
- // TODO: This relies on methods not moving.
- // This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
- // required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
- SafeMap<mirror::ArtMethod*, const void*> method_code_map_;
-
- DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
-};
-
-
-} // namespace jit
-} // namespace art
-
-#endif // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
deleted file mode 100644
index 160e6789be..0000000000
--- a/runtime/jit/jit_instrumentation.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jit_instrumentation.h"
-
-#include "jit.h"
-#include "jit_code_cache.h"
-#include "mirror/art_method-inl.h"
-#include "scoped_thread_state_change.h"
-
-namespace art {
-namespace jit {
-
-class JitCompileTask : public Task {
- public:
- explicit JitCompileTask(mirror::ArtMethod* method, JitInstrumentationCache* cache)
- : method_(method), cache_(cache) {
- }
-
- virtual void Run(Thread* self) OVERRIDE {
- ScopedObjectAccess soa(self);
- VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
- if (Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
- cache_->SignalCompiled(self, method_);
- } else {
- VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
- }
- }
-
- virtual void Finalize() OVERRIDE {
- delete this;
- }
-
- private:
- mirror::ArtMethod* const method_;
- JitInstrumentationCache* const cache_;
-};
-
-JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold)
- : lock_("jit instrumentation lock"), hot_method_threshold_(hot_method_threshold) {
-}
-
-void JitInstrumentationCache::CreateThreadPool() {
- thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
-}
-
-void JitInstrumentationCache::DeleteThreadPool() {
- thread_pool_.reset();
-}
-
-void JitInstrumentationCache::SignalCompiled(Thread* self, mirror::ArtMethod* method) {
- ScopedObjectAccessUnchecked soa(self);
- jmethodID method_id = soa.EncodeMethod(method);
- MutexLock mu(self, lock_);
- auto it = samples_.find(method_id);
- if (it != samples_.end()) {
- samples_.erase(it);
- }
-}
-
-void JitInstrumentationCache::AddSamples(Thread* self, mirror::ArtMethod* method, size_t count) {
- ScopedObjectAccessUnchecked soa(self);
- // Since we don't have on-stack replacement, some methods can remain in the interpreter longer
- // than we want resulting in samples even after the method is compiled.
- if (method->IsClassInitializer() ||
- Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
- return;
- }
- jmethodID method_id = soa.EncodeMethod(method);
- bool is_hot = false;
- {
- MutexLock mu(self, lock_);
- size_t sample_count = 0;
- auto it = samples_.find(method_id);
- if (it != samples_.end()) {
- it->second += count;
- sample_count = it->second;
- } else {
- sample_count = count;
- samples_.insert(std::make_pair(method_id, count));
- }
- // If we have enough samples, mark as hot and request Jit compilation.
- if (sample_count >= hot_method_threshold_ && sample_count - count < hot_method_threshold_) {
- is_hot = true;
- }
- }
- if (is_hot) {
- if (thread_pool_.get() != nullptr) {
- thread_pool_->AddTask(self, new JitCompileTask(method->GetInterfaceMethodIfProxy(), this));
- thread_pool_->StartWorkers(self);
- } else {
- VLOG(jit) << "Compiling hot method " << PrettyMethod(method);
- Runtime::Current()->GetJit()->CompileMethod(method->GetInterfaceMethodIfProxy(), self);
- }
- }
-}
-
-JitInstrumentationListener::JitInstrumentationListener(JitInstrumentationCache* cache)
- : instrumentation_cache_(cache) {
- CHECK(instrumentation_cache_ != nullptr);
-}
-
-} // namespace jit
-} // namespace art
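
The deleted AddSamples keeps a per-method sample count and requests compilation exactly once, on the increment that crosses the hot-method threshold (the new total reaches the threshold while the previous total was still below it). A simplified, single-threaded sketch of that check:

#include <cstddef>
#include <cstdint>
#include <unordered_map>

class HotnessCounter {
 public:
  explicit HotnessCounter(size_t threshold) : threshold_(threshold) {}

  // Returns true only on the call whose samples first reach the threshold,
  // so a method is queued for compilation at most once.
  bool AddSamples(uint64_t method_id, size_t count) {
    size_t& total = samples_[method_id];
    const size_t before = total;
    total += count;
    return total >= threshold_ && before < threshold_;
  }

 private:
  const size_t threshold_;
  std::unordered_map<uint64_t, size_t> samples_;
};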
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
deleted file mode 100644
index 9576f4b341..0000000000
--- a/runtime/jit/jit_instrumentation.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_JIT_JIT_INSTRUMENTATION_H_
-#define ART_RUNTIME_JIT_JIT_INSTRUMENTATION_H_
-
-#include <unordered_map>
-
-#include "instrumentation.h"
-
-#include "atomic.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "gc_root.h"
-#include "jni.h"
-#include "object_callbacks.h"
-#include "thread_pool.h"
-
-namespace art {
-namespace mirror {
- class ArtField;
- class ArtMethod;
- class Class;
- class Object;
- class Throwable;
-} // namespace mirror
-union JValue;
-class Thread;
-class ThrowLocation;
-
-namespace jit {
-
-// Keeps track of which methods are hot.
-class JitInstrumentationCache {
- public:
- explicit JitInstrumentationCache(size_t hot_method_threshold);
- void AddSamples(Thread* self, mirror::ArtMethod* method, size_t samples)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SignalCompiled(Thread* self, mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateThreadPool();
- void DeleteThreadPool();
-
- private:
- Mutex lock_;
- std::unordered_map<jmethodID, size_t> samples_;
- size_t hot_method_threshold_;
- std::unique_ptr<ThreadPool> thread_pool_;
-};
-
-class JitInstrumentationListener : public instrumentation::InstrumentationListener {
- public:
- explicit JitInstrumentationListener(JitInstrumentationCache* cache);
-
- virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
- mirror::ArtMethod* method, uint32_t /*dex_pc*/)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- instrumentation_cache_->AddSamples(thread, method, 1);
- }
- virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
- const JValue& /*return_value*/)
- OVERRIDE { }
- virtual void MethodUnwind(Thread* /*thread*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
- virtual void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
- mirror::ArtField* /*field*/) OVERRIDE { }
- virtual void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
- mirror::ArtField* /*field*/, const JValue& /*field_value*/)
- OVERRIDE { }
- virtual void ExceptionCaught(Thread* /*thread*/, const ThrowLocation& /*throw_location*/,
- mirror::ArtMethod* /*catch_method*/, uint32_t /*catch_dex_pc*/,
- mirror::Throwable* /*exception_object*/) OVERRIDE { }
-
- virtual void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
- mirror::ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { }
-
- // We only care about how many dex instructions were executed in the Jit.
- virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
- OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK_LE(dex_pc_offset, 0);
- instrumentation_cache_->AddSamples(thread, method, 1);
- }
-
- private:
- JitInstrumentationCache* const instrumentation_cache_;
-};
-
-} // namespace jit
-} // namespace art
-
-#endif // ART_RUNTIME_JIT_JIT_INSTRUMENTATION_H_
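
The deleted listener above only ever feeds the counter: method entries and backward branches each add one sample, and every other instrumentation event is a no-op. Here is a cut-down sketch in the same spirit, reusing the SampleCounter sketch shown earlier; the interface is illustrative and is not ART's InstrumentationListener.

// Sketch only: a listener that forwards entry and backward-branch events
// into the SampleCounter above and ignores everything else.
#include <cstdint>

struct InstrumentationEvents {
  virtual ~InstrumentationEvents() {}
  virtual void MethodEntered(const void* /*method*/) {}
  virtual void BackwardBranch(const void* /*method*/, int32_t /*dex_pc_offset*/) {}
};

class SamplingListener : public InstrumentationEvents {
 public:
  explicit SamplingListener(SampleCounter* counter) : counter_(counter) {}

  void MethodEntered(const void* method) override {
    counter_->AddSamples(method, 1);
  }

  void BackwardBranch(const void* method, int32_t dex_pc_offset) override {
    // Backward branches have a non-positive offset (cf. CHECK_LE(dex_pc_offset, 0)).
    if (dex_pc_offset <= 0) {
      counter_->AddSamples(method, 1);
    }
  }

 private:
  SampleCounter* const counter_;
};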
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index 3cea4a1b9b..5a4ebd1f6e 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -44,7 +44,7 @@ void ArtField::ResetClass() {
void ArtField::SetOffset(MemberOffset num_bytes) {
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
- if (kIsDebugBuild && Runtime::Current()->IsAotCompiler() &&
+ if (kIsDebugBuild && Runtime::Current()->IsCompiler() &&
!Runtime::Current()->UseCompileTimeClassPath()) {
Primitive::Type type = GetTypeAsPrimitiveType();
if (type == Primitive::kPrimDouble || type == Primitive::kPrimLong) {
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index c27c6e9441..7d31148aab 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -147,10 +147,7 @@ inline mirror::Class* ArtMethod::GetClassFromTypeIndex(uint16_t type_idx, bool r
inline uint32_t ArtMethod::GetCodeSize() {
DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
- return GetCodeSize(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
-}
-
-inline uint32_t ArtMethod::GetCodeSize(const void* code) {
+ const void* code = EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode());
if (code == nullptr) {
return 0u;
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 6259745766..b2016dcc82 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -27,8 +27,6 @@
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
-#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
#include "jni_internal.h"
#include "mapping_table.h"
#include "object_array-inl.h"
@@ -231,7 +229,6 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
if (abort_on_failure) {
LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
<< "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
- << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
<< ") in " << PrettyMethod(this);
}
return DexFile::kDexNoIndex;
@@ -332,13 +329,6 @@ void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
class_linker->IsQuickResolutionStub(code)) {
return;
}
- // If we are the JIT then we may have just compiled the method after the
- // IsQuickToInterpreterBridge check.
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr &&
- jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
- return;
- }
/*
* During a stack walk, a return PC may point past-the-end of the code
* in the case that the last instruction is a call that isn't expected to
@@ -346,11 +336,11 @@ void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
*
* NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
*/
- CHECK(PcIsWithinQuickCode(reinterpret_cast<uintptr_t>(code), pc))
+ CHECK(PcIsWithinQuickCode(pc))
<< PrettyMethod(this)
<< " pc=" << std::hex << pc
<< " code=" << code
- << " size=" << GetCodeSize(reinterpret_cast<const void*>(code));
+ << " size=" << GetCodeSize();
}
bool ArtMethod::IsEntrypointInterpreter() {
@@ -420,8 +410,7 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
}
// Ensure that we won't be accidentally calling quick compiled code when -Xint.
- if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
- DCHECK(!runtime->UseJit());
+ if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()) {
CHECK(IsEntrypointInterpreter())
<< "Don't call compiled code when -Xint " << PrettyMethod(this);
}
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 019fdcdee3..f33ca943d7 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -305,8 +305,18 @@ class MANAGED ArtMethod FINAL : public Object {
// quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for
// debug purposes.
bool PcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return PcIsWithinQuickCode(
- reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
+ uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode());
+ if (code == 0) {
+ return pc == 0;
+ }
+ /*
+ * During a stack walk, a return PC may point past-the-end of the code
+ * in the case that the last instruction is a call that isn't expected to
+ * return. Thus, we check <= code + GetCodeSize().
+ *
+ * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
+ */
+ return code <= pc && pc <= code + GetCodeSize();
}
void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -612,23 +622,6 @@ class MANAGED ArtMethod FINAL : public Object {
return offset;
}
- // Code points to the start of the quick code.
- static uint32_t GetCodeSize(const void* code);
-
- static bool PcIsWithinQuickCode(uintptr_t code, uintptr_t pc) {
- if (code == 0) {
- return pc == 0;
- }
- /*
- * During a stack walk, a return PC may point past-the-end of the code
- * in the case that the last instruction is a call that isn't expected to
- * return. Thus, we check <= code + GetCodeSize().
- *
- * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
- */
- return code <= pc && pc <= code + GetCodeSize(reinterpret_cast<const void*>(code));
- }
-
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
};
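
With the static helper gone, PcIsWithinQuickCode performs the range check inline, and the comment explains the one subtlety: a return PC may legitimately point one past the end of the code, so the upper bound is inclusive. A self-contained sketch of that check (illustrative names, not the ART method):

// Sketch only: accept any pc in [code, code + code_size], treating a null
// code pointer as matching only a null pc.
#include <cstdint>

inline bool PcIsWithinCode(uintptr_t code, uintptr_t pc, uint32_t code_size) {
  if (code == 0) {
    return pc == 0;  // no compiled code: only a null PC "matches"
  }
  return code <= pc && pc <= code + code_size;
}

For example, with code = 0x1000 and code_size = 32, a pc of 0x1020 (one past the end) is still accepted, while 0x1021 is not.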
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 3192e0349d..9061bb3d55 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -577,12 +577,12 @@ const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index)
}
if (oat_file_->IsExecutable() ||
Runtime::Current() == nullptr || // This case applies for oatdump.
- Runtime::Current()->IsAotCompiler()) {
+ Runtime::Current()->IsCompiler()) {
return OatMethod(oat_file_->Begin(), oat_method_offsets->code_offset_);
+ } else {
+ // We aren't allowed to use the compiled code. We just force it down the interpreted version.
+ return OatMethod(oat_file_->Begin(), 0);
}
- // We aren't allowed to use the compiled code. We just force it down the interpreted / jit
- // version.
- return OatMethod(oat_file_->Begin(), 0);
}
void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 5e6843923e..6ae3c3e7fc 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -147,8 +147,8 @@ class OatFile {
return reinterpret_cast<T>(begin_ + offset);
}
- const uint8_t* begin_;
- uint32_t code_offset_;
+ const uint8_t* const begin_;
+ const uint32_t code_offset_;
friend class OatClass;
};
diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc
index 749fb5d0bd..f7accc0f31 100644
--- a/runtime/object_lock.cc
+++ b/runtime/object_lock.cc
@@ -47,7 +47,6 @@ void ObjectLock<T>::NotifyAll() {
obj_->NotifyAll(self_);
}
-template class ObjectLock<mirror::ArtMethod>;
template class ObjectLock<mirror::Class>;
template class ObjectLock<mirror::Object>;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 7bdd996f32..99369ca35d 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -146,15 +146,6 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define({"-XX:EnableHSpaceCompactForOOM", "-XX:DisableHSpaceCompactForOOM"})
.WithValues({true, false})
.IntoKey(M::EnableHSpaceCompactForOOM)
- .Define({"-Xjit", "-Xnojit"})
- .WithValues({true, false})
- .IntoKey(M::UseJIT)
- .Define("-Xjitcodecachesize:_")
- .WithType<MemoryKiB>()
- .IntoKey(M::JITCodeCacheCapacity)
- .Define("-Xjitthreshold:_")
- .WithType<unsigned int>()
- .IntoKey(M::JITCompileThreshold)
.Define("-XX:HspaceCompactForOOMMinIntervalMs=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::HSpaceCompactForOOMMinIntervalsMs)
@@ -255,7 +246,7 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
"-Xdexopt:_", "-Xnoquithandler", "-Xjnigreflimit:_", "-Xgenregmap", "-Xnogenregmap",
"-Xverifyopt:_", "-Xcheckdexsum", "-Xincludeselectedop", "-Xjitop:_",
- "-Xincludeselectedmethod", "-Xjitthreshold:_",
+ "-Xincludeselectedmethod", "-Xjitthreshold:_", "-Xjitcodecachesize:_",
"-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:_", "-Xjitoffset:_",
"-Xjitconfig:_", "-Xjitcheckcg", "-Xjitverbose", "-Xjitprofile",
"-Xjitdisableopt", "-Xjitsuspendpoll", "-XX:mainThreadStackSize=_"})
@@ -360,20 +351,19 @@ bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognized,
RuntimeArgumentMap* runtime_options) {
- // gLogVerbosity.class_linker = true; // TODO: don't check this in!
- // gLogVerbosity.compiler = true; // TODO: don't check this in!
- // gLogVerbosity.gc = true; // TODO: don't check this in!
- // gLogVerbosity.heap = true; // TODO: don't check this in!
- // gLogVerbosity.jdwp = true; // TODO: don't check this in!
- // gLogVerbosity.jit = true; // TODO: don't check this in!
- // gLogVerbosity.jni = true; // TODO: don't check this in!
- // gLogVerbosity.monitor = true; // TODO: don't check this in!
- // gLogVerbosity.profiler = true; // TODO: don't check this in!
- // gLogVerbosity.signals = true; // TODO: don't check this in!
- // gLogVerbosity.startup = true; // TODO: don't check this in!
- // gLogVerbosity.third_party_jni = true; // TODO: don't check this in!
- // gLogVerbosity.threads = true; // TODO: don't check this in!
- // gLogVerbosity.verifier = true; // TODO: don't check this in!
+// gLogVerbosity.class_linker = true; // TODO: don't check this in!
+// gLogVerbosity.compiler = true; // TODO: don't check this in!
+// gLogVerbosity.gc = true; // TODO: don't check this in!
+// gLogVerbosity.heap = true; // TODO: don't check this in!
+// gLogVerbosity.jdwp = true; // TODO: don't check this in!
+// gLogVerbosity.jni = true; // TODO: don't check this in!
+// gLogVerbosity.monitor = true; // TODO: don't check this in!
+// gLogVerbosity.profiler = true; // TODO: don't check this in!
+// gLogVerbosity.signals = true; // TODO: don't check this in!
+// gLogVerbosity.startup = true; // TODO: don't check this in!
+// gLogVerbosity.third_party_jni = true; // TODO: don't check this in!
+// gLogVerbosity.threads = true; // TODO: don't check this in!
+// gLogVerbosity.verifier = true; // TODO: don't check this in!
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
@@ -568,7 +558,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, "The following standard options are supported:\n");
UsageMessage(stream, " -classpath classpath (-cp classpath)\n");
UsageMessage(stream, " -Dproperty=value\n");
- UsageMessage(stream, " -verbose:tag ('gc', 'jit', 'jni', or 'class')\n");
+ UsageMessage(stream, " -verbose:tag ('gc', 'jni', or 'class')\n");
UsageMessage(stream, " -showversion\n");
UsageMessage(stream, " -help\n");
UsageMessage(stream, " -agentlib:jdwp=options\n");
@@ -598,8 +588,6 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -XX:ForegroundHeapGrowthMultiplier=doublevalue\n");
UsageMessage(stream, " -XX:LowMemoryMode\n");
UsageMessage(stream, " -Xprofile:{threadcpuclock,wallclock,dualclock}\n");
- UsageMessage(stream, " -Xjitcodecachesize:N\n");
- UsageMessage(stream, " -Xjitthreshold:integervalue\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following unique to ART options are supported:\n");
@@ -640,8 +628,6 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
UsageMessage(stream, " -Xpatchoat:filename\n");
- UsageMessage(stream, " -Xjit\n");
- UsageMessage(stream, " -Xnojit\n");
UsageMessage(stream, " -X[no]relocate\n");
UsageMessage(stream, " -X[no]dex2oat (Whether to invoke dex2oat on the application)\n");
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
@@ -669,6 +655,8 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xincludeselectedop\n");
UsageMessage(stream, " -Xjitop:hexopvalue[-endvalue][,hexopvalue[-endvalue]]*\n");
UsageMessage(stream, " -Xincludeselectedmethod\n");
+ UsageMessage(stream, " -Xjitthreshold:integervalue\n");
+ UsageMessage(stream, " -Xjitcodecachesize:decimalvalueofkbytes\n");
UsageMessage(stream, " -Xjitblocking\n");
UsageMessage(stream, " -Xjitmethod:signature[,signature]* (eg Ljava/lang/String\\;replace)\n");
UsageMessage(stream, " -Xjitclass:classname[,classname]*\n");
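
The usage text above documents suffix-style flags such as -Xjitthreshold:integervalue and -Xjitcodecachesize:N, which carry their value after a fixed prefix. The sketch below shows one way such a flag can be recognized and its numeric payload parsed; it is illustrative only, not ART's CmdlineParser, and the function name and signature are made up for the example.

// Sketch only: match a "-Xsomething:<number>" style argument and extract the number.
#include <cstdlib>
#include <string>

bool ParseUnsignedSuffix(const std::string& arg, const std::string& prefix,
                         unsigned int* out) {
  if (arg.compare(0, prefix.size(), prefix) != 0) {
    return false;  // not this option
  }
  const char* payload = arg.c_str() + prefix.size();
  char* end = nullptr;
  unsigned long value = std::strtoul(payload, &end, 10);
  if (end == payload || *end != '\0') {
    return false;  // empty payload or trailing garbage
  }
  *out = static_cast<unsigned int>(value);
  return true;
}

// Usage: unsigned int threshold;
//        ParseUnsignedSuffix("-Xjitthreshold:2000", "-Xjitthreshold:", &threshold);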
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index db372c3277..c3bdcb1f5c 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -161,7 +161,7 @@ void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
- !runtime->IsAotCompiler()));
+ !runtime->IsCompiler()));
Thread* self = Thread::Current();
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 44e28443dd..d65b2d5241 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -77,9 +77,7 @@ bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
InlineMethod* method) {
DCHECK(verifier != nullptr);
DCHECK_EQ(Runtime::Current()->IsCompiler(), method != nullptr);
- if (!Runtime::Current()->UseJit()) {
- DCHECK_EQ(verifier->CanLoadClasses(), method != nullptr);
- }
+ DCHECK_EQ(verifier->CanLoadClasses(), method != nullptr);
// We currently support only plain return or 2-instruction methods.
const DexFile::CodeItem* code_item = verifier->CodeItem();
@@ -112,10 +110,6 @@ bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT:
case Instruction::IGET_WIDE:
- // TODO: Add handling for JIT.
- // case Instruction::IGET_QUICK:
- // case Instruction::IGET_WIDE_QUICK:
- // case Instruction::IGET_OBJECT_QUICK:
return AnalyseIGetMethod(verifier, method);
case Instruction::IPUT:
case Instruction::IPUT_OBJECT:
@@ -124,10 +118,6 @@ bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT:
case Instruction::IPUT_WIDE:
- // TODO: Add handling for JIT.
- // case Instruction::IPUT_QUICK:
- // case Instruction::IPUT_WIDE_QUICK:
- // case Instruction::IPUT_OBJECT_QUICK:
return AnalyseIPutMethod(verifier, method);
default:
return false;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 749418df00..f38f65e2ad 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -65,8 +65,6 @@
#include "image.h"
#include "instrumentation.h"
#include "intern_table.h"
-#include "interpreter/interpreter.h"
-#include "jit/jit.h"
#include "jni_internal.h"
#include "mirror/array.h"
#include "mirror/art_field-inl.h"
@@ -227,12 +225,6 @@ Runtime::~Runtime() {
// Make sure to let the GC complete if it is running.
heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
heap_->DeleteThreadPool();
- if (jit_.get() != nullptr) {
- VLOG(jit) << "Deleting jit thread pool";
- // Delete thread pool before the thread list since we don't want to wait forever on the
- // JIT compiler threads.
- jit_->DeleteThreadPool();
- }
// Make sure our internal threads are dead before we start tearing down things they're using.
Dbg::StopJdwp();
@@ -241,13 +233,6 @@ Runtime::~Runtime() {
// Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
delete thread_list_;
- // Delete the JIT after thread list to ensure that there is no remaining threads which could be
- // accessing the instrumentation when we delete it.
- if (jit_.get() != nullptr) {
- VLOG(jit) << "Deleting jit";
- jit_.reset(nullptr);
- }
-
// Shutdown the fault manager if it was initialized.
fault_manager.Shutdown();
@@ -470,24 +455,17 @@ bool Runtime::Start() {
started_ = true;
- // Use !IsAotCompiler so that we get test coverage, tests are never the zygote.
- if (!IsAotCompiler()) {
+ // Use !IsCompiler so that we get test coverage, tests are never the zygote.
+ if (!IsCompiler()) {
ScopedObjectAccess soa(self);
gc::space::ImageSpace* image_space = heap_->GetImageSpace();
if (image_space != nullptr) {
- GetInternTable()->AddImageStringsToTable(image_space);
- GetClassLinker()->MoveImageClassesToClassTable();
+ Runtime::Current()->GetInternTable()->AddImageStringsToTable(image_space);
+ Runtime::Current()->GetClassLinker()->MoveImageClassesToClassTable();
}
}
- // If we are the zygote then we need to wait until after forking to create the code cache due to
- // SELinux restrictions on r/w/x memory regions.
- if (!IsZygote() && jit_.get() != nullptr) {
- jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
- jit_->CreateThreadPool();
- }
-
- if (!IsImageDex2OatEnabled() || !GetHeap()->HasImageSpace()) {
+ if (!IsImageDex2OatEnabled() || !Runtime::Current()->GetHeap()->HasImageSpace()) {
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(soa.Self());
auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
@@ -606,14 +584,8 @@ void Runtime::DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const ch
}
}
- // Create the thread pools.
+ // Create the thread pool.
heap_->CreateThreadPool();
- if (jit_options_.get() != nullptr && jit_.get() == nullptr) {
-    // Create the JIT if the flag is set and we haven't already created it (happens for run-tests).
- CreateJit();
- jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
- jit_->CreateThreadPool();
- }
StartSignalCatcher();
@@ -834,17 +806,6 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
}
- if (!IsCompiler()) {
- // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
- // this case.
- // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
- // nullptr and we don't create the jit.
- jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
- }
- if (!IsZygote() && jit_options_.get() != nullptr) {
- CreateJit();
- }
-
BlockSignals();
InitPlatformSignalHandlers();
@@ -1093,26 +1054,26 @@ void Runtime::InitThreadGroups(Thread* self) {
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
- CHECK(main_thread_group_ != NULL || IsAotCompiler());
+ CHECK(main_thread_group_ != NULL || IsCompiler());
system_thread_group_ =
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
- CHECK(system_thread_group_ != NULL || IsAotCompiler());
+ CHECK(system_thread_group_ != NULL || IsCompiler());
}
jobject Runtime::GetMainThreadGroup() const {
- CHECK(main_thread_group_ != NULL || IsAotCompiler());
+ CHECK(main_thread_group_ != NULL || IsCompiler());
return main_thread_group_;
}
jobject Runtime::GetSystemThreadGroup() const {
- CHECK(system_thread_group_ != NULL || IsAotCompiler());
+ CHECK(system_thread_group_ != NULL || IsCompiler());
return system_thread_group_;
}
jobject Runtime::GetSystemClassLoader() const {
- CHECK(system_class_loader_ != NULL || IsAotCompiler());
+ CHECK(system_class_loader_ != NULL || IsCompiler());
return system_class_loader_;
}
@@ -1368,7 +1329,7 @@ mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
// TODO: use a special method for imt conflict method saves.
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
- if (runtime->IsAotCompiler()) {
+ if (runtime->IsCompiler()) {
size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
@@ -1377,10 +1338,6 @@ mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
return method.Get();
}
-void Runtime::SetImtConflictMethod(mirror::ArtMethod* method) {
- imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
-}
-
mirror::ArtMethod* Runtime::CreateResolutionMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
@@ -1391,7 +1348,7 @@ mirror::ArtMethod* Runtime::CreateResolutionMethod() {
// TODO: use a special method for resolution method saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
- if (runtime->IsAotCompiler()) {
+ if (runtime->IsCompiler()) {
size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
@@ -1522,14 +1479,14 @@ void Runtime::StartProfiler(const char* profile_output_filename) {
// Transaction support.
void Runtime::EnterTransactionMode(Transaction* transaction) {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(transaction != nullptr);
DCHECK(!IsActiveTransaction());
preinitialization_transaction_ = transaction;
}
void Runtime::ExitTransactionMode() {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_ = nullptr;
}
@@ -1589,51 +1546,51 @@ void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offs
void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
uint32_t value, bool is_volatile) const {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteField32(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
uint64_t value, bool is_volatile) const {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteField64(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
mirror::Object* value, bool is_volatile) const {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldReference(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteArray(array, index, value);
}
void Runtime::RecordStrongStringInsertion(mirror::String* s) const {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordStrongStringInsertion(s);
}
void Runtime::RecordWeakStringInsertion(mirror::String* s) const {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWeakStringInsertion(s);
}
void Runtime::RecordStrongStringRemoval(mirror::String* s) const {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordStrongStringRemoval(s);
}
void Runtime::RecordWeakStringRemoval(mirror::String* s) const {
- DCHECK(IsAotCompiler());
+ DCHECK(IsCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWeakStringRemoval(s);
}
@@ -1665,16 +1622,4 @@ void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::strin
void Runtime::UpdateProfilerState(int state) {
VLOG(profiler) << "Profiler state updated to " << state;
}
-
-void Runtime::CreateJit() {
- CHECK(jit_options_.get() != nullptr);
- std::string error_msg;
- jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
- if (jit_.get() != nullptr) {
- compiler_callbacks_ = jit_->GetCompilerCallbacks();
- } else {
- LOG(WARNING) << "Failed to create JIT " << error_msg;
- }
-}
-
} // namespace art
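
The removed Runtime::CreateJit followed a common pattern: a static factory that returns ownership and reports failure through an out error string, with the caller only logging a warning when creation fails. A sketch of that pattern with placeholder types; Jit and JitOptions here are stubs, not the ART classes, and std::cerr stands in for LOG(WARNING).

// Sketch only: create-or-warn factory pattern.
#include <iostream>
#include <memory>
#include <string>

struct JitOptions {};  // placeholder for the real options

class Jit {
 public:
  // Factory: returns nullptr and fills *error_msg on failure.
  static std::unique_ptr<Jit> Create(const JitOptions& /*options*/, std::string* /*error_msg*/) {
    // A real implementation would map the code cache, load the compiler, etc.
    return std::unique_ptr<Jit>(new Jit());
  }
};

std::unique_ptr<Jit> CreateJitOrWarn(const JitOptions& options) {
  std::string error_msg;
  std::unique_ptr<Jit> jit = Jit::Create(options, &error_msg);
  if (jit == nullptr) {
    std::cerr << "Failed to create JIT " << error_msg << "\n";
  }
  return jit;
}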
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 3b6df51369..fb9ca401ac 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -48,12 +48,6 @@ namespace gc {
class GarbageCollector;
} // namespace collector
} // namespace gc
-
-namespace jit {
- class Jit;
- class JitOptions;
-} // namespace jit
-
namespace mirror {
class ArtMethod;
class ClassLoader;
@@ -101,18 +95,12 @@ class Runtime {
static bool Create(const RuntimeOptions& options, bool ignore_unrecognized)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
- // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
- bool IsAotCompiler() const {
- return !UseJit() && IsCompiler();
- }
-
- // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
bool IsCompiler() const {
return compiler_callbacks_ != nullptr;
}
bool CanRelocate() const {
- return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
+ return !IsCompiler() || compiler_callbacks_->IsRelocationPossible();
}
bool ShouldRelocate() const {
@@ -351,7 +339,9 @@ class Runtime {
return !imt_conflict_method_.IsNull();
}
- void SetImtConflictMethod(mirror::ArtMethod* method);
+ void SetImtConflictMethod(mirror::ArtMethod* method) {
+ imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
+ }
void SetImtUnimplementedMethod(mirror::ArtMethod* method) {
imt_unimplemented_method_ = GcRoot<mirror::ArtMethod>(method);
}
@@ -431,14 +421,6 @@ class Runtime {
kUnload,
kInitialize
};
-
- jit::Jit* GetJit() {
- return jit_.get();
- }
- bool UseJit() const {
- return jit_.get() != nullptr;
- }
-
void PreZygoteFork();
bool InitZygote();
void DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa);
@@ -539,8 +521,6 @@ class Runtime {
return zygote_max_failed_boots_;
}
- void CreateJit();
-
private:
static void InitPlatformSignalHandlers();
@@ -620,9 +600,6 @@ class Runtime {
JavaVMExt* java_vm_;
- std::unique_ptr<jit::Jit> jit_;
- std::unique_ptr<jit::JitOptions> jit_options_;
-
// Fault message, printed when we get a SIGSEGV.
Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::string fault_message_ GUARDED_BY(fault_message_lock_);
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 4d74d61461..71a0152956 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -63,9 +63,6 @@ RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
RUNTIME_OPTIONS_KEY (bool, UseTLAB, false)
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
-RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
-RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
-RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheCapacity, jit::JitCodeCache::kDefaultCapacity)
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
HSpaceCompactForOOMMinIntervalsMs,\
MsToNs(100 * 1000)) // 100s
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index 7e59000e09..ebd52d77de 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -26,8 +26,6 @@
#include "runtime/base/logging.h"
#include "cmdline/unit.h"
#include "jdwp/jdwp.h"
-#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "profiler_options.h"
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 26bf655ca9..e377542e45 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -180,7 +180,7 @@ void* SignalCatcher::Run(void* arg) {
Runtime* runtime = Runtime::Current();
CHECK(runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup(),
- !runtime->IsAotCompiler()));
+ !runtime->IsCompiler()));
Thread* self = Thread::Current();
DCHECK_NE(self->GetState(), kRunnable);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 79d0066eef..3b48f49fb4 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -541,7 +541,7 @@ bool Thread::InitStackHwm() {
// Set stack_end_ to the bottom of the stack saving space of stack overflows
Runtime* runtime = Runtime::Current();
- bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
+ bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsCompiler();
ResetDefaultStackEnd();
// Install the protected region if we are doing implicit overflow checks.
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 93b3877bf4..0950abeb97 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -298,7 +298,7 @@ void* Trace::RunSamplingThread(void* arg) {
intptr_t interval_us = reinterpret_cast<intptr_t>(arg);
CHECK_GE(interval_us, 0);
CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
- !runtime->IsAotCompiler()));
+ !runtime->IsCompiler()));
while (true) {
usleep(interval_us);
@@ -627,12 +627,6 @@ void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
-void Trace::BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method,
- int32_t /*dex_pc_offset*/)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method);
-}
-
void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
if (UseThreadCpuClock()) {
uint64_t clock_base = thread->GetTraceClockBase();
diff --git a/runtime/trace.h b/runtime/trace.h
index 9ba30d5f8f..ead1c29c72 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -99,8 +99,7 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<mirror::ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index e26f955f15..7e2e0a6c97 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -32,7 +32,7 @@ static constexpr bool kEnableTransactionStats = false;
Transaction::Transaction()
: log_lock_("transaction log lock", kTransactionLogLock), aborted_(false) {
- CHECK(Runtime::Current()->IsAotCompiler());
+ CHECK(Runtime::Current()->IsCompiler());
}
Transaction::~Transaction() {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index dea8f79399..85c9340d02 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1262,9 +1262,9 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
os << "+" << it->func_offset;
}
try_addr2line = true;
- } else if (
- current_method != nullptr && Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
- current_method->PcIsWithinQuickCode(it->pc)) {
+ } else if (current_method != nullptr &&
+ Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
+ current_method->PcIsWithinQuickCode(it->pc)) {
const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
os << JniLongName(current_method) << "+"
<< (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 87a29ed2f2..474a066da8 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -24,7 +24,6 @@
#include "compiler_callbacks.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
-#include "dex_instruction_utils.h"
#include "dex_instruction_visitor.h"
#include "gc/accounting/card_table-inl.h"
#include "indenter.h"
@@ -112,20 +111,6 @@ static void SafelyMarkAllRegistersAsConflicts(MethodVerifier* verifier, Register
reg_line->MarkAllRegistersAsConflicts(verifier);
}
-MethodVerifier::FailureKind MethodVerifier::VerifyMethod(
- mirror::ArtMethod* method, bool allow_soft_failures, std::string* error ATTRIBUTE_UNUSED) {
- Thread* self = Thread::Current();
- StackHandleScope<3> hs(self);
- mirror::Class* klass = method->GetDeclaringClass();
- auto h_dex_cache(hs.NewHandle(klass->GetDexCache()));
- auto h_class_loader(hs.NewHandle(klass->GetClassLoader()));
- auto h_method = hs.NewHandle(method);
- return VerifyMethod(self, method->GetDexMethodIndex(), method->GetDexFile(), h_dex_cache,
- h_class_loader, klass->GetClassDef(), method->GetCodeItem(), h_method,
- method->GetAccessFlags(), allow_soft_failures, false);
-}
-
-
MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
mirror::Class* klass,
bool allow_soft_failures,
@@ -151,7 +136,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
}
if (early_failure) {
*error = "Verifier rejected class " + PrettyDescriptor(klass) + failure_message;
- if (Runtime::Current()->IsAotCompiler()) {
+ if (Runtime::Current()->IsCompiler()) {
ClassReference ref(&dex_file, klass->GetDexClassDefIndex());
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
@@ -559,7 +544,7 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
case VERIFY_ERROR_ACCESS_METHOD:
case VERIFY_ERROR_INSTANTIATION:
case VERIFY_ERROR_CLASS_CHANGE:
- if (Runtime::Current()->IsAotCompiler() || !can_load_classes_) {
+ if (Runtime::Current()->IsCompiler() || !can_load_classes_) {
// If we're optimistically running verification at compile time, turn NO_xxx, ACCESS_xxx,
// class change and instantiation errors into soft verification errors so that we re-verify
// at runtime. We may fail to find or to agree on access because of not yet available class
@@ -583,7 +568,7 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
// Hard verification failures at compile time will still fail at runtime, so the class is
// marked as rejected to prevent it from being compiled.
case VERIFY_ERROR_BAD_CLASS_HARD: {
- if (Runtime::Current()->IsAotCompiler()) {
+ if (Runtime::Current()->IsCompiler()) {
ClassReference ref(dex_file_, dex_file_->GetIndexForClassDef(*class_def_));
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
@@ -859,7 +844,7 @@ bool MethodVerifier::VerifyInstruction(const Instruction* inst, uint32_t code_of
result = false;
break;
}
- if (inst->GetVerifyIsRuntimeOnly() && Runtime::Current()->IsAotCompiler() && !verify_to_dump_) {
+ if (inst->GetVerifyIsRuntimeOnly() && Runtime::Current()->IsCompiler() && !verify_to_dump_) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "opcode only expected at runtime " << inst->Name();
result = false;
}
@@ -2827,8 +2812,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
if (have_pending_hard_failure_) {
- if (Runtime::Current()->IsAotCompiler()) {
- /* When AOT compiling, check that the last failure is a hard failure */
+ if (Runtime::Current()->IsCompiler()) {
+ /* When compiling, check that the last failure is a hard failure */
CHECK_EQ(failures_[failures_.size() - 1], VERIFY_ERROR_BAD_CLASS_HARD);
}
/* immediate failure, reject class */
@@ -3956,16 +3941,28 @@ void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType&
mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
RegisterLine* reg_line) {
- DCHECK(IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) << inst->Opcode();
+ DCHECK(inst->Opcode() == Instruction::IGET_QUICK ||
+ inst->Opcode() == Instruction::IGET_WIDE_QUICK ||
+ inst->Opcode() == Instruction::IGET_OBJECT_QUICK ||
+ inst->Opcode() == Instruction::IGET_BOOLEAN_QUICK ||
+ inst->Opcode() == Instruction::IGET_BYTE_QUICK ||
+ inst->Opcode() == Instruction::IGET_CHAR_QUICK ||
+ inst->Opcode() == Instruction::IGET_SHORT_QUICK ||
+ inst->Opcode() == Instruction::IPUT_QUICK ||
+ inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
+ inst->Opcode() == Instruction::IPUT_OBJECT_QUICK ||
+ inst->Opcode() == Instruction::IPUT_BOOLEAN_QUICK ||
+ inst->Opcode() == Instruction::IPUT_BYTE_QUICK ||
+ inst->Opcode() == Instruction::IPUT_CHAR_QUICK ||
+ inst->Opcode() == Instruction::IPUT_SHORT_QUICK);
const RegType& object_type = reg_line->GetRegisterType(this, inst->VRegB_22c());
if (!object_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
return nullptr;
}
uint32_t field_offset = static_cast<uint32_t>(inst->VRegC_22c());
- mirror::ArtField* const f = mirror::ArtField::FindInstanceFieldWithOffset(object_type.GetClass(),
- field_offset);
- DCHECK_EQ(f->GetOffset().Uint32Value(), field_offset);
+ mirror::ArtField* f = mirror::ArtField::FindInstanceFieldWithOffset(object_type.GetClass(),
+ field_offset);
if (f == nullptr) {
VLOG(verifier) << "Failed to find instance field at offset '" << field_offset
<< "' from '" << PrettyDescriptor(object_type.GetClass()) << "'";
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index bdd62596a6..b83e647ada 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -156,9 +156,6 @@ class MethodVerifier {
uint32_t method_access_flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static FailureKind VerifyMethod(mirror::ArtMethod* method, bool allow_soft_failures,
- std::string* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
uint8_t EncodePcToReferenceMapData() const;
uint32_t DexFileVersion() const {
@@ -242,14 +239,10 @@ class MethodVerifier {
bool HasFailures() const;
const RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the method of a quick invoke or nullptr if it cannot be found.
- mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
+ mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst,
+ RegisterLine* reg_line,
bool is_range)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the access field of a quick field access (iget/iput-quick) or nullptr
- // if it cannot be found.
- mirror::ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Is the method being verified a constructor?
bool IsConstructor() const {
@@ -539,6 +532,11 @@ class MethodVerifier {
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Returns the access field of a quick field access (iget/iput-quick) or nullptr
+ // if it cannot be found.
+ mirror::ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
template <FieldAccessType kAccType>
void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 49ad1de3ce..c8e0ec5216 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -84,9 +84,6 @@ endif
ifeq ($(ART_TEST_INTERPRETER),true)
COMPILER_TYPES += interpreter
endif
-ifeq ($(ART_TEST_JIT),true)
- COMPILER_TYPES += jit
-endif
ifeq ($(ART_TEST_OPTIMIZING),true)
COMPILER_TYPES += optimizing
endif
@@ -499,12 +496,7 @@ define define-test-art-run-test
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEFAULT_RULES
run_test_options += --quick
else
- ifeq ($(4),jit)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEFAULT_RULES
- run_test_options += --jit
- else
- $$(error found $(4) expected $(COMPILER_TYPES))
- endif
+ $$(error found $(4) expected $(COMPILER_TYPES))
endif
endif
endif
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index ee40ee8310..f64756b5de 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -26,7 +26,6 @@ GDB_SERVER="gdbserver"
HAVE_IMAGE="y"
HOST="n"
INTERPRETER="n"
-JIT="n"
INVOKE_WITH=""
ISA=x86
LIBRARY_DIRECTORY="lib"
@@ -128,9 +127,6 @@ while true; do
elif [ "x$1" = "x--interpreter" ]; then
INTERPRETER="y"
shift
- elif [ "x$1" = "x--jit" ]; then
- JIT="y"
- shift
elif [ "x$1" = "x--jvm" ]; then
USE_JVM="y"
shift
@@ -264,16 +260,6 @@ if [ "$INTERPRETER" = "y" ]; then
fi
fi
-if [ "$JIT" = "y" ]; then
- INT_OPTS="-Xjit"
- if [ "$VERIFY" = "y" ] ; then
- COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
- else
- COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none"
- DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
- fi
-fi
-
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
if [ "$RELOCATE" = "y" ]; then
diff --git a/test/run-all-tests b/test/run-all-tests
index d0b3cf9abe..318a0de17e 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -53,9 +53,6 @@ while true; do
elif [ "x$1" = "x--interpreter" ]; then
run_args="${run_args} --interpreter"
shift
- elif [ "x$1" = "x--jit" ]; then
- run_args="${run_args} --jit"
- shift
elif [ "x$1" = "x--no-verify" ]; then
run_args="${run_args} --no-verify"
shift
@@ -129,7 +126,7 @@ if [ "$usage" = "yes" ]; then
echo " $prog [options] Run all tests with the given options."
echo " Options are all passed to run-test; refer to that for " \
"further documentation:"
- echo " --debug --dev --host --interpreter --jit --jvm --no-optimize"
+ echo " --debug --dev --host --interpreter --jvm --no-optimize"
echo " --no-verify -O --update --valgrind --zygote --64 --relocate"
echo " --prebuild --always-clean --gcstress --gcverify --trace"
echo " --no-patchoat --no-dex2oat"
diff --git a/test/run-test b/test/run-test
index 8bc4151941..8c47663390 100755
--- a/test/run-test
+++ b/test/run-test
@@ -193,9 +193,6 @@ while true; do
run_args="${run_args} --interpreter"
image_suffix="-interpreter"
shift
- elif [ "x$1" = "x--jit" ]; then
- run_args="${run_args} --jit"
- shift
elif [ "x$1" = "x--optimizing" ]; then
run_args="${run_args} -Xcompiler-option --compiler-backend=Optimizing"
image_suffix="-optimizing"
@@ -433,7 +430,6 @@ if [ "$usage" = "yes" ]; then
echo " --gdb Run under gdb; incompatible with some tests."
echo " --build-only Build test files only (off by default)."
echo " --interpreter Enable interpreter only mode (off by default)."
- echo " --jit Enable jit (off by default)."
echo " --optimizing Enable optimizing compiler (off by default)."
echo " --quick Use Quick compiler (default)."
echo " --no-verify Turn off verification (on by default)."