summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--build/Android.common_build.mk4
-rw-r--r--build/Android.gtest.mk12
-rw-r--r--compiler/common_compiler_test.cc2
-rw-r--r--compiler/dex/mir_optimization.cc4
-rw-r--r--compiler/dex/quick/gen_common.cc20
-rw-r--r--compiler/dex/quick/mir_to_lir.cc17
-rwxr-xr-xcompiler/dex/quick/x86/fp_x86.cc6
-rwxr-xr-xcompiler/dex/quick/x86/int_x86.cc7
-rw-r--r--compiler/driver/compiler_driver.cc33
-rw-r--r--compiler/driver/compiler_driver.h12
-rw-r--r--compiler/oat_test.cc4
-rw-r--r--compiler/optimizing/builder.cc479
-rw-r--r--compiler/optimizing/builder.h42
-rw-r--r--compiler/optimizing/code_generator.cc18
-rw-r--r--compiler/optimizing/code_generator.h5
-rw-r--r--compiler/optimizing/code_generator_arm.cc378
-rw-r--r--compiler/optimizing/code_generator_arm.h6
-rw-r--r--compiler/optimizing/code_generator_arm64.cc676
-rw-r--r--compiler/optimizing/code_generator_arm64.h31
-rw-r--r--compiler/optimizing/code_generator_x86.cc456
-rw-r--r--compiler/optimizing/code_generator_x86.h6
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc436
-rw-r--r--compiler/optimizing/code_generator_x86_64.h6
-rw-r--r--compiler/optimizing/codegen_test.cc21
-rw-r--r--compiler/optimizing/nodes.h174
-rw-r--r--compiler/optimizing/register_allocator_test.cc2
-rw-r--r--compiler/optimizing/ssa_builder.cc5
-rw-r--r--compiler/optimizing/ssa_builder.h1
-rw-r--r--compiler/utils/arm/assembler_arm.h3
-rw-r--r--compiler/utils/arm/assembler_arm32.cc19
-rw-r--r--compiler/utils/arm/assembler_arm32.h2
-rw-r--r--compiler/utils/arm/assembler_arm32_test.cc119
-rw-r--r--compiler/utils/arm/assembler_thumb2.cc21
-rw-r--r--compiler/utils/arm/assembler_thumb2.h2
-rw-r--r--compiler/utils/arm/assembler_thumb2_test.cc123
-rw-r--r--compiler/utils/assembler_test.h15
-rw-r--r--compiler/utils/x86/assembler_x86.cc23
-rw-r--r--compiler/utils/x86/assembler_x86.h3
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc121
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h12
-rw-r--r--dex2oat/dex2oat.cc42
-rw-r--r--oatdump/oatdump.cc18
-rw-r--r--runtime/Android.mk2
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S3
-rw-r--r--runtime/base/logging.cc24
-rw-r--r--runtime/base/logging.h4
-rw-r--r--runtime/base/mutex.cc15
-rw-r--r--runtime/base/mutex.h12
-rw-r--r--runtime/class_linker_test.cc1
-rw-r--r--runtime/debugger.cc14
-rw-r--r--runtime/dex_method_iterator_test.cc8
-rw-r--r--runtime/fault_handler.cc49
-rw-r--r--runtime/gc/space/image_space.cc12
-rw-r--r--runtime/image.cc2
-rw-r--r--runtime/java_vm_ext.cc6
-rw-r--r--runtime/java_vm_ext_test.cc132
-rw-r--r--runtime/jdwp/jdwp_socket.cc2
-rw-r--r--runtime/jni_internal_test.cc20
-rw-r--r--runtime/mirror/class.h3
-rw-r--r--runtime/monitor.cc2
-rw-r--r--runtime/native/dalvik_system_VMStack.cc7
-rw-r--r--runtime/native/java_lang_Thread.cc6
-rw-r--r--runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc7
-rw-r--r--runtime/oat.cc2
-rw-r--r--runtime/thread-inl.h2
-rw-r--r--runtime/thread.cc46
-rw-r--r--runtime/thread.h4
-rw-r--r--runtime/thread_list.cc12
-rw-r--r--runtime/thread_list.h2
-rw-r--r--runtime/utils.cc3
-rw-r--r--test/411-optimizing-arith/src/Main.java2
-rw-r--r--test/415-optimizing-arith-neg/src/Main.java16
-rw-r--r--test/416-optimizing-arith-not/src/Main.java24
-rw-r--r--test/417-optimizing-arith-div/src/Main.java60
-rw-r--r--test/422-type-conversion/src/Main.java180
-rw-r--r--test/424-checkcast/expected.txt0
-rw-r--r--test/424-checkcast/info.txt1
-rw-r--r--test/424-checkcast/src/Main.java73
-rw-r--r--test/425-invoke-super/expected.txt0
-rw-r--r--test/425-invoke-super/info.txt1
-rw-r--r--test/425-invoke-super/smali/invokesuper.smali40
-rw-r--r--test/425-invoke-super/smali/subclass.smali29
-rw-r--r--test/425-invoke-super/smali/superclass.smali29
-rw-r--r--test/425-invoke-super/src/Main.java53
-rw-r--r--test/426-monitor/expected.txt5
-rw-r--r--test/426-monitor/info.txt1
-rw-r--r--test/426-monitor/src/Main.java52
-rw-r--r--test/427-bitwise/expected.txt0
-rw-r--r--test/427-bitwise/info.txt1
-rw-r--r--test/427-bitwise/src/Main.java233
-rw-r--r--test/427-bounds/expected.txt0
-rw-r--r--test/427-bounds/info.txt2
-rw-r--r--test/427-bounds/src/Main.java51
-rw-r--r--test/800-smali/expected.txt1
-rwxr-xr-xtest/800-smali/smali/negLong.smali186
-rw-r--r--test/800-smali/src/Main.java1
-rw-r--r--test/Android.run-test.mk28
97 files changed, 3978 insertions, 879 deletions
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 7e58f5c2bb..a221cfc586 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -173,7 +173,9 @@ art_gcc_cflags := -Wunused-but-set-parameter
ifeq ($(ART_HOST_CLANG),true)
- ART_HOST_CFLAGS += $(art_clang_cflags)
+ # Bug: 15446488. We don't omit the frame pointer to work around
+ # clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress.
+ ART_HOST_CFLAGS += $(art_clang_cflags) -fno-omit-frame-pointer
else
ART_HOST_CFLAGS += $(art_gcc_cflags)
endif
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1b4616f21a..a8041c5365 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -61,10 +61,9 @@ ART_GTEST_transaction_test_DEX_DEPS := Transaction
# The elf writer test has dependencies on core.oat.
ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_default_no-pic_64) $(TARGET_CORE_IMAGE_default_no-pic_32)
-ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
-ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
+
+# TODO: document why this is needed.
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
-ART_GTEST_dex_method_iterator_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
# The path for which all the source files are relative, not actually the current directory.
LOCAL_PATH := art
@@ -113,6 +112,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/instruction_set_test.cc \
runtime/intern_table_test.cc \
runtime/interpreter/safe_math_test.cc \
+ runtime/java_vm_ext_test.cc \
runtime/leb128_test.cc \
runtime/mem_map_test.cc \
runtime/mirror/dex_cache_test.cc \
@@ -186,7 +186,9 @@ COMPILER_GTEST_TARGET_SRC_FILES := \
COMPILER_GTEST_HOST_SRC_FILES := \
$(COMPILER_GTEST_COMMON_SRC_FILES) \
- compiler/utils//assembler_thumb_test.cc \
+ compiler/utils/arm/assembler_arm32_test.cc \
+ compiler/utils/arm/assembler_thumb2_test.cc \
+ compiler/utils/assembler_thumb_test.cc \
compiler/utils/x86/assembler_x86_test.cc \
compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -218,7 +220,7 @@ LOCAL_CFLAGS := $(ART_HOST_CFLAGS)
LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/compiler
LOCAL_SHARED_LIBRARIES := libartd libartd-compiler
-LOCAL_STATIC_LIBRARIES := libgtest_libc++_host
+LOCAL_STATIC_LIBRARIES := libgtest_host
LOCAL_LDLIBS += -ldl -lpthread
LOCAL_MULTILIB := both
LOCAL_CLANG := $(ART_HOST_CLANG)
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index bfdb537427..085d169c6d 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -156,7 +156,7 @@ void CommonCompilerTest::SetUp() {
method_inliner_map_.get(),
compiler_kind, instruction_set,
instruction_set_features_.get(),
- true, new std::set<std::string>,
+ true, new std::set<std::string>, nullptr,
2, true, true, timer_.get(), ""));
}
// We typically don't generate an image in unit tests, disable this optimization by default.
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index b35d51c46e..a0ad2133be 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -771,11 +771,11 @@ void MIRGraph::CombineBlocks(class BasicBlock* bb) {
if ((df_attributes & DF_IFIELD) != 0) {
// Combine only if fast, otherwise weird things can happen.
const MirIFieldLoweringInfo& field_info = GetIFieldLoweringInfo(throw_insn);
- ok = (df_attributes & DF_DA) ? field_info.FastPut() : field_info.FastGet();
+ ok = (df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut();
} else if ((df_attributes & DF_SFIELD) != 0) {
// Combine only if fast, otherwise weird things can happen.
const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(throw_insn);
- bool fast = ((df_attributes & DF_DA) ? field_info.FastPut() : field_info.FastGet());
+ bool fast = ((df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut());
// Don't combine if the SGET/SPUT can call <clinit>().
bool clinit = !field_info.IsClassInitialized() &&
(throw_insn->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0;
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 12a21d0238..7674e46fc0 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -429,7 +429,11 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
+ if (loc.ref) {
+ StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
+ } else {
+ Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
+ }
}
}
/*
@@ -485,9 +489,17 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
} else if (!info->is_range) {
// TUNING: interleave
for (int i = 0; i < elems; i++) {
- RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
- Store32Disp(ref_reg,
- mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
+ RegLocation rl_arg;
+ if (info->args[i].ref) {
+ rl_arg = LoadValue(info->args[i], kRefReg);
+ StoreRefDisp(ref_reg,
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg,
+ kNotVolatile);
+ } else {
+ rl_arg = LoadValue(info->args[i], kCoreReg);
+ Store32Disp(ref_reg,
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
+ }
// If the LoadValue caused a temp to be allocated, free it
if (IsTemp(rl_arg.reg)) {
FreeTemp(rl_arg.reg);
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index ccaa167d6a..92ef70db7e 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -417,10 +417,10 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
RegLocation rl_src[3];
RegLocation rl_dest = mir_graph_->GetBadLoc();
RegLocation rl_result = mir_graph_->GetBadLoc();
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- int opt_flags = mir->optimization_flags;
- uint32_t vB = mir->dalvikInsn.vB;
- uint32_t vC = mir->dalvikInsn.vC;
+ const Instruction::Code opcode = mir->dalvikInsn.opcode;
+ const int opt_flags = mir->optimization_flags;
+ const uint32_t vB = mir->dalvikInsn.vB;
+ const uint32_t vC = mir->dalvikInsn.vC;
DCHECK(CheckCorePoolSanity()) << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " @ 0x:"
<< std::hex << current_dalvik_offset_;
@@ -572,7 +572,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenThrow(rl_src[0]);
break;
- case Instruction::ARRAY_LENGTH:
+ case Instruction::ARRAY_LENGTH: {
int len_offset;
len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = LoadValue(rl_src[0], kRefReg);
@@ -582,7 +582,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
MarkPossibleNullPointerException(opt_flags);
StoreValue(rl_dest, rl_result);
break;
-
+ }
case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO:
GenConstString(vB, rl_dest);
@@ -666,8 +666,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
}
break;
- }
-
+ }
case Instruction::IF_EQZ:
case Instruction::IF_NEZ:
case Instruction::IF_LTZ:
@@ -693,7 +692,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenCompareZeroAndBranch(opcode, rl_src[0], taken);
}
break;
- }
+ }
case Instruction::AGET_WIDE:
GenArrayGet(opt_flags, k64, rl_src[0], rl_src[1], rl_dest, 3);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index bc02eee669..4825db6402 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -584,16 +584,16 @@ void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
- rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
+ rl_result = EvalLocWide(rl_dest, kCoreReg, true);
OpRegCopy(rl_result.reg, rl_src.reg);
// Flip sign bit.
NewLIR2(kX86Rol64RI, rl_result.reg.GetReg(), 1);
NewLIR2(kX86Xor64RI, rl_result.reg.GetReg(), 1);
NewLIR2(kX86Ror64RI, rl_result.reg.GetReg(), 1);
} else {
- OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
- OpRegCopy(rl_result.reg, rl_src.reg);
+ rl_result = ForceTempWide(rl_src);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), 0x80000000);
}
StoreValueWide(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 781c12807b..3f501b4cd3 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -2250,13 +2250,6 @@ void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
} else {
rl_result = ForceTempWide(rl_src);
- if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
- ((rl_dest.reg.GetLowReg() == rl_src.reg.GetHighReg()))) {
- // The registers are the same, so we would clobber it before the use.
- RegStorage temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_result.reg);
- rl_result.reg.SetHighReg(temp_reg.GetReg());
- }
OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_result.reg.GetLow()); // rLow = -rLow
OpRegImm(kOpAdc, rl_result.reg.GetHigh(), 0); // rHigh = rHigh + CF
OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_result.reg.GetHigh()); // rHigh = -rHigh
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index aab94c000f..08041e8f24 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -330,7 +330,8 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- bool image, std::set<std::string>* image_classes, size_t thread_count,
+ bool image, std::set<std::string>* image_classes,
+ std::set<std::string>* compiled_classes, size_t thread_count,
bool dump_stats, bool dump_passes, CumulativeLogger* timer,
const std::string& profile_file)
: profile_present_(false), compiler_options_(compiler_options),
@@ -346,6 +347,7 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
non_relative_linker_patch_count_(0u),
image_(image),
image_classes_(image_classes),
+ classes_to_compile_(compiled_classes),
thread_count_(thread_count),
stats_(new AOTCompilationStats),
dump_stats_(dump_stats),
@@ -570,7 +572,7 @@ void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger* timings
class_def);
}
CompileMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx, jclass_loader,
- *dex_file, dex_to_dex_compilation_level);
+ *dex_file, dex_to_dex_compilation_level, true);
self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
@@ -613,6 +615,17 @@ bool CompilerDriver::IsImageClass(const char* descriptor) const {
}
}
+bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
+ if (!IsImage()) {
+ return true;
+ } else {
+ if (classes_to_compile_ == nullptr) {
+ return true;
+ }
+ return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
+ }
+}
+
static void ResolveExceptionsForMethod(MutableMethodHelper* mh,
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1916,6 +1929,10 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
it.Next();
}
CompilerDriver* driver = manager->GetCompiler();
+
+ bool compilation_enabled = driver->IsClassToCompile(
+ dex_file.StringByTypeIdx(class_def.class_idx_));
+
// Compile direct methods
int64_t previous_direct_method_idx = -1;
while (it.HasNextDirectMethod()) {
@@ -1929,7 +1946,8 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
previous_direct_method_idx = method_idx;
driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
it.Next();
}
// Compile virtual methods
@@ -1945,7 +1963,8 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
previous_virtual_method_idx = method_idx;
driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
it.Next();
}
DCHECK(!it.HasNext());
@@ -1977,7 +1996,8 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
InvokeType invoke_type, uint16_t class_def_idx,
uint32_t method_idx, jobject class_loader,
const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ DexToDexCompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled) {
CompiledMethod* compiled_method = nullptr;
uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
@@ -1994,7 +2014,8 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
// Abstract methods don't have code.
} else {
MethodReference method_ref(&dex_file, method_idx);
- bool compile = verification_results_->IsCandidateForCompilation(method_ref, access_flags);
+ bool compile = compilation_enabled &&
+ verification_results_->IsCandidateForCompilation(method_ref, access_flags);
if (compile) {
// NOTE: if compiler declines to compile this method, it will return nullptr.
compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 682b17a7d1..ddb2342b42 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -91,6 +91,7 @@ class CompilerDriver {
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
bool image, std::set<std::string>* image_classes,
+ std::set<std::string>* compiled_classes,
size_t thread_count, bool dump_stats, bool dump_passes,
CumulativeLogger* timer, const std::string& profile_file);
@@ -374,6 +375,9 @@ class CompilerDriver {
// Checks if class specified by type_idx is one of the image_classes_
bool IsImageClass(const char* descriptor) const;
+ // Checks if the provided class should be compiled, i.e., is in classes_to_compile_.
+ bool IsClassToCompile(const char* descriptor) const;
+
void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
LOCKS_EXCLUDED(compiled_classes_lock_);
@@ -475,7 +479,8 @@ class CompilerDriver {
void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
jobject class_loader, const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level)
+ DexToDexCompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled)
LOCKS_EXCLUDED(compiled_methods_lock_);
static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index)
@@ -514,6 +519,11 @@ class CompilerDriver {
// included in the image.
std::unique_ptr<std::set<std::string>> image_classes_;
+ // If image_ is true, specifies the classes that will be compiled in
+ // the image. Note if classes_to_compile_ is nullptr, all classes are
+ // included in the image.
+ std::unique_ptr<std::set<std::string>> classes_to_compile_;
+
size_t thread_count_;
class AOTCompilationStats;
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 97b7cc90dd..c384c57c3a 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -109,8 +109,8 @@ TEST_F(OatTest, WriteRead) {
verification_results_.get(),
method_inliner_map_.get(),
compiler_kind, insn_set,
- insn_features.get(), false, nullptr, 2, true, true,
- timer_.get(), ""));
+ insn_features.get(), false, nullptr, nullptr, 2, true,
+ true, timer_.get(), ""));
jobject class_loader = nullptr;
if (kCompile) {
TimingLogger timings2("OatTest::WriteRead", false, false);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8418ab0a7e..df3d57ebdf 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -41,25 +41,29 @@ namespace art {
*/
class Temporaries : public ValueObject {
public:
- Temporaries(HGraph* graph, size_t count) : graph_(graph), count_(count), index_(0) {
- graph_->UpdateNumberOfTemporaries(count_);
- }
+ explicit Temporaries(HGraph* graph) : graph_(graph), index_(0) {}
void Add(HInstruction* instruction) {
- // We currently only support vreg size temps.
- DCHECK(instruction->GetType() != Primitive::kPrimLong
- && instruction->GetType() != Primitive::kPrimDouble);
- HInstruction* temp = new (graph_->GetArena()) HTemporary(index_++);
+ HInstruction* temp = new (graph_->GetArena()) HTemporary(index_);
instruction->GetBlock()->AddInstruction(temp);
+
DCHECK(temp->GetPrevious() == instruction);
+
+ size_t offset;
+ if (instruction->GetType() == Primitive::kPrimLong
+ || instruction->GetType() == Primitive::kPrimDouble) {
+ offset = 2;
+ } else {
+ offset = 1;
+ }
+ index_ += offset;
+
+ graph_->UpdateTemporariesVRegSlots(index_);
}
private:
HGraph* const graph_;
- // The total number of temporaries that will be used.
- const size_t count_;
-
// Current index in the temporary stack, updated by `Add`.
size_t index_;
};
@@ -115,37 +119,37 @@ void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
}
template<typename T>
-void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_offset) {
+void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) {
int32_t target_offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(target_offset, dex_offset);
+ PotentiallyAddSuspendCheck(target_offset, dex_pc);
HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
T* comparison = new (arena_) T(first, second);
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
+ HBasicBlock* target = FindBlockStartingAt(dex_pc + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
- target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
+ target = FindBlockStartingAt(dex_pc + instruction.SizeInCodeUnits());
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
current_block_ = nullptr;
}
template<typename T>
-void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_offset) {
+void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
int32_t target_offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(target_offset, dex_offset);
+ PotentiallyAddSuspendCheck(target_offset, dex_pc);
HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
T* comparison = new (arena_) T(value, GetIntConstant(0));
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
+ HBasicBlock* target = FindBlockStartingAt(dex_pc + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
- target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
+ target = FindBlockStartingAt(dex_pc + instruction.SizeInCodeUnits());
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
current_block_ = nullptr;
@@ -192,13 +196,13 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
InitializeParameters(code_item.ins_size_);
- size_t dex_offset = 0;
+ size_t dex_pc = 0;
while (code_ptr < code_end) {
- // Update the current block if dex_offset starts a new block.
- MaybeUpdateCurrentBlock(dex_offset);
+ // Update the current block if dex_pc starts a new block.
+ MaybeUpdateCurrentBlock(dex_pc);
const Instruction& instruction = *Instruction::At(code_ptr);
- if (!AnalyzeDexInstruction(instruction, dex_offset)) return nullptr;
- dex_offset += instruction.SizeInCodeUnits();
+ if (!AnalyzeDexInstruction(instruction, dex_pc)) return nullptr;
+ dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
}
@@ -239,25 +243,25 @@ void HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr, const uint16_
// Iterate over all instructions and find branching instructions. Create blocks for
// the locations these instructions branch to.
- size_t dex_offset = 0;
+ size_t dex_pc = 0;
while (code_ptr < code_end) {
const Instruction& instruction = *Instruction::At(code_ptr);
if (instruction.IsBranch()) {
- int32_t target = instruction.GetTargetOffset() + dex_offset;
+ int32_t target = instruction.GetTargetOffset() + dex_pc;
// Create a block for the target instruction.
if (FindBlockStartingAt(target) == nullptr) {
block = new (arena_) HBasicBlock(graph_, target);
branch_targets_.Put(target, block);
}
- dex_offset += instruction.SizeInCodeUnits();
+ dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
- if ((code_ptr < code_end) && (FindBlockStartingAt(dex_offset) == nullptr)) {
- block = new (arena_) HBasicBlock(graph_, dex_offset);
- branch_targets_.Put(dex_offset, block);
+ if ((code_ptr < code_end) && (FindBlockStartingAt(dex_pc) == nullptr)) {
+ block = new (arena_) HBasicBlock(graph_, dex_pc);
+ branch_targets_.Put(dex_pc, block);
}
} else {
code_ptr += instruction.SizeInCodeUnits();
- dex_offset += instruction.SizeInCodeUnits();
+ dex_pc += instruction.SizeInCodeUnits();
}
}
}
@@ -291,6 +295,16 @@ void HGraphBuilder::Binop_23x(const Instruction& instruction, Primitive::Type ty
}
template<typename T>
+void HGraphBuilder::Binop_23x(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), type);
+ HInstruction* second = LoadLocal(instruction.VRegC(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+template<typename T>
void HGraphBuilder::Binop_12x(const Instruction& instruction, Primitive::Type type) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), type);
@@ -299,6 +313,16 @@ void HGraphBuilder::Binop_12x(const Instruction& instruction, Primitive::Type ty
}
template<typename T>
+void HGraphBuilder::Binop_12x(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegA(), type);
+ HInstruction* second = LoadLocal(instruction.VRegB(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+template<typename T>
void HGraphBuilder::Binop_22s(const Instruction& instruction, bool reverse) {
HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
HInstruction* second = GetIntConstant(instruction.VRegC_22s());
@@ -332,7 +356,7 @@ void HGraphBuilder::BuildReturn(const Instruction& instruction, Primitive::Type
}
bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
uint32_t method_idx,
uint32_t number_of_vreg_arguments,
bool is_range,
@@ -374,39 +398,44 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
HInvoke* invoke = nullptr;
- if (invoke_type == kVirtual || invoke_type == kInterface) {
+ if (invoke_type == kVirtual || invoke_type == kInterface || invoke_type == kSuper) {
MethodReference target_method(dex_file_, method_idx);
uintptr_t direct_code;
uintptr_t direct_method;
int table_index;
InvokeType optimized_invoke_type = invoke_type;
- // TODO: Add devirtualization support.
- compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
+ compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc, true, true,
&optimized_invoke_type, &target_method, &table_index,
&direct_code, &direct_method);
if (table_index == -1) {
return false;
}
- if (invoke_type == kVirtual) {
+ if (optimized_invoke_type == kVirtual) {
invoke = new (arena_) HInvokeVirtual(
- arena_, number_of_arguments, return_type, dex_offset, table_index);
- } else {
- DCHECK_EQ(invoke_type, kInterface);
+ arena_, number_of_arguments, return_type, dex_pc, table_index);
+ } else if (optimized_invoke_type == kInterface) {
invoke = new (arena_) HInvokeInterface(
- arena_, number_of_arguments, return_type, dex_offset, method_idx, table_index);
+ arena_, number_of_arguments, return_type, dex_pc, method_idx, table_index);
+ } else if (optimized_invoke_type == kDirect) {
+ // For this compiler, sharpening only works if we compile PIC.
+ DCHECK(compiler_driver_->GetCompilerOptions().GetCompilePic());
+ // Treat invoke-direct like static calls for now.
+ invoke = new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index);
}
} else {
+ DCHECK(invoke_type == kDirect || invoke_type == kStatic);
// Treat invoke-direct like static calls for now.
invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ arena_, number_of_arguments, return_type, dex_pc, method_idx);
}
size_t start_index = 0;
- Temporaries temps(graph_, is_instance_call ? 1 : 0);
+ Temporaries temps(graph_);
if (is_instance_call) {
HInstruction* arg = LoadLocal(is_range ? register_index : args[0], Primitive::kPrimNot);
- HNullCheck* null_check = new (arena_) HNullCheck(arg, dex_offset);
+ HNullCheck* null_check = new (arena_) HNullCheck(arg, dex_pc);
current_block_->AddInstruction(null_check);
temps.Add(null_check);
invoke->SetArgumentAt(0, null_check);
@@ -420,7 +449,7 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
bool is_wide = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
if (!is_range && is_wide && args[i] + 1 != args[i + 1]) {
LOG(WARNING) << "Non sequential register pair in " << dex_compilation_unit_->GetSymbol()
- << " at " << dex_offset;
+ << " at " << dex_pc;
// We do not implement non sequential register pair.
return false;
}
@@ -438,7 +467,7 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
}
bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put) {
uint32_t source_or_dest_reg = instruction.VRegA_22c();
uint32_t obj_reg = instruction.VRegB_22c();
@@ -459,9 +488,9 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot);
- current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_offset));
+ current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
if (is_put) {
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
HInstruction* null_check = current_block_->GetLastInstruction();
// We need one temporary for the null check.
temps.Add(null_check);
@@ -485,7 +514,7 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put) {
uint32_t source_or_dest_reg = instruction.VRegA_21c();
uint16_t field_index = instruction.VRegB_21c();
@@ -515,18 +544,18 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
HLoadClass* constant = new (arena_) HLoadClass(
- storage_index, is_referrers_class, dex_offset);
+ storage_index, is_referrers_class, dex_pc);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
if (!is_initialized) {
- cls = new (arena_) HClinitCheck(constant, dex_offset);
+ cls = new (arena_) HClinitCheck(constant, dex_pc);
current_block_->AddInstruction(cls);
}
if (is_put) {
// We need to keep the class alive before loading the value.
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
temps.Add(cls);
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
DCHECK_EQ(value->GetType(), field_type);
@@ -539,29 +568,41 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
return true;
}
-void HGraphBuilder::BuildCheckedDiv(uint16_t out_reg,
- uint16_t first_reg,
- int32_t second_reg,
- uint32_t dex_offset,
+void HGraphBuilder::BuildCheckedDiv(uint16_t out_vreg,
+ uint16_t first_vreg,
+ int64_t second_vreg_or_constant,
+ uint32_t dex_pc,
Primitive::Type type,
- bool second_is_lit) {
- DCHECK(type == Primitive::kPrimInt);
-
- HInstruction* first = LoadLocal(first_reg, type);
- HInstruction* second = second_is_lit ? GetIntConstant(second_reg) : LoadLocal(second_reg, type);
- if (!second->IsIntConstant() || (second->AsIntConstant()->GetValue() == 0)) {
- second = new (arena_) HDivZeroCheck(second, dex_offset);
- Temporaries temps(graph_, 1);
+ bool second_is_constant) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ HInstruction* first = LoadLocal(first_vreg, type);
+ HInstruction* second = nullptr;
+ if (second_is_constant) {
+ if (type == Primitive::kPrimInt) {
+ second = GetIntConstant(second_vreg_or_constant);
+ } else {
+ second = GetLongConstant(second_vreg_or_constant);
+ }
+ } else {
+ second = LoadLocal(second_vreg_or_constant, type);
+ }
+
+ if (!second_is_constant
+ || (type == Primitive::kPrimInt && second->AsIntConstant()->GetValue() == 0)
+ || (type == Primitive::kPrimLong && second->AsLongConstant()->GetValue() == 0)) {
+ second = new (arena_) HDivZeroCheck(second, dex_pc);
+ Temporaries temps(graph_);
current_block_->AddInstruction(second);
temps.Add(current_block_->GetLastInstruction());
}
- current_block_->AddInstruction(new (arena_) HDiv(type, first, second));
- UpdateLocal(out_reg, current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) HDiv(type, first, second, dex_pc));
+ UpdateLocal(out_vreg, current_block_->GetLastInstruction());
}
void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put,
Primitive::Type anticipated_type) {
uint8_t source_or_dest_reg = instruction.VRegA_23x();
@@ -569,10 +610,10 @@ void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
uint8_t index_reg = instruction.VRegC_23x();
// We need one temporary for the null check, one for the index, and one for the length.
- Temporaries temps(graph_, 3);
+ Temporaries temps(graph_);
HInstruction* object = LoadLocal(array_reg, Primitive::kPrimNot);
- object = new (arena_) HNullCheck(object, dex_offset);
+ object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
temps.Add(object);
@@ -580,28 +621,28 @@ void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
current_block_->AddInstruction(length);
temps.Add(length);
HInstruction* index = LoadLocal(index_reg, Primitive::kPrimInt);
- index = new (arena_) HBoundsCheck(index, length, dex_offset);
+ index = new (arena_) HBoundsCheck(index, length, dex_pc);
current_block_->AddInstruction(index);
temps.Add(index);
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type);
// TODO: Insert a type check node if the type is Object.
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, anticipated_type, dex_offset));
+ object, index, value, anticipated_type, dex_pc));
} else {
current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
}
-void HGraphBuilder::BuildFilledNewArray(uint32_t dex_offset,
+void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
uint32_t type_index,
uint32_t number_of_vreg_arguments,
bool is_range,
uint32_t* args,
uint32_t register_index) {
HInstruction* length = GetIntConstant(number_of_vreg_arguments);
- HInstruction* object = new (arena_) HNewArray(length, dex_offset, type_index);
+ HInstruction* object = new (arena_) HNewArray(length, dex_pc, type_index);
current_block_->AddInstruction(object);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
@@ -613,13 +654,13 @@ void HGraphBuilder::BuildFilledNewArray(uint32_t dex_offset,
bool is_reference_array = (primitive == 'L') || (primitive == '[');
Primitive::Type type = is_reference_array ? Primitive::kPrimNot : Primitive::kPrimInt;
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
temps.Add(object);
for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
HInstruction* index = GetIntConstant(i);
current_block_->AddInstruction(
- new (arena_) HArraySet(object, index, value, type, dex_offset));
+ new (arena_) HArraySet(object, index, value, type, dex_pc));
}
latest_result_ = object;
}
@@ -629,26 +670,26 @@ void HGraphBuilder::BuildFillArrayData(HInstruction* object,
const T* data,
uint32_t element_count,
Primitive::Type anticipated_type,
- uint32_t dex_offset) {
+ uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = GetIntConstant(i);
HInstruction* value = GetIntConstant(data[i]);
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, anticipated_type, dex_offset));
+ object, index, value, anticipated_type, dex_pc));
}
}
-void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset) {
- Temporaries temps(graph_, 1);
+void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc) {
+ Temporaries temps(graph_);
HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot);
- HNullCheck* null_check = new (arena_) HNullCheck(array, dex_offset);
+ HNullCheck* null_check = new (arena_) HNullCheck(array, dex_pc);
current_block_->AddInstruction(null_check);
temps.Add(null_check);
HInstruction* length = new (arena_) HArrayLength(null_check);
current_block_->AddInstruction(length);
- int32_t payload_offset = instruction.VRegB_31t() + dex_offset;
+ int32_t payload_offset = instruction.VRegB_31t() + dex_pc;
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(code_start_ + payload_offset);
const uint8_t* data = payload->data;
@@ -657,7 +698,7 @@ void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t
// Implementation of this DEX instruction seems to be that the bounds check is
// done before doing any stores.
HInstruction* last_index = GetIntConstant(payload->element_count - 1);
- current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_offset));
+ current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc));
switch (payload->element_width) {
case 1:
@@ -665,27 +706,27 @@ void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t
reinterpret_cast<const int8_t*>(data),
element_count,
Primitive::kPrimByte,
- dex_offset);
+ dex_pc);
break;
case 2:
BuildFillArrayData(null_check,
reinterpret_cast<const int16_t*>(data),
element_count,
Primitive::kPrimShort,
- dex_offset);
+ dex_pc);
break;
case 4:
BuildFillArrayData(null_check,
reinterpret_cast<const int32_t*>(data),
element_count,
Primitive::kPrimInt,
- dex_offset);
+ dex_pc);
break;
case 8:
BuildFillWideArrayData(null_check,
reinterpret_cast<const int64_t*>(data),
element_count,
- dex_offset);
+ dex_pc);
break;
default:
LOG(FATAL) << "Unknown element width for " << payload->element_width;
@@ -695,24 +736,56 @@ void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t
void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
const int64_t* data,
uint32_t element_count,
- uint32_t dex_offset) {
+ uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = GetIntConstant(i);
HInstruction* value = GetLongConstant(data[i]);
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, Primitive::kPrimLong, dex_offset));
+ object, index, value, Primitive::kPrimLong, dex_pc));
}
}
-void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset) {
+bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_pc) {
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
+ HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc);
+ current_block_->AddInstruction(cls);
+ // The class needs a temporary before being used by the type check.
+ Temporaries temps(graph_);
+ temps.Add(cls);
+ if (instruction.Opcode() == Instruction::INSTANCE_OF) {
+ current_block_->AddInstruction(
+ new (arena_) HInstanceOf(object, cls, type_known_final, dex_pc));
+ UpdateLocal(destination, current_block_->GetLastInstruction());
+ } else {
+ DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
+ current_block_->AddInstruction(
+ new (arena_) HCheckCast(object, cls, type_known_final, dex_pc));
+ }
+ return true;
+}
+
+void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_pc) {
if (target_offset <= 0) {
// Unconditionnally add a suspend check to backward branches. We can remove
// them after we recognize loops in the graph.
- current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_offset));
+ current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_pc));
}
}
-bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset) {
+bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc) {
if (current_block_ == nullptr) {
return true; // Dead code
}
@@ -815,8 +888,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
#define IF_XX(comparison, cond) \
- case Instruction::IF_##cond: If_22t<comparison>(instruction, dex_offset); break; \
- case Instruction::IF_##cond##Z: If_21t<comparison>(instruction, dex_offset); break
+ case Instruction::IF_##cond: If_22t<comparison>(instruction, dex_pc); break; \
+ case Instruction::IF_##cond##Z: If_21t<comparison>(instruction, dex_pc); break
IF_XX(HEqual, EQ);
IF_XX(HNotEqual, NE);
@@ -829,8 +902,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
int32_t offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(offset, dex_offset);
- HBasicBlock* target = FindBlockStartingAt(offset + dex_offset);
+ PotentiallyAddSuspendCheck(offset, dex_pc);
+ HBasicBlock* target = FindBlockStartingAt(offset + dex_pc);
DCHECK(target != nullptr);
current_block_->AddInstruction(new (arena_) HGoto());
current_block_->AddSuccessor(target);
@@ -858,29 +931,31 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
- case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_DIRECT:
- case Instruction::INVOKE_VIRTUAL:
- case Instruction::INVOKE_INTERFACE: {
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_VIRTUAL: {
uint32_t method_idx = instruction.VRegB_35c();
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
- if (!BuildInvoke(instruction, dex_offset, method_idx,
+ if (!BuildInvoke(instruction, dex_pc, method_idx,
number_of_vreg_arguments, false, args, -1)) {
return false;
}
break;
}
- case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_DIRECT_RANGE:
- case Instruction::INVOKE_VIRTUAL_RANGE:
- case Instruction::INVOKE_INTERFACE_RANGE: {
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
uint32_t method_idx = instruction.VRegB_3rc();
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
- if (!BuildInvoke(instruction, dex_offset, method_idx,
+ if (!BuildInvoke(instruction, dex_pc, method_idx,
number_of_vreg_arguments, true, nullptr, register_index)) {
return false;
}
@@ -922,6 +997,16 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::LONG_TO_INT: {
+ Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::INT_TO_BYTE: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimByte);
+ break;
+ }
+
case Instruction::ADD_INT: {
Binop_23x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -989,17 +1074,53 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::DIV_INT: {
BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
- dex_offset, Primitive::kPrimInt, false);
+ dex_pc, Primitive::kPrimInt, false);
+ break;
+ }
+
+ case Instruction::DIV_LONG: {
+ BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_pc, Primitive::kPrimLong, false);
break;
}
case Instruction::DIV_FLOAT: {
- Binop_23x<HDiv>(instruction, Primitive::kPrimFloat);
+ Binop_23x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::DIV_DOUBLE: {
- Binop_23x<HDiv>(instruction, Primitive::kPrimDouble);
+ Binop_23x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc);
+ break;
+ }
+
+ case Instruction::AND_INT: {
+ Binop_23x<HAnd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::AND_LONG: {
+ Binop_23x<HAnd>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::OR_INT: {
+ Binop_23x<HOr>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::OR_LONG: {
+ Binop_23x<HOr>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::XOR_INT: {
+ Binop_23x<HXor>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::XOR_LONG: {
+ Binop_23x<HXor>(instruction, Primitive::kPrimLong);
break;
}
@@ -1060,17 +1181,53 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::DIV_INT_2ADDR: {
BuildCheckedDiv(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
- dex_offset, Primitive::kPrimInt, false);
+ dex_pc, Primitive::kPrimInt, false);
+ break;
+ }
+
+ case Instruction::DIV_LONG_2ADDR: {
+ BuildCheckedDiv(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
+ dex_pc, Primitive::kPrimLong, false);
break;
}
case Instruction::DIV_FLOAT_2ADDR: {
- Binop_12x<HDiv>(instruction, Primitive::kPrimFloat);
+ Binop_12x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::DIV_DOUBLE_2ADDR: {
- Binop_12x<HDiv>(instruction, Primitive::kPrimDouble);
+ Binop_12x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc);
+ break;
+ }
+
+ case Instruction::AND_INT_2ADDR: {
+ Binop_12x<HAnd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::AND_LONG_2ADDR: {
+ Binop_12x<HAnd>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::OR_INT_2ADDR: {
+ Binop_12x<HOr>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::OR_LONG_2ADDR: {
+ Binop_12x<HOr>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::XOR_INT_2ADDR: {
+ Binop_12x<HXor>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::XOR_LONG_2ADDR: {
+ Binop_12x<HXor>(instruction, Primitive::kPrimLong);
break;
}
@@ -1079,6 +1236,21 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::AND_INT_LIT16: {
+ Binop_22s<HAnd>(instruction, false);
+ break;
+ }
+
+ case Instruction::OR_INT_LIT16: {
+ Binop_22s<HOr>(instruction, false);
+ break;
+ }
+
+ case Instruction::XOR_INT_LIT16: {
+ Binop_22s<HXor>(instruction, false);
+ break;
+ }
+
case Instruction::RSUB_INT: {
Binop_22s<HSub>(instruction, true);
break;
@@ -1094,6 +1266,21 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::AND_INT_LIT8: {
+ Binop_22b<HAnd>(instruction, false);
+ break;
+ }
+
+ case Instruction::OR_INT_LIT8: {
+ Binop_22b<HOr>(instruction, false);
+ break;
+ }
+
+ case Instruction::XOR_INT_LIT8: {
+ Binop_22b<HXor>(instruction, false);
+ break;
+ }
+
case Instruction::RSUB_INT_LIT8: {
Binop_22b<HSub>(instruction, true);
break;
@@ -1107,13 +1294,13 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::DIV_INT_LIT16:
case Instruction::DIV_INT_LIT8: {
BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
- dex_offset, Primitive::kPrimInt, true);
+ dex_pc, Primitive::kPrimInt, true);
break;
}
case Instruction::NEW_INSTANCE: {
current_block_->AddInstruction(
- new (arena_) HNewInstance(dex_offset, instruction.VRegB_21c()));
+ new (arena_) HNewInstance(dex_pc, instruction.VRegB_21c()));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
break;
}
@@ -1121,7 +1308,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::NEW_ARRAY: {
HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
current_block_->AddInstruction(
- new (arena_) HNewArray(length, dex_offset, instruction.VRegC_22c()));
+ new (arena_) HNewArray(length, dex_pc, instruction.VRegC_22c()));
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
break;
}
@@ -1131,7 +1318,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
uint32_t type_index = instruction.VRegB_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
- BuildFilledNewArray(dex_offset, type_index, number_of_vreg_arguments, false, args, 0);
+ BuildFilledNewArray(dex_pc, type_index, number_of_vreg_arguments, false, args, 0);
break;
}
@@ -1140,12 +1327,12 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
uint32_t type_index = instruction.VRegB_3rc();
uint32_t register_index = instruction.VRegC_3rc();
BuildFilledNewArray(
- dex_offset, type_index, number_of_vreg_arguments, true, nullptr, register_index);
+ dex_pc, type_index, number_of_vreg_arguments, true, nullptr, register_index);
break;
}
case Instruction::FILL_ARRAY_DATA: {
- BuildFillArrayData(instruction, dex_offset);
+ BuildFillArrayData(instruction, dex_pc);
break;
}
@@ -1171,7 +1358,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
- if (!BuildInstanceFieldAccess(instruction, dex_offset, false)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, false)) {
return false;
}
break;
@@ -1184,7 +1371,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::IPUT_BYTE:
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
- if (!BuildInstanceFieldAccess(instruction, dex_offset, true)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, true)) {
return false;
}
break;
@@ -1197,7 +1384,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
- if (!BuildStaticFieldAccess(instruction, dex_offset, false)) {
+ if (!BuildStaticFieldAccess(instruction, dex_pc, false)) {
return false;
}
break;
@@ -1210,7 +1397,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
- if (!BuildStaticFieldAccess(instruction, dex_offset, true)) {
+ if (!BuildStaticFieldAccess(instruction, dex_pc, true)) {
return false;
}
break;
@@ -1218,11 +1405,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
#define ARRAY_XX(kind, anticipated_type) \
case Instruction::AGET##kind: { \
- BuildArrayAccess(instruction, dex_offset, false, anticipated_type); \
+ BuildArrayAccess(instruction, dex_pc, false, anticipated_type); \
break; \
} \
case Instruction::APUT##kind: { \
- BuildArrayAccess(instruction, dex_offset, true, anticipated_type); \
+ BuildArrayAccess(instruction, dex_pc, true, anticipated_type); \
break; \
}
@@ -1238,7 +1425,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
HInstruction* object = LoadLocal(instruction.VRegB_12x(), Primitive::kPrimNot);
// No need for a temporary for the null check, it is the only input of the following
// instruction.
- object = new (arena_) HNullCheck(object, dex_offset);
+ object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
current_block_->AddInstruction(new (arena_) HArrayLength(object));
UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
@@ -1246,13 +1433,13 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::CONST_STRING: {
- current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_offset));
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING_JUMBO: {
- current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_offset));
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_pc));
UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
break;
}
@@ -1269,7 +1456,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
return false;
}
current_block_->AddInstruction(
- new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset));
+ new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
@@ -1282,7 +1469,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::THROW: {
HInstruction* exception = LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot);
- current_block_->AddInstruction(new (arena_) HThrow(exception, dex_offset));
+ current_block_->AddInstruction(new (arena_) HThrow(exception, dex_pc));
// A throw instruction must branch to the exit block.
current_block_->AddSuccessor(exit_block_);
// We finished building this block. Set the current block to null to avoid
@@ -1292,25 +1479,37 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::INSTANCE_OF: {
+ uint8_t destination = instruction.VRegA_22c();
+ uint8_t reference = instruction.VRegB_22c();
uint16_t type_index = instruction.VRegC_22c();
- bool type_known_final;
- bool type_known_abstract;
- bool is_referrers_class;
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
- &type_known_final, &type_known_abstract, &is_referrers_class);
- if (!can_access) {
+ if (!BuildTypeCheck(instruction, destination, reference, type_index, dex_pc)) {
return false;
}
- HInstruction* object = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimNot);
- HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset);
- current_block_->AddInstruction(cls);
- // The class needs a temporary before being used by the type check.
- Temporaries temps(graph_, 1);
- temps.Add(cls);
- current_block_->AddInstruction(
- new (arena_) HTypeCheck(object, cls, type_known_final, dex_offset));
- UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::CHECK_CAST: {
+ uint8_t reference = instruction.VRegA_21c();
+ uint16_t type_index = instruction.VRegB_21c();
+ if (!BuildTypeCheck(instruction, -1, reference, type_index, dex_pc)) {
+ return false;
+ }
+ break;
+ }
+
+ case Instruction::MONITOR_ENTER: {
+ current_block_->AddInstruction(new (arena_) HMonitorOperation(
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ HMonitorOperation::kEnter,
+ dex_pc));
+ break;
+ }
+
+ case Instruction::MONITOR_EXIT: {
+ current_block_->AddInstruction(new (arena_) HMonitorOperation(
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ HMonitorOperation::kExit,
+ dex_pc));
break;
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 09c9a51260..799e628a78 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -76,7 +76,7 @@ class HGraphBuilder : public ValueObject {
// Analyzes the dex instruction and adds HInstruction to the graph
// to execute that instruction. Returns whether the instruction can
// be handled.
- bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset);
+ bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc);
// Finds all instructions that start a new block, and populates branch_targets_ with
// the newly created blocks.
@@ -92,7 +92,7 @@ class HGraphBuilder : public ValueObject {
HLocal* GetLocalAt(int register_index) const;
void UpdateLocal(int register_index, HInstruction* instruction) const;
HInstruction* LoadLocal(int register_index, Primitive::Type type) const;
- void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset);
+ void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_pc);
void InitializeParameters(uint16_t number_of_parameters);
template<typename T>
@@ -102,16 +102,22 @@ class HGraphBuilder : public ValueObject {
void Binop_23x(const Instruction& instruction, Primitive::Type type);
template<typename T>
+ void Binop_23x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
+
+ template<typename T>
void Binop_12x(const Instruction& instruction, Primitive::Type type);
template<typename T>
+ void Binop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
+
+ template<typename T>
void Binop_22b(const Instruction& instruction, bool reverse);
template<typename T>
void Binop_22s(const Instruction& instruction, bool reverse);
- template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_offset);
- template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_offset);
+ template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_pc);
+ template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_pc);
void Conversion_12x(const Instruction& instruction,
Primitive::Type input_type,
@@ -119,27 +125,27 @@ class HGraphBuilder : public ValueObject {
void BuildCheckedDiv(uint16_t out_reg,
uint16_t first_reg,
- int32_t second_reg, // can be a constant
- uint32_t dex_offset,
+ int64_t second_reg_or_constant,
+ uint32_t dex_pc,
Primitive::Type type,
bool second_is_lit);
void BuildReturn(const Instruction& instruction, Primitive::Type type);
// Builds an instance field access node and returns whether the instruction is supported.
- bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_offset, bool is_put);
+ bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
// Builds a static field access node and returns whether the instruction is supported.
- bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_offset, bool is_put);
+ bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
void BuildArrayAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_get,
Primitive::Type anticipated_type);
// Builds an invocation node and returns whether the instruction is supported.
bool BuildInvoke(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
uint32_t method_idx,
uint32_t number_of_vreg_arguments,
bool is_range,
@@ -147,14 +153,14 @@ class HGraphBuilder : public ValueObject {
uint32_t register_index);
// Builds a new array node and the instructions that fill it.
- void BuildFilledNewArray(uint32_t dex_offset,
+ void BuildFilledNewArray(uint32_t dex_pc,
uint32_t type_index,
uint32_t number_of_vreg_arguments,
bool is_range,
uint32_t* args,
uint32_t register_index);
- void BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset);
+ void BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc);
// Fills the given object with data as specified in the fill-array-data
// instruction. Currently only used for non-reference and non-floating point
@@ -164,14 +170,22 @@ class HGraphBuilder : public ValueObject {
const T* data,
uint32_t element_count,
Primitive::Type anticipated_type,
- uint32_t dex_offset);
+ uint32_t dex_pc);
// Fills the given object with data as specified in the fill-array-data
// instruction. The data must be for long and double arrays.
void BuildFillWideArrayData(HInstruction* object,
const int64_t* data,
uint32_t element_count,
- uint32_t dex_offset);
+ uint32_t dex_pc);
+
+ // Builds a `HInstanceOf`, or a `HCheckCast` instruction.
+ // Returns whether we succeeded in building the instruction.
+ bool BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_pc);
ArenaAllocator* const arena_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9d172638e1..4d71cb780a 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -51,7 +51,7 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
MarkNotLeaf();
}
ComputeFrameSize(GetGraph()->GetNumberOfLocalVRegs()
- + GetGraph()->GetNumberOfTemporaries()
+ + GetGraph()->GetTemporariesVRegSlots()
+ 1 /* filler */,
0, /* the baseline compiler does not have live registers at slow path */
GetGraph()->GetMaximumNumberOfOutVRegs()
@@ -150,12 +150,15 @@ void CodeGenerator::ComputeFrameSize(size_t number_of_spill_slots,
Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
+ // The type of the previous instruction tells us if we need a single or double stack slot.
+ Primitive::Type type = temp->GetType();
+ int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
// Use the temporary region (right below the dex registers).
int32_t slot = GetFrameSize() - FrameEntrySpillSize()
- kVRegSize // filler
- (number_of_locals * kVRegSize)
- - ((1 + temp->GetIndex()) * kVRegSize);
- return Location::StackSlot(slot);
+ - ((temp_size + temp->GetIndex()) * kVRegSize);
+ return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}
int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
@@ -632,4 +635,13 @@ void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend
}
}
+void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
+ MoveOperands move1(from1, to1, nullptr);
+ MoveOperands move2(from2, to2, nullptr);
+ HParallelMove parallel_move(GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ GetMoveResolver()->EmitNativeCode(&parallel_move);
+}
+
} // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index fc4ea4b5d3..ac4fc67c2c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -33,6 +33,7 @@ static size_t constexpr kUninitializedFrameSize = 0;
class Assembler;
class CodeGenerator;
class DexCompilationUnit;
+class ParallelMoveResolver;
class SrcMap;
class CodeAllocator {
@@ -165,6 +166,8 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
// of the architecture.
static size_t GetCacheOffset(uint32_t index);
+ void EmitParallelMoves(Location from1, Location to1, Location from2, Location to2);
+
protected:
CodeGenerator(HGraph* graph,
size_t number_of_core_registers,
@@ -197,6 +200,8 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
+ virtual ParallelMoveResolver* GetMoveResolver() = 0;
+
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6218fc973a..7444506771 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -22,9 +22,9 @@
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "thread.h"
-#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
#include "utils/stack_checks.h"
namespace art {
@@ -41,7 +41,7 @@ static constexpr bool kExplicitStackOverflowCheck = false;
static constexpr int kNumberOfPushedRegistersAtEntry = 1 + 2; // LR, R6, R7
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2 };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2, R3 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
static constexpr SRegister kRuntimeParameterFpuRegisters[] = { };
@@ -169,11 +169,14 @@ class BoundsCheckSlowPathARM : public SlowPathCodeARM {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- arm_codegen->Move32(
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- arm_codegen->Move32(
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
}
@@ -269,13 +272,19 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
class TypeCheckSlowPathARM : public SlowPathCodeARM {
public:
- explicit TypeCheckSlowPathARM(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathARM(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
@@ -284,28 +293,29 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- arm_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, instruction_->GetDexPc());
- arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
+ }
codegen->RestoreLiveRegisters(locations);
__ b(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
@@ -660,13 +670,13 @@ void CodeGeneratorARM::Move32(Location destination, Location source) {
__ LoadSFromOffset(destination.As<SRegister>(), SP, source.GetStackIndex());
}
} else {
- DCHECK(destination.IsStackSlot());
+ DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
__ StoreToOffset(kStoreWord, source.As<Register>(), SP, destination.GetStackIndex());
} else if (source.IsFpuRegister()) {
__ StoreSToOffset(source.As<SRegister>(), SP, destination.GetStackIndex());
} else {
- DCHECK(source.IsStackSlot());
+ DCHECK(source.IsStackSlot()) << source;
__ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
__ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
}
@@ -768,26 +778,29 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr
return;
}
- if (instruction->IsIntConstant()) {
- int32_t value = instruction->AsIntConstant()->GetValue();
- if (location.IsRegister()) {
- __ LoadImmediate(location.As<Register>(), value);
- } else {
- DCHECK(location.IsStackSlot());
- __ LoadImmediate(IP, value);
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegisterPair()) {
- __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
- __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
- } else {
- DCHECK(location.IsDoubleStackSlot());
- __ LoadImmediate(IP, Low32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- __ LoadImmediate(IP, High32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ int32_t value = const_to_move->AsIntConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ LoadImmediate(location.As<Register>(), value);
+ } else {
+ DCHECK(location.IsStackSlot());
+ __ LoadImmediate(IP, value);
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegisterPair()) {
+ __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
+ __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
+ } else {
+ DCHECK(location.IsDoubleStackSlot());
+ __ LoadImmediate(IP, Low32Bits(value));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
+ __ LoadImmediate(IP, High32Bits(value));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
+ }
}
} else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
@@ -812,7 +825,12 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr
}
} else if (instruction->IsTemporary()) {
Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- Move32(location, temp_location);
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
} else {
DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
switch (instruction->GetType()) {
@@ -1333,11 +1351,48 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // int-to-byte conversion.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // long-to-int conversion.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
// int-to-long conversion.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -1355,7 +1410,6 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
}
break;
- case Primitive::kPrimInt:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1375,11 +1429,56 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // int-to-byte conversion.
+ __ sbfx(out.As<Register>(), in.As<Register>(), 0, 8);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // long-to-int conversion.
+ DCHECK(out.IsRegister());
+ if (in.IsRegisterPair()) {
+ __ Mov(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ LoadFromOffset(kLoadWord, out.As<Register>(), SP, in.GetStackIndex());
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ LoadImmediate(out.As<Register>(), static_cast<int32_t>(value));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
// int-to-long conversion.
DCHECK(out.IsRegisterPair());
DCHECK(in.IsRegister());
@@ -1402,7 +1501,6 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
}
break;
- case Primitive::kPrimInt:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1634,8 +1732,11 @@ void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
}
void LocationsBuilderARM::VisitDiv(HDiv* div) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -1644,7 +1745,13 @@ void LocationsBuilderARM::VisitDiv(HDiv* div) {
break;
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // The runtime helper puts the output in R0,R2.
+ locations->SetOut(Location::RegisterPairLocation(R0, R2));
break;
}
case Primitive::kPrimFloat:
@@ -1673,7 +1780,15 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
+ DCHECK_EQ(R2, out.AsRegisterPairHigh<Register>());
+
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc());
break;
}
@@ -1697,7 +1812,7 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
@@ -1710,9 +1825,36 @@ void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction)
LocationSummary* locations = instruction->GetLocations();
Location value = locations->InAt(0);
- DCHECK(value.IsRegister()) << value;
- __ cmp(value.As<Register>(), ShifterOperand(0));
- __ b(slow_path->GetEntryLabel(), EQ);
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ cmp(value.As<Register>(), ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ b(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
+ __ orrs(IP,
+ value.AsRegisterPairLow<Register>(),
+ ShifterOperand(value.AsRegisterPairHigh<Register>()));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ b(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
+ }
}
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
@@ -2656,7 +2798,7 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
}
-void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2666,7 +2808,7 @@ void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).As<Register>();
Register cls = locations->InAt(1).As<Register>();
@@ -2691,7 +2833,7 @@ void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
- instruction, Location::RegisterLocation(out));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), NE);
__ LoadImmediate(out, 1);
@@ -2705,5 +2847,121 @@ void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
__ Bind(&done);
}
+void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(1).As<Register>();
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ cmp(obj, ShifterOperand(0));
+ __ b(slow_path->GetExitLabel(), EQ);
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ cmp(temp, ShifterOperand(cls));
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
+ codegen_->InvokeRuntime(instruction->IsEnter()
+ ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc());
+}
+
+void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ bool output_overlaps = (instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetOut(Location::RequiresRegister(), output_overlaps);
+}
+
+void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ Register first = locations->InAt(0).As<Register>();
+ Register second = locations->InAt(1).As<Register>();
+ Register out = locations->Out().As<Register>();
+ if (instruction->IsAnd()) {
+ __ and_(out, first, ShifterOperand(second));
+ } else if (instruction->IsOr()) {
+ __ orr(out, first, ShifterOperand(second));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ eor(out, first, ShifterOperand(second));
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location out = locations->Out();
+ if (instruction->IsAnd()) {
+ __ and_(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ and_(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ } else if (instruction->IsOr()) {
+ __ orr(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ orr(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ eor(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ eor(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ }
+ }
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 5d519937f4..acc3fd6a25 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -105,9 +105,10 @@ class LocationsBuilderARM : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ private:
void HandleInvoke(HInvoke* invoke);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
- private:
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -133,6 +134,7 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
ArmAssembler* const assembler_;
CodeGeneratorARM* const codegen_;
@@ -186,7 +188,7 @@ class CodeGeneratorARM : public CodeGenerator {
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverARM* GetMoveResolver() {
+ ParallelMoveResolverARM* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f9cf7d87af..887a4efa19 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -38,15 +38,20 @@ namespace art {
namespace arm64 {
-static bool IsFPType(Primitive::Type type) {
- return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
-}
-
// TODO: clean-up some of the constant definitions.
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;
namespace {
+
+bool IsFPType(Primitive::Type type) {
+ return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
+}
+
+bool Is64BitType(Primitive::Type type) {
+ return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
+}
+
// Convenience helpers to ease conversion to and from VIXL operands.
int VIXLRegCodeFromART(int code) {
@@ -101,6 +106,28 @@ Register InputRegisterAt(HInstruction* instr, int input_index) {
instr->InputAt(input_index)->GetType());
}
+FPRegister DRegisterFrom(Location location) {
+ return FPRegister::DRegFromCode(location.reg());
+}
+
+FPRegister SRegisterFrom(Location location) {
+ return FPRegister::SRegFromCode(location.reg());
+}
+
+FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
+ DCHECK(IsFPType(type));
+ return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
+}
+
+FPRegister OutputFPRegister(HInstruction* instr) {
+ return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
+}
+
+FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
+ return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
int64_t Int64ConstantFrom(Location location) {
HConstant* instr = location.GetConstant();
return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
@@ -138,6 +165,10 @@ Location LocationFrom(const Register& reg) {
return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
}
+Location LocationFrom(const FPRegister& fpreg) {
+ return Location::FpuRegisterLocation(fpreg.code());
+}
+
} // namespace
inline Condition ARM64Condition(IfCondition cond) {
@@ -154,6 +185,22 @@ inline Condition ARM64Condition(IfCondition cond) {
return nv; // Unreachable.
}
+Location ARM64ReturnLocation(Primitive::Type return_type) {
+ DCHECK_NE(return_type, Primitive::kPrimVoid);
+ // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
+ // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
+ // but we use the exact registers for clarity.
+ if (return_type == Primitive::kPrimFloat) {
+ return LocationFrom(s0);
+ } else if (return_type == Primitive::kPrimDouble) {
+ return LocationFrom(d0);
+ } else if (return_type == Primitive::kPrimLong) {
+ return LocationFrom(x0);
+ } else {
+ return LocationFrom(w0);
+ }
+}
+
static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
@@ -177,11 +224,7 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegi
};
Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
- DCHECK_NE(return_type, Primitive::kPrimVoid);
- if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented return type " << return_type;
- }
- return LocationFrom(x0);
+ return ARM64ReturnLocation(return_type);
}
#define __ reinterpret_cast<Arm64Assembler*>(codegen->GetAssembler())->vixl_masm_->
@@ -289,35 +332,25 @@ Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type
LOG(FATAL) << "Unreachable type " << type;
}
- if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented type " << type;
+ if (IsFPType(type) && (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
+ next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
+ } else if (!IsFPType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) {
+ next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
+ : Location::StackSlot(stack_offset);
}
- if (gp_index_ < calling_convention.GetNumberOfRegisters()) {
- next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_));
- if (type == Primitive::kPrimLong) {
- // Double stack slot reserved on the stack.
- stack_index_++;
- }
- } else { // Stack.
- if (type == Primitive::kPrimLong) {
- next_location = Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_));
- // Double stack slot reserved on the stack.
- stack_index_++;
- } else {
- next_location = Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_));
- }
- }
- // Move to the next register/stack slot.
- gp_index_++;
- stack_index_++;
+ // Space on the stack is reserved for all arguments.
+ stack_index_ += Is64BitType(type) ? 2 : 1;
return next_location;
}
CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
: CodeGenerator(graph,
kNumberOfAllocatableRegisters,
- kNumberOfAllocatableFloatingPointRegisters,
+ kNumberOfAllocatableFPRegisters,
kNumberOfAllocatableRegisterPairs),
block_labels_(nullptr),
location_builder_(graph, this),
@@ -359,35 +392,6 @@ void CodeGeneratorARM64::Bind(HBasicBlock* block) {
__ Bind(GetLabelOf(block));
}
-void CodeGeneratorARM64::MoveHelper(Location destination,
- Location source,
- Primitive::Type type) {
- if (source.Equals(destination)) {
- return;
- }
- if (destination.IsRegister()) {
- Register dst = RegisterFrom(destination, type);
- if (source.IsRegister()) {
- Register src = RegisterFrom(source, type);
- DCHECK(dst.IsSameSizeAndType(src));
- __ Mov(dst, src);
- } else {
- DCHECK(dst.Is64Bits() || !source.IsDoubleStackSlot());
- __ Ldr(dst, StackOperandFrom(source));
- }
- } else {
- DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
- if (source.IsRegister()) {
- __ Str(RegisterFrom(source, type), StackOperandFrom(destination));
- } else {
- UseScratchRegisterScope temps(assembler_.vixl_masm_);
- Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
- __ Ldr(temp, StackOperandFrom(source));
- __ Str(temp, StackOperandFrom(destination));
- }
- }
-}
-
void CodeGeneratorARM64::Move(HInstruction* instruction,
Location location,
HInstruction* move_for) {
@@ -397,6 +401,7 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
}
Primitive::Type type = instruction->GetType();
+ DCHECK_NE(type, Primitive::kPrimVoid);
if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
@@ -418,20 +423,10 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
MoveHelper(location, temp_location, type);
} else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
- switch (type) {
- case Primitive::kPrimNot:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- MoveHelper(location, Location::StackSlot(stack_slot), type);
- break;
- case Primitive::kPrimLong:
- MoveHelper(location, Location::DoubleStackSlot(stack_slot), type);
- break;
- default:
- LOG(FATAL) << "Unimplemented type" << type;
+ if (Is64BitType(type)) {
+ MoveHelper(location, Location::DoubleStackSlot(stack_slot), type);
+ } else {
+ MoveHelper(location, Location::StackSlot(stack_slot), type);
}
} else {
@@ -446,24 +441,25 @@ size_t CodeGeneratorARM64::FrameEntrySpillSize() const {
Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
Primitive::Type type = load->GetType();
+
switch (type) {
case Primitive::kPrimNot:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
case Primitive::kPrimLong:
- return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
- case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented type " << type;
- break;
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
case Primitive::kPrimVoid:
- default:
LOG(FATAL) << "Unexpected type " << type;
}
+
LOG(FATAL) << "Unreachable";
return Location::NoLocation();
}
@@ -487,13 +483,19 @@ void CodeGeneratorARM64::SetupBlockedRegisters() const {
// xSuspend (Suspend counter)
// lr
// sp is not part of the allocatable registers, so we don't need to block it.
+ // TODO: Avoid blocking callee-saved registers, and instead preserve them
+ // where necessary.
CPURegList reserved_core_registers = vixl_reserved_core_registers;
reserved_core_registers.Combine(runtime_reserved_core_registers);
- // TODO: See if we should instead allow allocating but preserve those if used.
reserved_core_registers.Combine(quick_callee_saved_registers);
while (!reserved_core_registers.IsEmpty()) {
blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
}
+ CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
+ reserved_fp_registers.Combine(CPURegList::GetCalleeSavedFP());
+  while (!reserved_fp_registers.IsEmpty()) {
+ blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
+ }
}
Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
@@ -501,17 +503,13 @@ Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
LOG(FATAL) << "Unreachable type " << type;
}
- if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented support for floating-point";
- }
-
- ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfXRegisters);
- DCHECK_NE(reg, -1);
- blocked_core_registers_[reg] = true;
-
if (IsFPType(type)) {
+ ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
+ DCHECK_NE(reg, -1);
return Location::FpuRegisterLocation(reg);
} else {
+ ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
+ DCHECK_NE(reg, -1);
return Location::RegisterLocation(reg);
}
}
@@ -524,8 +522,107 @@ void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg
stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
}
+void CodeGeneratorARM64::MoveHelper(Location destination,
+ Location source,
+ Primitive::Type type) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ Register dst = RegisterFrom(destination, type);
+ if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+ DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
+ __ Ldr(dst, StackOperandFrom(source));
+ } else {
+ __ Mov(dst, OperandFrom(source, type));
+ }
+ } else if (destination.IsFpuRegister()) {
+ FPRegister dst = FPRegisterFrom(destination, type);
+ if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+ DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
+ __ Ldr(dst, StackOperandFrom(source));
+ } else if (source.IsFpuRegister()) {
+ __ Fmov(dst, FPRegisterFrom(source, type));
+ } else {
+ HConstant* cst = source.GetConstant();
+ if (cst->IsFloatConstant()) {
+ __ Fmov(dst, cst->AsFloatConstant()->GetValue());
+ } else {
+ DCHECK(cst->IsDoubleConstant());
+ __ Fmov(dst, cst->AsDoubleConstant()->GetValue());
+ }
+ }
+ } else {
+ DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
+ if (source.IsRegister()) {
+ __ Str(RegisterFrom(source, type), StackOperandFrom(destination));
+ } else if (source.IsFpuRegister()) {
+ __ Str(FPRegisterFrom(source, type), StackOperandFrom(destination));
+ } else {
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
+ __ Ldr(temp, StackOperandFrom(source));
+ __ Str(temp, StackOperandFrom(destination));
+ }
+ }
+}
+
+void CodeGeneratorARM64::Load(Primitive::Type type,
+ vixl::Register dst,
+ const vixl::MemOperand& src) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ __ Ldrb(dst, src);
+ break;
+ case Primitive::kPrimByte:
+ __ Ldrsb(dst, src);
+ break;
+ case Primitive::kPrimShort:
+ __ Ldrsh(dst, src);
+ break;
+ case Primitive::kPrimChar:
+ __ Ldrh(dst, src);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong:
+ DCHECK(dst.Is64Bits() == (type == Primitive::kPrimLong));
+ __ Ldr(dst, src);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+}
+
+void CodeGeneratorARM64::Store(Primitive::Type type,
+ vixl::Register rt,
+ const vixl::MemOperand& dst) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ __ Strb(rt, dst);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ __ Strh(rt, dst);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong:
+ DCHECK(rt.Is64Bits() == (type == Primitive::kPrimLong));
+ __ Str(rt, dst);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+}
+
#undef __
-#define __ assembler_->vixl_masm_->
+#define __ GetAssembler()->vixl_masm_->
InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
CodeGeneratorARM64* codegen)
@@ -534,25 +631,23 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
codegen_(codegen) {}
#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
- M(ArrayGet) \
- M(ArraySet) \
+ M(And) \
+ M(CheckCast) \
M(ClinitCheck) \
- M(DoubleConstant) \
- M(Div) \
M(DivZeroCheck) \
- M(FloatConstant) \
+ M(InstanceOf) \
M(InvokeInterface) \
M(LoadClass) \
M(LoadException) \
M(LoadString) \
- M(Neg) \
- M(NewArray) \
+ M(MonitorOperation) \
+ M(Or) \
M(ParallelMove) \
M(StaticFieldGet) \
M(StaticFieldSet) \
M(Throw) \
- M(TypeCheck) \
M(TypeConversion) \
+ M(Xor) \
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
@@ -583,20 +678,21 @@ void LocationsBuilderARM64::HandleAddSub(HBinaryOperation* instr) {
Primitive::Type type = instr->GetResultType();
switch (type) {
case Primitive::kPrimInt:
- case Primitive::kPrimLong: {
+ case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
- }
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
break;
+
default:
- LOG(FATAL) << "Unimplemented " << instr->DebugName() << " type " << type;
+ LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
}
}
@@ -604,28 +700,34 @@ void InstructionCodeGeneratorARM64::HandleAddSub(HBinaryOperation* instr) {
DCHECK(instr->IsAdd() || instr->IsSub());
Primitive::Type type = instr->GetType();
- Register dst = OutputRegister(instr);
- Register lhs = InputRegisterAt(instr, 0);
- Operand rhs = InputOperandAt(instr, 1);
switch (type) {
case Primitive::kPrimInt:
- case Primitive::kPrimLong:
+ case Primitive::kPrimLong: {
+ Register dst = OutputRegister(instr);
+ Register lhs = InputRegisterAt(instr, 0);
+ Operand rhs = InputOperandAt(instr, 1);
if (instr->IsAdd()) {
__ Add(dst, lhs, rhs);
} else {
__ Sub(dst, lhs, rhs);
}
break;
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected add/sub type " << type;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FPRegister dst = OutputFPRegister(instr);
+ FPRegister lhs = InputFPRegisterAt(instr, 0);
+ FPRegister rhs = InputFPRegisterAt(instr, 1);
+ if (instr->IsAdd()) {
+ __ Fadd(dst, lhs, rhs);
+ } else {
+ __ Fsub(dst, lhs, rhs);
+ }
break;
+ }
default:
- LOG(FATAL) << "Unimplemented add/sub type " << type;
+ LOG(FATAL) << "Unexpected add/sub type " << type;
}
}
@@ -637,6 +739,37 @@ void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
HandleAddSub(instruction);
}
+void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type type = instruction->GetType();
+ Register obj = InputRegisterAt(instruction, 0);
+ Register out = OutputRegister(instruction);
+ Location index = locations->InAt(1);
+ size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
+ MemOperand source(obj);
+ UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);
+
+ if (index.IsConstant()) {
+ offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
+ source = MemOperand(obj, offset);
+ } else {
+ Register temp = temps.AcquireSameSizeAs(obj);
+ Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
+ __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
+ source = MemOperand(temp, offset);
+ }
+
+ codegen_->Load(type, out, source);
+}
+
void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
@@ -648,6 +781,53 @@ void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction)
HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
}
+void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool is_object = value_type == Primitive::kPrimNot;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (is_object) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ if (value_type == Primitive::kPrimNot) {
+ __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAputObject).Int32Value()));
+ __ Blr(lr);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+ } else {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = InputRegisterAt(instruction, 0);
+ Register value = InputRegisterAt(instruction, 2);
+ Location index = locations->InAt(1);
+ size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
+ MemOperand destination(obj);
+ UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);
+
+ if (index.IsConstant()) {
+ offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
+ destination = MemOperand(obj, offset);
+ } else {
+ Register temp = temps.AcquireSameSizeAs(obj);
+ Register index_reg = InputRegisterAt(instruction, 1);
+ __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
+ destination = MemOperand(temp, offset);
+ }
+
+ codegen_->Store(value_type, value, destination);
+ }
+}
+
void LocationsBuilderARM64::VisitCompare(HCompare* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -715,6 +895,58 @@ void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef FOR_EACH_CONDITION_INSTRUCTION
+void LocationsBuilderARM64::VisitDiv(HDiv* div) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
+ Primitive::Type type = div->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
+ UNUSED(constant);
+ // Will be generated at use site.
+}
+
void LocationsBuilderARM64::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
@@ -727,6 +959,17 @@ void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
}
}
+void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
+ UNUSED(constant);
+ // Will be generated at use site.
+}
+
void LocationsBuilderARM64::VisitGoto(HGoto* got) {
got->SetLocations(nullptr);
}
@@ -793,44 +1036,9 @@ void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction
}
void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
- Primitive::Type res_type = instruction->GetType();
- Register res = OutputRegister(instruction);
- Register obj = InputRegisterAt(instruction, 0);
- uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-
- switch (res_type) {
- case Primitive::kPrimBoolean: {
- __ Ldrb(res, MemOperand(obj, offset));
- break;
- }
- case Primitive::kPrimByte: {
- __ Ldrsb(res, MemOperand(obj, offset));
- break;
- }
- case Primitive::kPrimShort: {
- __ Ldrsh(res, MemOperand(obj, offset));
- break;
- }
- case Primitive::kPrimChar: {
- __ Ldrh(res, MemOperand(obj, offset));
- break;
- }
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimLong: { // TODO: support volatile.
- DCHECK(res.IsX() == (res_type == Primitive::kPrimLong));
- __ Ldr(res, MemOperand(obj, offset));
- break;
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register res_type " << res_type;
- break;
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable res_type " << res_type;
- }
+ MemOperand field = MemOperand(InputRegisterAt(instruction, 0),
+ instruction->GetFieldOffset().Uint32Value());
+ codegen_->Load(instruction->GetType(), OutputRegister(instruction), field);
}
void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
@@ -840,43 +1048,12 @@ void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction
}
void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
- Register obj = InputRegisterAt(instruction, 0);
+ Primitive::Type field_type = instruction->GetFieldType();
Register value = InputRegisterAt(instruction, 1);
- Primitive::Type field_type = instruction->InputAt(1)->GetType();
- uint32_t offset = instruction->GetFieldOffset().Uint32Value();
-
- switch (field_type) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte: {
- __ Strb(value, MemOperand(obj, offset));
- break;
- }
-
- case Primitive::kPrimShort:
- case Primitive::kPrimChar: {
- __ Strh(value, MemOperand(obj, offset));
- break;
- }
-
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimLong: {
- DCHECK(value.IsX() == (field_type == Primitive::kPrimLong));
- __ Str(value, MemOperand(obj, offset));
-
- if (field_type == Primitive::kPrimNot) {
- codegen_->MarkGCCard(obj, value);
- }
- break;
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- break;
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << field_type;
+ Register obj = InputRegisterAt(instruction, 0);
+ codegen_->Store(field_type, value, MemOperand(obj, instruction->GetFieldOffset().Uint32Value()));
+ if (field_type == Primitive::kPrimNot) {
+ codegen_->MarkGCCard(obj, value);
}
}
@@ -932,11 +1109,13 @@ void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
// temp = method;
__ Ldr(temp, MemOperand(sp, kCurrentMethodStackOffset));
// temp = temp->dex_cache_resolved_methods_;
- __ Ldr(temp, MemOperand(temp.X(), mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ __ Ldr(temp, MemOperand(temp.X(),
+ mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
// temp = temp[index_in_cache];
__ Ldr(temp, MemOperand(temp.X(), index_in_cache));
// lr = temp->entry_point_from_quick_compiled_code_;
- __ Ldr(lr, MemOperand(temp.X(), mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ Ldr(lr, MemOperand(temp.X(),
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
// lr();
__ Blr(lr);
@@ -1012,7 +1191,9 @@ void LocationsBuilderARM64::VisitMul(HMul* mul) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
break;
default:
@@ -1029,7 +1210,7 @@ void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
break;
default:
@@ -1037,6 +1218,71 @@ void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
}
}
+void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Not yet implemented neg type " << neg->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(LocationFrom(x0));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(2)));
+}
+
+void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ InvokeRuntimeCallingConvention calling_convention;
+ Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
+ DCHECK(type_index.Is(w0));
+ Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
+ DCHECK(current_method.Is(w1));
+ __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
+ __ Mov(type_index, instruction->GetTypeIndex());
+ int32_t quick_entrypoint_offset =
+ QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocArrayWithAccessCheck).Int32Value();
+ __ Ldr(lr, MemOperand(tr, quick_entrypoint_offset));
+ __ Blr(lr);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
@@ -1054,7 +1300,9 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
DCHECK(current_method.Is(w1));
__ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
__ Mov(type_index, instruction->GetTypeIndex());
- __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocObjectWithAccessCheck).Int32Value()));
+ int32_t quick_entrypoint_offset =
+ QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocObjectWithAccessCheck).Int32Value();
+ __ Ldr(lr, MemOperand(tr, quick_entrypoint_offset));
__ Blr(lr);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
DCHECK(!codegen_->IsLeafMethod());
@@ -1138,35 +1386,11 @@ void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
Primitive::Type return_type = instruction->InputAt(0)->GetType();
-
- if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented return type " << return_type;
- }
-
- locations->SetInAt(0, LocationFrom(x0));
+ locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
- if (kIsDebugBuild) {
- Primitive::Type type = instruction->InputAt(0)->GetType();
- switch (type) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- DCHECK(InputRegisterAt(instruction, 0).Is(w0));
- break;
-
- case Primitive::kPrimLong:
- DCHECK(InputRegisterAt(instruction, 0).Is(x0));
- break;
-
- default:
- LOG(FATAL) << "Unimplemented return type " << type;
- }
- }
+ UNUSED(instruction);
codegen_->GenerateFrameExit();
__ Br(lr);
}
@@ -1185,16 +1409,18 @@ void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
Primitive::Type field_type = store->InputAt(1)->GetType();
switch (field_type) {
+ case Primitive::kPrimNot:
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
case Primitive::kPrimChar:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
- case Primitive::kPrimNot:
+ case Primitive::kPrimFloat:
locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
break;
case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
break;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 4a41000e8d..54e87f4d9c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -45,11 +45,14 @@ const vixl::Register wSuspend = vixl::w19; // Suspend Register
const vixl::Register xSuspend = vixl::x19;
const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
+const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
const vixl::CPURegList runtime_reserved_core_registers(tr, xSuspend, vixl::lr);
const vixl::CPURegList quick_callee_saved_registers(vixl::CPURegister::kRegister,
vixl::kXRegSize,
kArm64CalleeSaveRefSpills);
+Location ARM64ReturnLocation(Primitive::Type return_type);
+
class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
public:
InvokeDexCallingConvention()
@@ -59,11 +62,7 @@ class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl
kParameterFPRegistersLength) {}
Location GetReturnLocation(Primitive::Type return_type) {
- DCHECK_NE(return_type, Primitive::kPrimVoid);
- if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
- LOG(FATAL) << "Unimplemented return type " << return_type;
- }
- return Location::RegisterLocation(X0);
+ return ARM64ReturnLocation(return_type);
}
@@ -73,7 +72,7 @@ class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl
class InvokeDexCallingConventionVisitor {
public:
- InvokeDexCallingConventionVisitor() : gp_index_(0), stack_index_(0) {}
+ InvokeDexCallingConventionVisitor() : gp_index_(0), fp_index_(0), stack_index_(0) {}
Location GetNextLocation(Primitive::Type type);
Location GetReturnLocation(Primitive::Type return_type) {
@@ -84,6 +83,8 @@ class InvokeDexCallingConventionVisitor {
InvokeDexCallingConvention calling_convention;
// The current index for core registers.
uint32_t gp_index_;
+ // The current index for floating-point registers.
+ uint32_t fp_index_;
// The current stack index.
uint32_t stack_index_;
@@ -204,10 +205,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
// (xzr, wzr), or make for poor allocatable registers (sp alignment
// requirements, etc.). This also facilitates our task as all other registers
// can easily be mapped via to or from their type and index or code.
- static const int kNumberOfAllocatableCoreRegisters = vixl::kNumberOfRegisters - 1;
- static const int kNumberOfAllocatableFloatingPointRegisters = vixl::kNumberOfFPRegisters;
- static const int kNumberOfAllocatableRegisters =
- kNumberOfAllocatableCoreRegisters + kNumberOfAllocatableFloatingPointRegisters;
+ static const int kNumberOfAllocatableRegisters = vixl::kNumberOfRegisters - 1;
+ static const int kNumberOfAllocatableFPRegisters = vixl::kNumberOfFPRegisters;
static constexpr int kNumberOfAllocatableRegisterPairs = 0;
void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
@@ -217,8 +216,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
return InstructionSet::kArm64;
}
- void MoveHelper(Location destination, Location source, Primitive::Type type);
-
void Initialize() OVERRIDE {
HGraph* graph = GetGraph();
int length = graph->GetBlocks().Size();
@@ -228,6 +225,16 @@ class CodeGeneratorARM64 : public CodeGenerator {
}
}
+ // Code generation helpers.
+ void MoveHelper(Location destination, Location source, Primitive::Type type);
+ void Load(Primitive::Type type, vixl::Register dst, const vixl::MemOperand& src);
+ void Store(Primitive::Type type, vixl::Register rt, const vixl::MemOperand& dst);
+
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: MoveResolver";
+ return nullptr;
+ }
+
private:
// Labels for each block that will be compiled.
vixl::Label* block_labels_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 82591b0ebf..6e1abbf076 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -36,7 +36,7 @@ static constexpr bool kExplicitStackOverflowCheck = false;
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { };
@@ -140,9 +140,14 @@ class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ x86_codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
}
@@ -270,13 +275,19 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
public:
- TypeCheckSlowPathX86(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathX86(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
@@ -285,28 +296,33 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- x86_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ x86_codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
+ }
+
+ codegen->RecordPcInfo(instruction_, dex_pc_);
+ if (instruction_->IsInstanceOf()) {
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ }
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
};
@@ -559,7 +575,7 @@ void CodeGeneratorX86::Move32(Location destination, Location source) {
__ movss(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
}
} else {
- DCHECK(destination.IsStackSlot());
+ DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
__ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
} else if (source.IsFpuRegister()) {
@@ -620,7 +636,7 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
LOG(FATAL) << "Unimplemented";
}
} else {
- DCHECK(destination.IsDoubleStackSlot());
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
if (source.IsRegisterPair()) {
__ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow<Register>());
__ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
@@ -646,31 +662,44 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
}
void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
- if (instruction->IsIntConstant()) {
- Immediate imm(instruction->AsIntConstant()->GetValue());
- if (location.IsRegister()) {
- __ movl(location.As<Register>(), imm);
- } else if (location.IsStackSlot()) {
- __ movl(Address(ESP, location.GetStackIndex()), imm);
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
- __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
- } else if (location.IsDoubleStackSlot()) {
- __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
- __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ Immediate imm(const_to_move->AsIntConstant()->GetValue());
+ if (location.IsRegister()) {
+ __ movl(location.As<Register>(), imm);
+ } else if (location.IsStackSlot()) {
+ __ movl(Address(ESP, location.GetStackIndex()), imm);
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegisterPair()) {
+ __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
+ __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
+ } else if (location.IsDoubleStackSlot()) {
+ __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
+ __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
}
} else if (instruction->IsTemporary()) {
Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- Move32(location, temp_location);
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
} else if (instruction->IsLoadLocal()) {
int slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
switch (instruction->GetType()) {
@@ -702,12 +731,12 @@ void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstr
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- Move32(location, instruction->GetLocations()->Out());
+ Move32(location, locations->Out());
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- Move64(location, instruction->GetLocations()->Out());
+ Move64(location, locations->Out());
break;
default:
@@ -1261,11 +1290,48 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // int-to-byte conversion.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // long-to-int conversion.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
// int-to-long conversion.
locations->SetInAt(0, Location::RegisterLocation(EAX));
locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
@@ -1283,7 +1349,6 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
}
break;
- case Primitive::kPrimInt:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1303,11 +1368,63 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // int-to-byte conversion.
+ if (in.IsRegister()) {
+ __ movsxb(out.As<Register>(), in.As<ByteRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movsxb(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<int8_t>(value)));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // long-to-int conversion.
+ if (in.IsRegisterPair()) {
+ __ movl(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movl(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<int32_t>(value)));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
// int-to-long conversion.
DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX);
DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX);
@@ -1327,7 +1444,6 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
}
break;
- case Primitive::kPrimInt:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1384,7 +1500,7 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
}
case Primitive::kPrimLong: {
- if (second.IsRegister()) {
+ if (second.IsRegisterPair()) {
__ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
@@ -1460,7 +1576,7 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
}
case Primitive::kPrimLong: {
- if (second.IsRegister()) {
+ if (second.IsRegisterPair()) {
__ subl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ sbbl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
@@ -1597,8 +1713,11 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) {
}
void LocationsBuilderX86::VisitDiv(HDiv* div) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RegisterLocation(EAX));
@@ -1609,7 +1728,13 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) {
break;
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // Runtime helper puts the result in EAX, EDX.
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
break;
}
case Primitive::kPrimFloat:
@@ -1627,12 +1752,13 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) {
void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
LocationSummary* locations = div->GetLocations();
+ Location out = locations->Out();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
- DCHECK(first.Equals(locations->Out()));
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
+ DCHECK(first.Equals(out));
Register first_reg = first.As<Register>();
Register second_reg = second.As<Register>();
DCHECK_EQ(EAX, first_reg);
@@ -1659,16 +1785,28 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(EAX, out.AsRegisterPairLow<Register>());
+ DCHECK_EQ(EDX, out.AsRegisterPairHigh<Register>());
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLdiv)));
+ codegen_->RecordPcInfo(div, div->GetDexPc());
+
break;
}
case Primitive::kPrimFloat: {
+ DCHECK(first.Equals(out));
__ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
+ DCHECK(first.Equals(out));
__ divsd(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
}
@@ -1681,7 +1819,21 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::Any());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::Any());
+ break;
+ }
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (!instruction->InputAt(0)->IsConstant()) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
@@ -1694,18 +1846,39 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction)
LocationSummary* locations = instruction->GetLocations();
Location value = locations->InAt(0);
- if (value.IsRegister()) {
- __ testl(value.As<Register>(), value.As<Register>());
- } else if (value.IsStackSlot()) {
- __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
- } else {
- DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ testl(value.As<Register>(), value.As<Register>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
}
- return;
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
+ Register temp = locations->GetTemp(0).As<Register>();
+ __ movl(temp, value.AsRegisterPairLow<Register>());
+ __ orl(temp, value.AsRegisterPairHigh<Register>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
}
- __ j(kEqual, slow_path->GetEntryLabel());
}
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
@@ -2751,7 +2924,7 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
-void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2761,7 +2934,7 @@ void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).As<Register>();
Location cls = locations->InAt(1);
@@ -2792,7 +2965,7 @@ void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
- instruction, Location::RegisterLocation(out));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -2806,5 +2979,148 @@ void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
__ Bind(&done);
}
+void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Location cls = locations->InAt(1);
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ __ movl(temp, Address(obj, class_offset));
+
+ // Compare the class of `obj` with `cls`.
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+ }
+
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instruction) {
+ __ fs()->call(Address::Absolute(instruction->IsEnter()
+ ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLockObject)
+ : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pUnlockObject)));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ if (second.IsRegister()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), second.As<Register>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), second.As<Register>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), second.As<Register>());
+ }
+ } else if (second.IsConstant()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ }
+ } else {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ }
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ if (second.IsRegisterPair()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ andl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ orl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ xorl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ }
+ } else {
+ if (instruction->IsAnd()) {
+ __ andl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ andl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else if (instruction->IsOr()) {
+ __ orl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ orl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ xorl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ }
+ }
+ }
+}
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 85fe21ca76..841b28b158 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -100,9 +100,10 @@ class LocationsBuilderX86 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ private:
+ void HandleBitwiseOperation(HBinaryOperation* instruction);
void HandleInvoke(HInvoke* invoke);
- private:
CodeGeneratorX86* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -128,6 +129,7 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCodeX86* slow_path, Register class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* instruction);
X86Assembler* const assembler_;
CodeGeneratorX86* const codegen_;
@@ -181,7 +183,7 @@ class CodeGeneratorX86 : public CodeGenerator {
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverX86* GetMoveResolver() {
+ ParallelMoveResolverX86* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d2730a5b37..2393fb5119 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -108,16 +108,23 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
class DivMinusOneSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- explicit DivMinusOneSlowPathX86_64(Register reg) : reg_(reg) {}
+ explicit DivMinusOneSlowPathX86_64(Register reg, Primitive::Type type)
+ : reg_(reg), type_(type) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
- __ negl(CpuRegister(reg_));
+ if (type_ == Primitive::kPrimInt) {
+ __ negl(CpuRegister(reg_));
+ } else {
+ DCHECK_EQ(Primitive::kPrimLong, type_);
+ __ negq(CpuRegister(reg_));
+ }
__ jmp(GetExitLabel());
}
private:
- Register reg_;
+ const Register reg_;
+ const Primitive::Type type_;
DISALLOW_COPY_AND_ASSIGN(DivMinusOneSlowPathX86_64);
};
@@ -179,13 +186,15 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x64_codegen->Move(
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x64_codegen->Move(
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ gs()->call(Address::Absolute(
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -284,13 +293,19 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- TypeCheckSlowPathX86_64(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathX86_64(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -299,29 +314,35 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- x64_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
+ }
+ codegen->RecordPcInfo(instruction_, dex_pc_);
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ if (instruction_->IsInstanceOf()) {
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ }
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
};
@@ -563,26 +584,34 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) {
void CodeGeneratorX86_64::Move(HInstruction* instruction,
Location location,
HInstruction* move_for) {
- if (instruction->IsIntConstant()) {
- Immediate imm(instruction->AsIntConstant()->GetValue());
- if (location.IsRegister()) {
- __ movl(location.As<CpuRegister>(), imm);
- } else if (location.IsStackSlot()) {
- __ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ movq(location.As<CpuRegister>(), Immediate(value));
- } else if (location.IsDoubleStackSlot()) {
- __ movq(CpuRegister(TMP), Immediate(value));
- __ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ Immediate imm(const_to_move->AsIntConstant()->GetValue());
+ if (location.IsRegister()) {
+ __ movl(location.As<CpuRegister>(), imm);
+ } else if (location.IsStackSlot()) {
+ __ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ movq(location.As<CpuRegister>(), Immediate(value));
+ } else if (location.IsDoubleStackSlot()) {
+ __ movq(CpuRegister(TMP), Immediate(value));
+ __ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
}
} else if (instruction->IsLoadLocal()) {
switch (instruction->GetType()) {
@@ -619,7 +648,7 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction,
case Primitive::kPrimLong:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- Move(location, instruction->GetLocations()->Out());
+ Move(location, locations->Out());
break;
default:
@@ -1259,11 +1288,48 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // int-to-byte conversion.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // long-to-int conversion.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
// int-to-long conversion.
// TODO: We would benefit from a (to-be-implemented)
// Location::RegisterOrStackSlot requirement for this input.
@@ -1283,7 +1349,6 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
}
break;
- case Primitive::kPrimInt:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1303,12 +1368,66 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // int-to-byte conversion.
+ if (in.IsRegister()) {
+ __ movsxb(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movsxb(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ __ movl(out.As<CpuRegister>(),
+ Immediate(static_cast<int8_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // long-to-int conversion.
+ if (in.IsRegister()) {
+ __ movl(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movl(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.As<CpuRegister>(), Immediate(static_cast<int32_t>(value)));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
DCHECK(out.IsRegister());
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
// int-to-long conversion.
DCHECK(in.IsRegister());
__ movsxd(out.As<CpuRegister>(), in.As<CpuRegister>());
@@ -1326,7 +1445,6 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
}
break;
- case Primitive::kPrimInt:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1545,7 +1663,8 @@ void LocationsBuilderX86_64::VisitDiv(HDiv* div) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
- case Primitive::kPrimInt: {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RegisterLocation(RAX));
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
@@ -1553,10 +1672,7 @@ void LocationsBuilderX86_64::VisitDiv(HDiv* div) {
locations->AddTemp(Location::RegisterLocation(RDX));
break;
}
- case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
- break;
- }
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -1576,38 +1692,42 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) {
Location second = locations->InAt(1);
DCHECK(first.Equals(locations->Out()));
- switch (div->GetResultType()) {
- case Primitive::kPrimInt: {
+ Primitive::Type type = div->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
CpuRegister first_reg = first.As<CpuRegister>();
CpuRegister second_reg = second.As<CpuRegister>();
DCHECK_EQ(RAX, first_reg.AsRegister());
DCHECK_EQ(RDX, locations->GetTemp(0).As<CpuRegister>().AsRegister());
SlowPathCodeX86_64* slow_path =
- new (GetGraph()->GetArena()) DivMinusOneSlowPathX86_64(first_reg.AsRegister());
+ new (GetGraph()->GetArena()) DivMinusOneSlowPathX86_64(first_reg.AsRegister(), type);
codegen_->AddSlowPath(slow_path);
- // 0x80000000/-1 triggers an arithmetic exception!
- // Dividing by -1 is actually negation and -0x800000000 = 0x80000000 so
- // it's safe to just use negl instead of more complex comparisons.
+      // 0x80000000(00000000)/-1 triggers an arithmetic exception!
+      // Dividing by -1 is actually negation and -0x80000000(00000000) = 0x80000000(00000000)
+      // so it's safe to just use negl/negq instead of more complex comparisons.
__ cmpl(second_reg, Immediate(-1));
__ j(kEqual, slow_path->GetEntryLabel());
- // edx:eax <- sign-extended of eax
- __ cdq();
- // eax = quotient, edx = remainder
- __ idivl(second_reg);
+ if (type == Primitive::kPrimInt) {
+ // edx:eax <- sign-extended of eax
+ __ cdq();
+ // eax = quotient, edx = remainder
+ __ idivl(second_reg);
+ } else {
+ // rdx:rax <- sign-extended of rax
+ __ cqo();
+ // rax = quotient, rdx = remainder
+ __ idivq(second_reg);
+ }
__ Bind(slow_path->GetExitLabel());
break;
}
- case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
- break;
- }
-
case Primitive::kPrimFloat: {
__ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
@@ -1640,18 +1760,40 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
LocationSummary* locations = instruction->GetLocations();
Location value = locations->InAt(0);
- if (value.IsRegister()) {
- __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
- } else if (value.IsStackSlot()) {
- __ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
- } else {
- DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
}
- return;
+ case Primitive::kPrimLong: {
+ if (value.IsRegister()) {
+ __ testq(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsDoubleStackSlot()) {
+ __ cmpq(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
}
- __ j(kEqual, slow_path->GetEntryLabel());
}
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
@@ -2741,7 +2883,7 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
-void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2751,7 +2893,7 @@ void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).As<CpuRegister>();
Location cls = locations->InAt(1);
@@ -2781,7 +2923,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
- instruction, Location::RegisterLocation(out.AsRegister()));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -2795,5 +2937,135 @@ void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
__ Bind(&done);
}
+void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ Location cls = locations->InAt(1);
+ CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ movl(temp, Address(obj, class_offset));
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ // Classes must be equal for the checkcast to succeed.
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ __ gs()->call(Address::Absolute(instruction->IsEnter()
+ ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pLockObject)
+ : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pUnlockObject),
+ true));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86_64::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->GetType() == Primitive::kPrimInt) {
+ locations->SetInAt(1, Location::Any());
+ } else {
+    // Request a register to avoid loading a 64-bit constant.
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86_64::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ if (second.IsRegister()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ }
+ } else if (second.IsConstant()) {
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), imm);
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), imm);
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), imm);
+ }
+ } else {
+ Address address(CpuRegister(RSP), second.GetStackIndex());
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), address);
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), address);
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), address);
+ }
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ if (instruction->IsAnd()) {
+ __ andq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (instruction->IsOr()) {
+ __ orq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ }
+ }
+}
+
} // namespace x86_64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 9565b6f876..4c6e4750d7 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -104,9 +104,10 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ private:
void HandleInvoke(HInvoke* invoke);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
- private:
CodeGeneratorX86_64* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -132,6 +133,7 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCodeX86_64* slow_path, CpuRegister class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
X86_64Assembler* const assembler_;
CodeGeneratorX86_64* const codegen_;
@@ -171,7 +173,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return &assembler_;
}
- ParallelMoveResolverX86_64* GetMoveResolver() {
+ ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index ecee44392e..9752b1d34b 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -362,6 +362,27 @@ NOT_LONG_TEST(ReturnNotLongINT64_MAX,
#undef NOT_LONG_TEST
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_IntToLongOfLongToInt) {
+#else
+TEST(CodegenTest, IntToLongOfLongToInt) {
+#endif
+ const int64_t input = INT64_C(4294967296); // 2^32
+ const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW.
+ const uint16_t word1 = High16Bits(Low32Bits(input));
+ const uint16_t word2 = Low16Bits(High32Bits(input));
+ const uint16_t word3 = High16Bits(High32Bits(input)); // MSW.
+ const uint16_t data[] = FIVE_REGISTERS_CODE_ITEM(
+ Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3,
+ Instruction::CONST_WIDE | 2 << 8, 1, 0, 0, 0,
+ Instruction::ADD_LONG | 0, 0 << 8 | 2, // v0 <- 2^32 + 1
+ Instruction::LONG_TO_INT | 4 << 8 | 0 << 12,
+ Instruction::INT_TO_LONG | 2 << 8 | 4 << 12,
+ Instruction::RETURN_WIDE | 2 << 8);
+
+ TestCodeLong(data, true, 1);
+}
+
TEST(CodegenTest, ReturnAdd1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 2dab605465..5af3cdd2d6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -90,7 +90,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
maximum_number_of_out_vregs_(0),
number_of_vregs_(0),
number_of_in_vregs_(0),
- number_of_temporaries_(0),
+ temporaries_vreg_slots_(0),
current_instruction_id_(0) {}
ArenaAllocator* GetArena() const { return arena_; }
@@ -129,12 +129,12 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
maximum_number_of_out_vregs_ = std::max(new_value, maximum_number_of_out_vregs_);
}
- void UpdateNumberOfTemporaries(size_t count) {
- number_of_temporaries_ = std::max(count, number_of_temporaries_);
+ void UpdateTemporariesVRegSlots(size_t slots) {
+ temporaries_vreg_slots_ = std::max(slots, temporaries_vreg_slots_);
}
- size_t GetNumberOfTemporaries() const {
- return number_of_temporaries_;
+ size_t GetTemporariesVRegSlots() const {
+ return temporaries_vreg_slots_;
}
void SetNumberOfVRegs(uint16_t number_of_vregs) {
@@ -192,8 +192,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
// The number of virtual registers used by parameters of this method.
uint16_t number_of_in_vregs_;
- // The number of temporaries that will be needed for the baseline compiler.
- size_t number_of_temporaries_;
+ // Number of vreg size slots that the temporaries use (used in baseline compiler).
+ size_t temporaries_vreg_slots_;
// The current id to assign to a newly added instruction. See HInstruction.id_.
int current_instruction_id_;
@@ -475,10 +475,12 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
M(Add, BinaryOperation) \
+ M(And, BinaryOperation) \
M(ArrayGet, Instruction) \
M(ArrayLength, Instruction) \
M(ArraySet, Instruction) \
M(BoundsCheck, Instruction) \
+ M(CheckCast, Instruction) \
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
@@ -494,6 +496,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(If, Instruction) \
M(InstanceFieldGet, Instruction) \
M(InstanceFieldSet, Instruction) \
+ M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
M(InvokeInterface, Invoke) \
M(InvokeStatic, Invoke) \
@@ -506,6 +509,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(LoadString, Instruction) \
M(Local, Instruction) \
M(LongConstant, Constant) \
+ M(MonitorOperation, Instruction) \
M(Mul, BinaryOperation) \
M(Neg, UnaryOperation) \
M(NewArray, Instruction) \
@@ -513,6 +517,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(Not, UnaryOperation) \
M(NotEqual, Condition) \
M(NullCheck, Instruction) \
+ M(Or, BinaryOperation) \
M(ParallelMove, Instruction) \
M(ParameterValue, Instruction) \
M(Phi, Instruction) \
@@ -525,8 +530,8 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(SuspendCheck, Instruction) \
M(Temporary, Instruction) \
M(Throw, Instruction) \
- M(TypeCheck, Instruction) \
M(TypeConversion, Instruction) \
+ M(Xor, BinaryOperation) \
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -1745,8 +1750,8 @@ class HMul : public HBinaryOperation {
class HDiv : public HBinaryOperation {
public:
- HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
+ : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
virtual int32_t Evaluate(int32_t x, int32_t y) const {
// Our graph structure ensures we never have 0 for `y` during constant folding.
@@ -1756,9 +1761,13 @@ class HDiv : public HBinaryOperation {
}
virtual int64_t Evaluate(int64_t x, int64_t y) const { return x / y; }
+ uint32_t GetDexPc() const { return dex_pc_; }
+
DECLARE_INSTRUCTION(Div);
private:
+ const uint32_t dex_pc_;
+
DISALLOW_COPY_AND_ASSIGN(HDiv);
};
@@ -1789,6 +1798,54 @@ class HDivZeroCheck : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
};
+class HAnd : public HBinaryOperation {
+ public:
+ HAnd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x & y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x & y; }
+
+ DECLARE_INSTRUCTION(And);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HAnd);
+};
+
+class HOr : public HBinaryOperation {
+ public:
+ HOr(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x | y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x | y; }
+
+ DECLARE_INSTRUCTION(Or);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HOr);
+};
+
+class HXor : public HBinaryOperation {
+ public:
+ HXor(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x ^ y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x ^ y; }
+
+ DECLARE_INSTRUCTION(Xor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HXor);
+};
+
// The value of a parameter in this method. Its location depends on
// the calling convention.
class HParameterValue : public HExpression<0> {
@@ -2105,8 +2162,8 @@ class HBoundsCheck : public HExpression<2> {
* Some DEX instructions are folded into multiple HInstructions that need
* to stay live until the last HInstruction. This class
* is used as a marker for the baseline compiler to ensure its preceding
- * HInstruction stays live. `index` is the temporary number that is used
- * for knowing the stack offset where to store the instruction.
+ * HInstruction stays live. `index` represents the stack location index of the
+ * instruction (the actual offset is computed as index * vreg_size).
*/
class HTemporary : public HTemplateInstruction<0> {
public:
@@ -2114,7 +2171,11 @@ class HTemporary : public HTemplateInstruction<0> {
size_t GetIndex() const { return index_; }
- Primitive::Type GetType() const OVERRIDE { return GetPrevious()->GetType(); }
+ Primitive::Type GetType() const OVERRIDE {
+ // The previous instruction is the one that will be stored in the temporary location.
+ DCHECK(GetPrevious() != nullptr);
+ return GetPrevious()->GetType();
+ }
DECLARE_INSTRUCTION(Temporary);
@@ -2351,12 +2412,12 @@ class HThrow : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HThrow);
};
-class HTypeCheck : public HExpression<2> {
+class HInstanceOf : public HExpression<2> {
public:
- explicit HTypeCheck(HInstruction* object,
- HLoadClass* constant,
- bool class_is_final,
- uint32_t dex_pc)
+ HInstanceOf(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
: HExpression(Primitive::kPrimBoolean, SideEffects::None()),
class_is_final_(class_is_final),
dex_pc_(dex_pc) {
@@ -2366,13 +2427,11 @@ class HTypeCheck : public HExpression<2> {
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
bool NeedsEnvironment() const OVERRIDE {
- // TODO: Can we debug when doing a runtime instanceof check?
return false;
}
@@ -2380,13 +2439,82 @@ class HTypeCheck : public HExpression<2> {
bool IsClassFinal() const { return class_is_final_; }
- DECLARE_INSTRUCTION(TypeCheck);
+ DECLARE_INSTRUCTION(InstanceOf);
+
+ private:
+ const bool class_is_final_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
+};
+
+class HCheckCast : public HTemplateInstruction<2> {
+ public:
+ HCheckCast(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()),
+ class_is_final_(class_is_final),
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ SetRawInputAt(1, constant);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE {
+    // Instruction may throw a ClassCastException.
+ return true;
+ }
+
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsClassFinal() const { return class_is_final_; }
+
+ DECLARE_INSTRUCTION(CheckCast);
private:
const bool class_is_final_;
const uint32_t dex_pc_;
- DISALLOW_COPY_AND_ASSIGN(HTypeCheck);
+ DISALLOW_COPY_AND_ASSIGN(HCheckCast);
+};
+
+class HMonitorOperation : public HTemplateInstruction<1> {
+ public:
+ enum OperationKind {
+ kEnter,
+ kExit,
+ };
+
+ HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()), kind_(kind), dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ }
+
+ // Instruction may throw a Java exception, so we need an environment.
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsEnter() const { return kind_ == kEnter; }
+
+ DECLARE_INSTRUCTION(MonitorOperation);
+
+ protected:
+ const OperationKind kind_;
+ const uint32_t dex_pc_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
};
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 3d81362851..ba4be34ca3 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -713,7 +713,7 @@ static HGraph* BuildDiv(ArenaAllocator* allocator,
graph->AddBlock(block);
entry->AddSuccessor(block);
- *div = new (allocator) HDiv(Primitive::kPrimInt, first, second);
+ *div = new (allocator) HDiv(Primitive::kPrimInt, first, second, 0); // don't care about dex_pc.
block->AddInstruction(*div);
block->AddInstruction(new (allocator) HExit());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index e83c528fab..fec40f93c7 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -253,4 +253,9 @@ void SsaBuilder::VisitInstruction(HInstruction* instruction) {
instruction->SetEnvironment(environment);
}
+void SsaBuilder::VisitTemporary(HTemporary* temp) {
+ // Temporaries are only used by the baseline register allocator.
+ temp->GetBlock()->RemoveInstruction(temp);
+}
+
} // namespace art
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 24f5ac55f7..2207cd6bfa 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -51,6 +51,7 @@ class SsaBuilder : public HGraphVisitor {
void VisitLoadLocal(HLoadLocal* load);
void VisitStoreLocal(HStoreLocal* store);
void VisitInstruction(HInstruction* instruction);
+ void VisitTemporary(HTemporary* instruction);
static HInstruction* GetFloatOrDoubleEquivalent(HInstruction* user,
HInstruction* instruction,
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index dca2ab7517..911000aee8 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -421,6 +421,9 @@ class ArmAssembler : public Assembler {
virtual void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
virtual void udiv(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
+ virtual void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width,
+ Condition cond = AL) = 0;
+
// Load/store instructions.
virtual void ldr(Register rd, const Address& ad, Condition cond = AL) = 0;
virtual void str(Register rd, const Address& ad, Condition cond = AL) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index c8a57b1873..29cbf5851f 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -208,6 +208,25 @@ void Arm32Assembler::udiv(Register rd, Register rn, Register rm, Condition cond)
}
+void Arm32Assembler::sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B26 | B25 | B24 | B23 | B21 |
+ (widthminus1 << 16) |
+ (static_cast<uint32_t>(rd) << 12) |
+ (lsb << 7) |
+ B6 | B4 |
+ static_cast<uint32_t>(rn);
+ Emit(encoding);
+}
+
+
void Arm32Assembler::ldr(Register rd, const Address& ad, Condition cond) {
EmitMemOp(cond, true, false, rd, ad);
}
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index dbabb99933..b582e9e5ab 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -96,6 +96,8 @@ class Arm32Assembler FINAL : public ArmAssembler {
void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
void udiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
+ void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+
// Load/store instructions.
void ldr(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
void str(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
new file mode 100644
index 0000000000..3ba77b5e74
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_arm32.h"
+
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+namespace art {
+
+class AssemblerArm32Test : public AssemblerTest<arm::Arm32Assembler,
+ arm::Register,
+ uint32_t> {
+ protected:
+ std::string GetArchitectureString() OVERRIDE {
+ return "arm";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -marm --no-show-raw-insn";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.insert(end(registers_),
+ { // NOLINT(whitespace/braces)
+ new arm::Register(arm::R0),
+ new arm::Register(arm::R1),
+ new arm::Register(arm::R2),
+ new arm::Register(arm::R3),
+ new arm::Register(arm::R4),
+ new arm::Register(arm::R5),
+ new arm::Register(arm::R6),
+ new arm::Register(arm::R7),
+ new arm::Register(arm::R8),
+ new arm::Register(arm::R9),
+ new arm::Register(arm::R10),
+ new arm::Register(arm::R11),
+ new arm::Register(arm::R12),
+ new arm::Register(arm::R13),
+ new arm::Register(arm::R14),
+ new arm::Register(arm::R15)
+ });
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(&registers_);
+ }
+
+ std::vector<arm::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ private:
+ std::vector<arm::Register*> registers_;
+};
+
+
+TEST_F(AssemblerArm32Test, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+
+TEST_F(AssemblerArm32Test, Sbfx) {
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 32);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 24);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 16);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 31, 1);
+
+ const char* expected =
+ "sbfx r0, r1, #0, #1\n"
+ "sbfx r0, r1, #0, #8\n"
+ "sbfx r0, r1, #0, #16\n"
+ "sbfx r0, r1, #0, #32\n"
+
+ "sbfx r0, r1, #8, #1\n"
+ "sbfx r0, r1, #8, #8\n"
+ "sbfx r0, r1, #8, #16\n"
+ "sbfx r0, r1, #8, #24\n"
+
+ "sbfx r0, r1, #16, #1\n"
+ "sbfx r0, r1, #16, #8\n"
+ "sbfx r0, r1, #16, #16\n"
+
+ "sbfx r0, r1, #31, #1\n";
+ DriverStr(expected, "sbfx");
+}
+
+} // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 053e843377..a309e187df 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -264,6 +264,27 @@ void Thumb2Assembler::udiv(Register rd, Register rn, Register rm, Condition cond
}
+void Thumb2Assembler::sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CheckCondition(cond);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+ uint32_t imm2 = lsb & (B1 | B0); // Bits 0-1 of `lsb`.
+ uint32_t imm3 = (lsb & (B4 | B3 | B2)) >> 2; // Bits 2-4 of `lsb`.
+
+ uint32_t op = 20U /* 0b10100 */;
+ int32_t encoding = B31 | B30 | B29 | B28 | B25 |
+ op << 20 |
+ static_cast<uint32_t>(rn) << 16 |
+ imm3 << 12 |
+ static_cast<uint32_t>(rd) << 8 |
+ imm2 << 6 |
+ widthminus1;
+
+ Emit32(encoding);
+}
+
+
void Thumb2Assembler::ldr(Register rd, const Address& ad, Condition cond) {
EmitLoadStore(cond, true, false, false, false, rd, ad);
}
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 9ccdef7e1e..1fc842cf56 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -118,6 +118,8 @@ class Thumb2Assembler FINAL : public ArmAssembler {
void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
void udiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
+ void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+
// Load/store instructions.
void ldr(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
void str(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
new file mode 100644
index 0000000000..3d9c70d734
--- /dev/null
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_thumb2.h"
+
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+namespace art {
+
+class AssemblerThumb2Test : public AssemblerTest<arm::Thumb2Assembler,
+ arm::Register,
+ uint32_t> {
+ protected:
+ std::string GetArchitectureString() OVERRIDE {
+ return "arm";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " -mthumb";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -marm --no-show-raw-insn";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.insert(end(registers_),
+ { // NOLINT(whitespace/braces)
+ new arm::Register(arm::R0),
+ new arm::Register(arm::R1),
+ new arm::Register(arm::R2),
+ new arm::Register(arm::R3),
+ new arm::Register(arm::R4),
+ new arm::Register(arm::R5),
+ new arm::Register(arm::R6),
+ new arm::Register(arm::R7),
+ new arm::Register(arm::R8),
+ new arm::Register(arm::R9),
+ new arm::Register(arm::R10),
+ new arm::Register(arm::R11),
+ new arm::Register(arm::R12),
+ new arm::Register(arm::R13),
+ new arm::Register(arm::R14),
+ new arm::Register(arm::R15)
+ });
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(&registers_);
+ }
+
+ std::vector<arm::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ private:
+ std::vector<arm::Register*> registers_;
+};
+
+
+TEST_F(AssemblerThumb2Test, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+
+TEST_F(AssemblerThumb2Test, Sbfx) {
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 32);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 24);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 16);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 31, 1);
+
+ const char* expected =
+ "sbfx r0, r1, #0, #1\n"
+ "sbfx r0, r1, #0, #8\n"
+ "sbfx r0, r1, #0, #16\n"
+ "sbfx r0, r1, #0, #32\n"
+
+ "sbfx r0, r1, #8, #1\n"
+ "sbfx r0, r1, #8, #8\n"
+ "sbfx r0, r1, #8, #16\n"
+ "sbfx r0, r1, #8, #24\n"
+
+ "sbfx r0, r1, #16, #1\n"
+ "sbfx r0, r1, #16, #8\n"
+ "sbfx r0, r1, #16, #16\n"
+
+ "sbfx r0, r1, #31, #1\n";
+ DriverStr(expected, "sbfx");
+}
+
+} // namespace art
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 91237ae910..0378176afd 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -394,10 +394,19 @@ class AssemblerTest : public testing::Test {
std::vector<std::string> args;
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetAssemblerCommand() may contain arguments
+ // in addition to the program name.
args.push_back(GetAssemblerCommand());
args.push_back("-o");
args.push_back(to_file);
args.push_back(from_file);
+ std::string cmd = Join(args, ' ');
+
+ args.clear();
+ args.push_back("/bin/sh");
+ args.push_back("-c");
+ args.push_back(cmd);
return Exec(args, error_msg);
}
@@ -414,6 +423,9 @@ class AssemblerTest : public testing::Test {
std::string error_msg;
std::vector<std::string> args;
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetObjdumpCommand() may contain arguments
+ // in addition to the program name.
args.push_back(GetObjdumpCommand());
args.push_back(file);
args.push_back(">");
@@ -490,6 +502,9 @@ class AssemblerTest : public testing::Test {
bool DisassembleBinary(std::string file, std::string* error_msg) {
std::vector<std::string> args;
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetDisassembleCommand() may contain arguments
+ // in addition to the program name.
args.push_back(GetDisassembleCommand());
args.push_back(file);
args.push_back("| sed -n \'/<.data>/,$p\' | sed -e \'s/.*://\'");
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 4ddf9793fd..8ebb40e338 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -873,6 +873,13 @@ void X86Assembler::andl(Register dst, Register src) {
}
+void X86Assembler::andl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x23);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::andl(Register dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(4, Operand(dst), imm);
@@ -886,6 +893,13 @@ void X86Assembler::orl(Register dst, Register src) {
}
+void X86Assembler::orl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0B);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::orl(Register dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(1, Operand(dst), imm);
@@ -898,11 +912,20 @@ void X86Assembler::xorl(Register dst, Register src) {
EmitOperand(dst, Operand(src));
}
+
+void X86Assembler::xorl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x33);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::xorl(Register dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(6, Operand(dst), imm);
}
+
void X86Assembler::addl(Register reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(0, Operand(reg), imm);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index de4e6de878..8aed9348d6 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -350,12 +350,15 @@ class X86Assembler FINAL : public Assembler {
void andl(Register dst, const Immediate& imm);
void andl(Register dst, Register src);
+ void andl(Register dst, const Address& address);
void orl(Register dst, const Immediate& imm);
void orl(Register dst, Register src);
+ void orl(Register dst, const Address& address);
void xorl(Register dst, Register src);
void xorl(Register dst, const Immediate& imm);
+ void xorl(Register dst, const Address& address);
void addl(Register dst, Register src);
void addl(Register reg, const Immediate& imm);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 5b706584bd..bd08b1ff2a 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -120,6 +120,7 @@ void X86_64Assembler::movq(CpuRegister dst, const Immediate& imm) {
void X86_64Assembler::movl(CpuRegister dst, const Immediate& imm) {
+ CHECK(imm.is_int32());
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst);
EmitUint8(0xB8 + dst.LowBits());
@@ -353,7 +354,7 @@ void X86_64Assembler::movss(XmmRegister dst, XmmRegister src) {
void X86_64Assembler::movsxd(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(dst);
+ EmitRex64(dst, src);
EmitUint8(0x63);
EmitRegisterOperand(dst.LowBits(), src.LowBits());
}
@@ -908,6 +909,21 @@ void X86_64Assembler::cmpl(CpuRegister reg, const Address& address) {
}
+void X86_64Assembler::cmpl(const Address& address, CpuRegister reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x39);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
+void X86_64Assembler::cmpl(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(address);
+ EmitComplex(7, address, imm);
+}
+
+
void X86_64Assembler::cmpq(CpuRegister reg0, CpuRegister reg1) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(reg0, reg1);
@@ -932,6 +948,14 @@ void X86_64Assembler::cmpq(CpuRegister reg, const Address& address) {
}
+void X86_64Assembler::cmpq(const Address& address, const Immediate& imm) {
+ CHECK(imm.is_int32()); // cmpq only supports 32b immediate.
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(address);
+ EmitComplex(7, address, imm);
+}
+
+
void X86_64Assembler::addl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -948,21 +972,6 @@ void X86_64Assembler::addl(CpuRegister reg, const Address& address) {
}
-void X86_64Assembler::cmpl(const Address& address, CpuRegister reg) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(reg, address);
- EmitUint8(0x39);
- EmitOperand(reg.LowBits(), address);
-}
-
-
-void X86_64Assembler::cmpl(const Address& address, const Immediate& imm) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(address);
- EmitComplex(7, address, imm);
-}
-
-
void X86_64Assembler::testl(CpuRegister reg1, CpuRegister reg2) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg1, reg2);
@@ -997,6 +1006,14 @@ void X86_64Assembler::testl(CpuRegister reg, const Immediate& immediate) {
}
+void X86_64Assembler::testq(CpuRegister reg1, CpuRegister reg2) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg1, reg2);
+ EmitUint8(0x85);
+ EmitRegisterOperand(reg1.LowBits(), reg2.LowBits());
+}
+
+
void X86_64Assembler::testq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(reg);
@@ -1013,6 +1030,14 @@ void X86_64Assembler::andl(CpuRegister dst, CpuRegister src) {
}
+void X86_64Assembler::andl(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x23);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
void X86_64Assembler::andl(CpuRegister dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst);
@@ -1028,6 +1053,14 @@ void X86_64Assembler::andq(CpuRegister reg, const Immediate& imm) {
}
+void X86_64Assembler::andq(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x23);
+ EmitOperand(dst.LowBits(), Operand(src));
+}
+
+
void X86_64Assembler::orl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1036,6 +1069,14 @@ void X86_64Assembler::orl(CpuRegister dst, CpuRegister src) {
}
+void X86_64Assembler::orl(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x0B);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
void X86_64Assembler::orl(CpuRegister dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst);
@@ -1043,6 +1084,14 @@ void X86_64Assembler::orl(CpuRegister dst, const Immediate& imm) {
}
+void X86_64Assembler::orq(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x0B);
+ EmitOperand(dst.LowBits(), Operand(src));
+}
+
+
void X86_64Assembler::xorl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1051,6 +1100,21 @@ void X86_64Assembler::xorl(CpuRegister dst, CpuRegister src) {
}
+void X86_64Assembler::xorl(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x33);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
+void X86_64Assembler::xorl(CpuRegister dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst);
+ EmitComplex(6, Operand(dst), imm);
+}
+
+
void X86_64Assembler::xorq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(dst, src);
@@ -1219,6 +1283,13 @@ void X86_64Assembler::cdq() {
}
+void X86_64Assembler::cqo() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64();
+ EmitUint8(0x99);
+}
+
+
void X86_64Assembler::idivl(CpuRegister reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg);
@@ -1227,6 +1298,14 @@ void X86_64Assembler::idivl(CpuRegister reg) {
}
+void X86_64Assembler::idivq(CpuRegister reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg);
+ EmitUint8(0xF7);
+ EmitUint8(0xF8 | reg.LowBits());
+}
+
+
void X86_64Assembler::imull(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1772,10 +1851,20 @@ void X86_64Assembler::EmitOptionalRex32(XmmRegister dst, const Operand& operand)
}
}
+void X86_64Assembler::EmitRex64() {
+ EmitOptionalRex(false, true, false, false, false);
+}
+
void X86_64Assembler::EmitRex64(CpuRegister reg) {
EmitOptionalRex(false, true, false, false, reg.NeedsRex());
}
+void X86_64Assembler::EmitRex64(const Operand& operand) {
+ uint8_t rex = operand.rex();
+ rex |= 0x48; // REX.W000
+ EmitUint8(rex);
+}
+
void X86_64Assembler::EmitRex64(CpuRegister dst, CpuRegister src) {
EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 42d774a558..b46f6f71e3 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -390,20 +390,28 @@ class X86_64Assembler FINAL : public Assembler {
void cmpq(CpuRegister reg0, CpuRegister reg1);
void cmpq(CpuRegister reg0, const Immediate& imm);
void cmpq(CpuRegister reg0, const Address& address);
+ void cmpq(const Address& address, const Immediate& imm);
void testl(CpuRegister reg1, CpuRegister reg2);
void testl(CpuRegister reg, const Immediate& imm);
+ void testq(CpuRegister reg1, CpuRegister reg2);
void testq(CpuRegister reg, const Address& address);
void andl(CpuRegister dst, const Immediate& imm);
void andl(CpuRegister dst, CpuRegister src);
+ void andl(CpuRegister reg, const Address& address);
void andq(CpuRegister dst, const Immediate& imm);
+ void andq(CpuRegister dst, CpuRegister src);
void orl(CpuRegister dst, const Immediate& imm);
void orl(CpuRegister dst, CpuRegister src);
+ void orl(CpuRegister reg, const Address& address);
+ void orq(CpuRegister dst, CpuRegister src);
void xorl(CpuRegister dst, CpuRegister src);
+ void xorl(CpuRegister dst, const Immediate& imm);
+ void xorl(CpuRegister reg, const Address& address);
void xorq(CpuRegister dst, const Immediate& imm);
void xorq(CpuRegister dst, CpuRegister src);
@@ -426,8 +434,10 @@ class X86_64Assembler FINAL : public Assembler {
void subq(CpuRegister dst, const Address& address);
void cdq();
+ void cqo();
void idivl(CpuRegister reg);
+ void idivq(CpuRegister reg);
void imull(CpuRegister dst, CpuRegister src);
void imull(CpuRegister reg, const Immediate& imm);
@@ -663,7 +673,9 @@ class X86_64Assembler FINAL : public Assembler {
void EmitOptionalRex32(XmmRegister dst, const Operand& operand);
// Emit a REX.W prefix plus necessary register bit encodings.
+ void EmitRex64();
void EmitRex64(CpuRegister reg);
+ void EmitRex64(const Operand& operand);
void EmitRex64(CpuRegister dst, CpuRegister src);
void EmitRex64(CpuRegister dst, const Operand& operand);
void EmitRex64(XmmRegister dst, CpuRegister src);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index d87faebac0..47b492ae7c 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -430,6 +430,8 @@ class Dex2Oat FINAL {
image_base_(0U),
image_classes_zip_filename_(nullptr),
image_classes_filename_(nullptr),
+ compiled_classes_zip_filename_(nullptr),
+ compiled_classes_filename_(nullptr),
image_(false),
is_host_(false),
dump_stats_(false),
@@ -540,6 +542,10 @@ class Dex2Oat FINAL {
image_classes_filename_ = option.substr(strlen("--image-classes=")).data();
} else if (option.starts_with("--image-classes-zip=")) {
image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data();
+ } else if (option.starts_with("--compiled-classes=")) {
+ compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data();
+ } else if (option.starts_with("--compiled-classes-zip=")) {
+ compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data();
} else if (option.starts_with("--base=")) {
const char* image_base_str = option.substr(strlen("--base=")).data();
char* end;
@@ -583,6 +589,7 @@ class Dex2Oat FINAL {
compiler_kind_ = Compiler::kQuick;
} else if (backend_str == "Optimizing") {
compiler_kind_ = Compiler::kOptimizing;
+ compile_pic = true;
} else if (backend_str == "Portable") {
compiler_kind_ = Compiler::kPortable;
} else {
@@ -742,6 +749,18 @@ class Dex2Oat FINAL {
Usage("--image-classes-zip should be used with --image-classes");
}
+ if (compiled_classes_filename_ != nullptr && !image_) {
+ Usage("--compiled-classes should only be used with --image");
+ }
+
+ if (compiled_classes_filename_ != nullptr && !boot_image_option_.empty()) {
+ Usage("--compiled-classes should not be used with --boot-image");
+ }
+
+ if (compiled_classes_zip_filename_ != nullptr && compiled_classes_filename_ == nullptr) {
+ Usage("--compiled-classes-zip should be used with --compiled-classes");
+ }
+
if (dex_filenames_.empty() && zip_fd_ == -1) {
Usage("Input must be supplied with either --dex-file or --zip-fd");
}
@@ -985,6 +1004,25 @@ class Dex2Oat FINAL {
} else if (image_) {
image_classes_.reset(new std::set<std::string>);
}
+ // If --compiled-classes was specified, calculate the full list of classes to compile in the
+ // image.
+ if (compiled_classes_filename_ != nullptr) {
+ std::string error_msg;
+ if (compiled_classes_zip_filename_ != nullptr) {
+ compiled_classes_.reset(ReadImageClassesFromZip(compiled_classes_zip_filename_,
+ compiled_classes_filename_,
+ &error_msg));
+ } else {
+ compiled_classes_.reset(ReadImageClassesFromFile(compiled_classes_filename_));
+ }
+ if (compiled_classes_.get() == nullptr) {
+ LOG(ERROR) << "Failed to create list of compiled classes from '"
+ << compiled_classes_filename_ << "': " << error_msg;
+ return false;
+ }
+ } else if (image_) {
+ compiled_classes_.reset(nullptr); // By default compile everything.
+ }
if (boot_image_option_.empty()) {
dex_files_ = Runtime::Current()->GetClassLinker()->GetBootClassPath();
@@ -1088,6 +1126,7 @@ class Dex2Oat FINAL {
instruction_set_features_.get(),
image_,
image_classes_.release(),
+ compiled_classes_.release(),
thread_count_,
dump_stats_,
dump_passes_,
@@ -1513,7 +1552,10 @@ class Dex2Oat FINAL {
uintptr_t image_base_;
const char* image_classes_zip_filename_;
const char* image_classes_filename_;
+ const char* compiled_classes_zip_filename_;
+ const char* compiled_classes_filename_;
std::unique_ptr<std::set<std::string>> image_classes_;
+ std::unique_ptr<std::set<std::string>> compiled_classes_;
bool image_;
std::unique_ptr<ImageWriter> image_writer_;
bool is_host_;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index cdf48c360c..f1f1a56ae4 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -107,6 +107,10 @@ static void usage() {
" --no-disassemble may be used to disable disassembly.\n"
" Example: --no-disassemble\n"
"\n");
+ fprintf(stderr,
+ " --method-filter=<method name>: only dumps methods that contain the filter.\n"
+ " Example: --method-filter=foo\n"
+ "\n");
}
const char* image_roots_descriptions_[] = {
@@ -356,12 +360,14 @@ class OatDumperOptions {
bool dump_vmap,
bool disassemble_code,
bool absolute_addresses,
+ const char* method_filter,
Handle<mirror::ClassLoader>* class_loader)
: dump_raw_mapping_table_(dump_raw_mapping_table),
dump_raw_gc_map_(dump_raw_gc_map),
dump_vmap_(dump_vmap),
disassemble_code_(disassemble_code),
absolute_addresses_(absolute_addresses),
+ method_filter_(method_filter),
class_loader_(class_loader) {}
const bool dump_raw_mapping_table_;
@@ -369,6 +375,7 @@ class OatDumperOptions {
const bool dump_vmap_;
const bool disassemble_code_;
const bool absolute_addresses_;
+ const char* const method_filter_;
Handle<mirror::ClassLoader>* class_loader_;
};
@@ -686,8 +693,13 @@ class OatDumper {
uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
uint32_t method_access_flags) {
bool success = true;
+ std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true);
+ if (pretty_method.find(options_->method_filter_) == std::string::npos) {
+ return success;
+ }
+
os << StringPrintf("%d: %s (dex_method_idx=%d)\n",
- class_method_index, PrettyMethod(dex_method_idx, dex_file, true).c_str(),
+ class_method_index, pretty_method.c_str(),
dex_method_idx);
Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::unique_ptr<std::ostream> indent1_os(new std::ostream(&indent1_filter));
@@ -2179,6 +2191,8 @@ struct OatdumpArgs {
} else if (option.starts_with("--symbolize=")) {
oat_filename_ = option.substr(strlen("--symbolize=")).data();
symbolize_ = true;
+ } else if (option.starts_with("--method-filter=")) {
+ method_filter_ = option.substr(strlen("--method-filter=")).data();
} else {
fprintf(stderr, "Unknown argument %s\n", option.data());
usage();
@@ -2200,6 +2214,7 @@ struct OatdumpArgs {
}
const char* oat_filename_ = nullptr;
+ const char* method_filter_ = "";
const char* image_location_ = nullptr;
const char* boot_image_location_ = nullptr;
InstructionSet instruction_set_ = kRuntimeISA;
@@ -2231,6 +2246,7 @@ static int oatdump(int argc, char** argv) {
args.dump_vmap_,
args.disassemble_code_,
absolute_addresses,
+ args.method_filter_,
nullptr));
std::unique_ptr<Runtime> runtime;
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 4505b8e990..082e8dd9cd 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -465,7 +465,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_SHARED_LIBRARIES := libnativehelper libnativebridge libsigchain
include external/libcxx/libcxx.mk
- LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
+ LOCAL_SHARED_LIBRARIES += libbacktrace
ifeq ($$(art_target_or_host),target)
LOCAL_SHARED_LIBRARIES += libdl
# ZipArchive support, the order matters here to get all symbols.
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 69527cea54..0109a7c553 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -917,9 +917,10 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE
// Call artSet64InstanceFromCode with 4 word size arguments and the referrer.
DEFINE_FUNCTION art_quick_set64_instance
+ movd %ebx, %xmm0
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ movd %xmm0, %ebx
// Outgoing argument set up
- mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index d3a2655c94..b781d6008c 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -236,4 +236,28 @@ void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_se
#endif
}
+void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverity log_severity,
+ const char* message) {
+#ifdef HAVE_ANDROID_OS
+ // TODO: be more conservative on stack usage here.
+ LogLine(file, line, log_severity, message);
+#else
+ static const char* log_characters = "VDIWEFF";
+ CHECK_EQ(strlen(log_characters), INTERNAL_FATAL + 1U);
+
+ const char* program_name = ProgramInvocationShortName();
+ write(STDERR_FILENO, program_name, strlen(program_name));
+ write(STDERR_FILENO, " ", 1);
+ write(STDERR_FILENO, &log_characters[log_severity], 1);
+ write(STDERR_FILENO, " ", 1);
+ // TODO: pid and tid.
+ write(STDERR_FILENO, file, strlen(file));
+ // TODO: line.
+ UNUSED(line);
+ write(STDERR_FILENO, "] ", 2);
+ write(STDERR_FILENO, message, strlen(message));
+ write(STDERR_FILENO, "\n", 1);
+#endif
+}
+
} // namespace art
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index baa83e35af..ae83e331fd 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -244,6 +244,10 @@ class LogMessage {
// The routine that performs the actual logging.
static void LogLine(const char* file, unsigned int line, LogSeverity severity, const char* msg);
+ // A variant of the above for use with little stack.
+ static void LogLineLowStack(const char* file, unsigned int line, LogSeverity severity,
+ const char* msg);
+
private:
const std::unique_ptr<LogMessageData> data_;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 423ea77da9..49579886fd 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -57,7 +57,6 @@ Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
-Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
@@ -202,7 +201,7 @@ void BaseMutex::CheckSafeToWait(Thread* self) {
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
// We expect waits to happen while holding the thread list suspend thread lock.
- if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
+ if (held_mutex != NULL) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -918,16 +917,14 @@ void Locks::Init() {
DCHECK(mutator_lock_ != nullptr);
DCHECK(profiler_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
- DCHECK(thread_list_suspend_thread_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(unexpected_signal_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
- LockLevel current_lock_level = kThreadListSuspendThreadLock;
- DCHECK(thread_list_suspend_thread_lock_ == nullptr);
- thread_list_suspend_thread_lock_ =
- new Mutex("thread list suspend thread by .. lock", current_lock_level);
+ LockLevel current_lock_level = kInstrumentEntrypointsLock;
+ DCHECK(instrument_entrypoints_lock_ == nullptr);
+ instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
if (new_level >= current_lock_level) { \
@@ -938,10 +935,6 @@ void Locks::Init() {
} \
current_lock_level = new_level;
- UPDATE_CURRENT_LOCK_LEVEL(kInstrumentEntrypointsLock);
- DCHECK(instrument_entrypoints_lock_ == nullptr);
- instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
DCHECK(mutator_lock_ == nullptr);
mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d589eb69a8..9c93cc624d 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -101,7 +101,6 @@ enum LockLevel {
kHeapBitmapLock,
kMutatorLock,
kInstrumentEntrypointsLock,
- kThreadListSuspendThreadLock,
kZygoteCreationLock,
kLockLevelCount // Must come last.
@@ -486,17 +485,8 @@ class Locks {
public:
static void Init();
- // There's a potential race for two threads to try to suspend each other and for both of them
- // to succeed and get blocked becoming runnable. This lock ensures that only one thread is
- // requesting suspension of another at any time. As the the thread list suspend thread logic
- // transitions to runnable, if the current thread were tried to be suspended then this thread
- // would block holding this lock until it could safely request thread suspension of the other
- // thread without that thread having a suspension request against this thread. This avoids a
- // potential deadlock cycle.
- static Mutex* thread_list_suspend_thread_lock_;
-
// Guards allocation entrypoint instrumenting.
- static Mutex* instrument_entrypoints_lock_ ACQUIRED_AFTER(thread_list_suspend_thread_lock_);
+ static Mutex* instrument_entrypoints_lock_;
// The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
// mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 70807da22e..b2573439fc 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -517,7 +517,6 @@ struct ClassOffsets : public CheckOffsets<mirror::Class> {
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_), "directMethods"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, imtable_), "imTable"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass"));
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 584743b9c7..e2f6085ae2 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2408,9 +2408,7 @@ JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspen
if (peer.get() == nullptr) {
return JDWP::ERR_THREAD_NOT_ALIVE;
}
- // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
- // trying to suspend this one.
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
+ // Suspend thread to build stack trace.
bool timed_out;
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
@@ -3322,13 +3320,9 @@ class ScopedThreadSuspension {
soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
bool timed_out;
- Thread* suspended_thread;
- {
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
- suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
- }
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ Thread* suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true,
+ &timed_out);
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
if (suspended_thread == nullptr) {
// Thread terminated from under us while suspending.
diff --git a/runtime/dex_method_iterator_test.cc b/runtime/dex_method_iterator_test.cc
index c6f333f675..2681ad0411 100644
--- a/runtime/dex_method_iterator_test.cc
+++ b/runtime/dex_method_iterator_test.cc
@@ -18,6 +18,7 @@
#include "base/stl_util.h"
#include "common_runtime_test.h"
+#include "oat_file.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -29,9 +30,9 @@ class DexMethodIteratorTest : public CommonRuntimeTest {
TEST_F(DexMethodIteratorTest, Basic) {
ScopedObjectAccess soa(Thread::Current());
std::vector<const DexFile*> dex_files;
- const char* jars[] = { "core-libart", "conscrypt", "okhttp", "core-junit", "bouncycastle" };
- for (size_t i = 0; i < 5; ++i) {
- dex_files.push_back(LoadExpectSingleDexFile(GetDexFileName(jars[i]).c_str()));
+ CHECK_NE(boot_class_path_.size(), 0U);
+ for (size_t i = 0; i < boot_class_path_.size(); ++i) {
+ dex_files.push_back(boot_class_path_[i]);
}
DexMethodIterator it(dex_files);
while (it.HasNext()) {
@@ -43,7 +44,6 @@ TEST_F(DexMethodIteratorTest, Basic) {
}
it.Next();
}
- STLDeleteElements(&dex_files);
}
} // namespace art
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index c4736847b9..ab3ec62d69 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -179,6 +179,10 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
// Now set up the nested signal handler.
+ // TODO: add SIGSEGV back to the nested signals when we can handle running out of stack gracefully.
+ static const int handled_nested_signals[] = {SIGABRT};
+ constexpr size_t num_handled_nested_signals = arraysize(handled_nested_signals);
+
// Release the fault manager so that it will remove the signal chain for
// SIGSEGV and we call the real sigaction.
fault_manager.Release();
@@ -188,33 +192,40 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
// Unblock the signals we allow so that they can be delivered in the signal handler.
sigset_t sigset;
sigemptyset(&sigset);
- sigaddset(&sigset, SIGSEGV);
- sigaddset(&sigset, SIGABRT);
+ for (int signal : handled_nested_signals) {
+ sigaddset(&sigset, signal);
+ }
pthread_sigmask(SIG_UNBLOCK, &sigset, nullptr);
// If we get a signal in this code we want to invoke our nested signal
// handler.
- struct sigaction action, oldsegvaction, oldabortaction;
+ struct sigaction action;
+ struct sigaction oldactions[num_handled_nested_signals];
action.sa_sigaction = art_nested_signal_handler;
// Explicitly mask out SIGSEGV and SIGABRT from the nested signal handler. This
// should be the default but we definitely don't want these happening in our
// nested signal handler.
sigemptyset(&action.sa_mask);
- sigaddset(&action.sa_mask, SIGSEGV);
- sigaddset(&action.sa_mask, SIGABRT);
+ for (int signal : handled_nested_signals) {
+ sigaddset(&action.sa_mask, signal);
+ }
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
#if !defined(__APPLE__) && !defined(__mips__)
action.sa_restorer = nullptr;
#endif
- // Catch SIGSEGV and SIGABRT to invoke our nested handler
- int e1 = sigaction(SIGSEGV, &action, &oldsegvaction);
- int e2 = sigaction(SIGABRT, &action, &oldabortaction);
- if (e1 != 0 || e2 != 0) {
- LOG(ERROR) << "Unable to set up nested signal handler";
- } else {
+ // Catch handled signals to invoke our nested handler.
+ bool success = true;
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &action, &oldactions[i]) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to set up nested signal handler";
+ break;
+ }
+ }
+ if (success) {
// Save the current state and call the handlers. If anything causes a signal
// our nested signal handler will be invoked and this will longjmp to the saved
// state.
@@ -223,8 +234,12 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
if (handler->Action(sig, info, context)) {
// Restore the signal handlers, reinit the fault manager and return. Signal was
// handled.
- sigaction(SIGSEGV, &oldsegvaction, nullptr);
- sigaction(SIGABRT, &oldabortaction, nullptr);
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to restore signal handler";
+ }
+ }
fault_manager.Init();
return;
}
@@ -234,8 +249,12 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
}
// Restore the signal handlers.
- sigaction(SIGSEGV, &oldsegvaction, nullptr);
- sigaction(SIGABRT, &oldabortaction, nullptr);
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to restore signal handler";
+ }
+ }
}
// Now put the fault manager back in place.
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index f03ea31098..b23212842a 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -71,17 +71,19 @@ static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta)
}
// We are relocating or generating the core image. We should get rid of everything. It is all
-// out-of-date. We also don't really care if this fails since it is just a convienence.
+// out-of-date. We also don't really care if this fails since it is just a convenience.
// Adapted from prune_dex_cache(const char* subdir) in frameworks/native/cmds/installd/commands.c
// Note this should only be used during first boot.
static void RealPruneDexCache(const std::string& cache_dir_path);
+
static void PruneDexCache(InstructionSet isa) {
CHECK_NE(isa, kNone);
- // Prune the base /data/dalvik-cache
+ // Prune the base /data/dalvik-cache.
RealPruneDexCache(GetDalvikCacheOrDie(".", false));
- // prune /data/dalvik-cache/<isa>
+ // Prune /data/dalvik-cache/<isa>.
RealPruneDexCache(GetDalvikCacheOrDie(GetInstructionSetString(isa), false));
}
+
static void RealPruneDexCache(const std::string& cache_dir_path) {
if (!OS::DirectoryExists(cache_dir_path.c_str())) {
return;
@@ -97,8 +99,8 @@ static void RealPruneDexCache(const std::string& cache_dir_path) {
if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) {
continue;
}
- // We only want to delete regular files.
- if (de->d_type != DT_REG) {
+ // We only want to delete regular files and symbolic links.
+ if (de->d_type != DT_REG && de->d_type != DT_LNK) {
if (de->d_type != DT_DIR) {
// We do expect some directories (namely the <isa> for pruning the base dalvik-cache).
LOG(WARNING) << "Unexpected file type of " << std::hex << de->d_type << " encountered.";
diff --git a/runtime/image.cc b/runtime/image.cc
index 40f43463ef..aee84bc3c0 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '1', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '2', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 19e03d8f82..a5abce6ab1 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -795,13 +795,13 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
return JNI_OK;
}
-extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
+extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms_buf, jsize buf_len, jsize* vm_count) {
Runtime* runtime = Runtime::Current();
- if (runtime == nullptr) {
+ if (runtime == nullptr || buf_len == 0) {
*vm_count = 0;
} else {
*vm_count = 1;
- vms[0] = runtime->GetJavaVM();
+ vms_buf[0] = runtime->GetJavaVM();
}
return JNI_OK;
}
diff --git a/runtime/java_vm_ext_test.cc b/runtime/java_vm_ext_test.cc
new file mode 100644
index 0000000000..60c6a5c23a
--- /dev/null
+++ b/runtime/java_vm_ext_test.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+
+#include <pthread.h>
+
+#include "common_runtime_test.h"
+#include "java_vm_ext.h"
+#include "runtime.h"
+
+namespace art {
+
+class JavaVmExtTest : public CommonRuntimeTest {
+ protected:
+ virtual void SetUp() {
+ CommonRuntimeTest::SetUp();
+
+ vm_ = Runtime::Current()->GetJavaVM();
+ }
+
+
+ virtual void TearDown() OVERRIDE {
+ CommonRuntimeTest::TearDown();
+ }
+
+ JavaVMExt* vm_;
+};
+
+TEST_F(JavaVmExtTest, JNI_GetDefaultJavaVMInitArgs) {
+ jint err = JNI_GetDefaultJavaVMInitArgs(nullptr);
+ EXPECT_EQ(JNI_ERR, err);
+}
+
+TEST_F(JavaVmExtTest, JNI_GetCreatedJavaVMs) {
+ JavaVM* vms_buf[1];
+ jsize num_vms;
+ jint ok = JNI_GetCreatedJavaVMs(vms_buf, arraysize(vms_buf), &num_vms);
+ EXPECT_EQ(JNI_OK, ok);
+ EXPECT_EQ(1, num_vms);
+ EXPECT_EQ(vms_buf[0], vm_);
+}
+
+static bool gSmallStack = false;
+static bool gAsDaemon = false;
+
+static void* attach_current_thread_callback(void* arg ATTRIBUTE_UNUSED) {
+ JavaVM* vms_buf[1];
+ jsize num_vms;
+ JNIEnv* env;
+ jint ok = JNI_GetCreatedJavaVMs(vms_buf, arraysize(vms_buf), &num_vms);
+ EXPECT_EQ(JNI_OK, ok);
+ if (ok == JNI_OK) {
+ if (!gAsDaemon) {
+ ok = vms_buf[0]->AttachCurrentThread(&env, nullptr);
+ } else {
+ ok = vms_buf[0]->AttachCurrentThreadAsDaemon(&env, nullptr);
+ }
+ EXPECT_EQ(gSmallStack ? JNI_ERR : JNI_OK, ok);
+ if (ok == JNI_OK) {
+ ok = vms_buf[0]->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+ }
+ }
+ return nullptr;
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThread) {
+ pthread_t pthread;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = false;
+ gAsDaemon = false;
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, nullptr, attach_current_thread_callback,
+ nullptr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThreadAsDaemon) {
+ pthread_t pthread;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = false;
+ gAsDaemon = true;
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, nullptr, attach_current_thread_callback,
+ nullptr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThread_SmallStack) {
+ pthread_t pthread;
+ pthread_attr_t attr;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = true;
+ gAsDaemon = false;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, PTHREAD_STACK_MIN), reason);
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, &attr, attach_current_thread_callback,
+ nullptr), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, DetachCurrentThread) {
+ JNIEnv* env;
+ jint ok = vm_->AttachCurrentThread(&env, nullptr);
+ ASSERT_EQ(JNI_OK, ok);
+ ok = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+
+ jint err = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_ERR, err);
+}
+
+} // namespace art
diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc
index e8c08561d0..7119ce5dbb 100644
--- a/runtime/jdwp/jdwp_socket.cc
+++ b/runtime/jdwp/jdwp_socket.cc
@@ -272,7 +272,7 @@ bool JdwpSocketState::Establish(const JdwpOptions* options) {
/*
* Start by resolving the host name.
*/
-#ifdef HAVE_GETHOSTBYNAME_R
+#if defined(__linux__)
hostent he;
char auxBuf[128];
int error;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index cab907c378..ccad137164 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1170,7 +1170,15 @@ TEST_F(JniInternalTest, NewObjectArrayWithInitialValue) {
}
TEST_F(JniInternalTest, GetArrayLength) {
- // Already tested in NewObjectArray/NewPrimitiveArray.
+ // Already tested in NewObjectArray/NewPrimitiveArray except for NULL.
+ CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ EXPECT_EQ(0, env_->GetArrayLength(nullptr));
+ jni_abort_catcher.Check("java_array == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(JNI_ERR, env_->GetArrayLength(nullptr));
+ jni_abort_catcher.Check("jarray was NULL");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, GetObjectClass) {
@@ -2011,14 +2019,4 @@ TEST_F(JniInternalTest, MonitorEnterExit) {
}
}
-TEST_F(JniInternalTest, DetachCurrentThread) {
- CleanUpJniEnv(); // cleanup now so TearDown won't have junk from wrong JNIEnv
- jint ok = vm_->DetachCurrentThread();
- EXPECT_EQ(JNI_OK, ok);
-
- jint err = vm_->DetachCurrentThread();
- EXPECT_EQ(JNI_ERR, err);
- vm_->AttachCurrentThread(&env_, nullptr); // need attached thread for CommonRuntimeTest::TearDown
-}
-
} // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 21cf53f8b3..4f1af44794 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1076,9 +1076,6 @@ class MANAGED Class FINAL : public Object {
// methods for the methods in the interface.
HeapReference<IfTable> iftable_;
- // Interface method table (imt), for quick "invoke-interface".
- HeapReference<ObjectArray<ArtMethod>> imtable_;
-
// Descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
HeapReference<String> name_;
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 6445b88b68..233267b53c 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -655,8 +655,6 @@ void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWo
Thread* owner;
{
ScopedThreadStateChange tsc(self, kBlocked);
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
}
if (owner != nullptr) {
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index e396dad5c3..2cdc68f5b2 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -38,12 +38,7 @@ static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject p
soa.Self()->TransitionFromRunnableToSuspended(kNative);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
- Thread* thread;
- {
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
- thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
- }
+ Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
if (thread != nullptr) {
// Must be runnable to create returned array.
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 0722a2485d..420e9df2f2 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -133,11 +133,7 @@ static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
// Take suspend thread lock to avoid races with threads trying to suspend this one.
- Thread* thread;
- {
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
- thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
- }
+ Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
if (thread != NULL) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index b74430f237..987427ea79 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -63,12 +63,7 @@ static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint th
}
// Suspend thread to build stack trace.
- Thread* thread;
- {
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
- thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
- }
+ Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
if (thread != nullptr) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 6bda6beac4..0749c06551 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -23,7 +23,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '4', '5', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '4', '6', '\0' };
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 94f7585bea..e30e745154 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -85,7 +85,7 @@ inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
bool bad_mutexes_held = false;
for (int i = kLockLevelCount - 1; i >= 0; --i) {
// We expect no locks except the mutator_lock_ or thread list suspend thread lock.
- if (i != kMutatorLock && i != kThreadListSuspendThreadLock) {
+ if (i != kMutatorLock) {
BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
if (held_mutex != NULL) {
LOG(ERROR) << "holding \"" << held_mutex->GetName()
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2c44f27f3f..c769faf3e5 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -158,7 +158,7 @@ void* Thread::CreateCallback(void* arg) {
// Check that if we got here we cannot be shutting down (as shutdown should never have started
// while threads are being born).
CHECK(!runtime->IsShuttingDownLocked());
- self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
+ CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM()));
Runtime::Current()->EndThreadBirth();
}
{
@@ -348,40 +348,46 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
}
}
-void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
+bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
// This function does all the initialization that must be run by the native thread it applies to.
// (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
// we can handshake with the corresponding native thread when it's ready.) Check this native
// thread hasn't been through here already...
CHECK(Thread::Current() == nullptr);
+
+ // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
+ // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
+ tlsPtr_.pthread_self = pthread_self();
+ CHECK(is_started_);
+
SetUpAlternateSignalStack();
+ if (!InitStackHwm()) {
+ return false;
+ }
InitCpu();
InitTlsEntryPoints();
RemoveSuspendTrigger();
InitCardTable();
InitTid();
- // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
- // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
- tlsPtr_.pthread_self = pthread_self();
- CHECK(is_started_);
+
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
DCHECK_EQ(Thread::Current(), this);
tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
- InitStackHwm();
tlsPtr_.jni_env = new JNIEnvExt(this, java_vm);
thread_list->Register(this);
+ return true;
}
Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- Thread* self;
Runtime* runtime = Runtime::Current();
if (runtime == nullptr) {
LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
return nullptr;
}
+ Thread* self;
{
MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
if (runtime->IsShuttingDownLocked()) {
@@ -390,8 +396,12 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g
} else {
Runtime::Current()->StartThreadBirth();
self = new Thread(as_daemon);
- self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
+ bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
Runtime::Current()->EndThreadBirth();
+ if (!init_success) {
+ delete self;
+ return nullptr;
+ }
}
}
@@ -431,6 +441,11 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
thread_group = runtime->GetMainThreadGroup();
}
ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
+ // Add missing null check in case of OOM b/18297817
+ if (name != nullptr && thread_name.get() == nullptr) {
+ CHECK(IsExceptionPending());
+ return;
+ }
jint thread_priority = GetNativePriority();
jboolean thread_is_daemon = as_daemon;
@@ -494,7 +509,7 @@ void Thread::SetThreadName(const char* name) {
Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}
-void Thread::InitStackHwm() {
+bool Thread::InitStackHwm() {
void* read_stack_base;
size_t read_stack_size;
size_t read_guard_size;
@@ -516,8 +531,10 @@ void Thread::InitStackHwm() {
uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
+ 4 * KB;
if (read_stack_size <= min_stack) {
- LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
- << " bytes)";
+ // Note, as we know the stack is small, avoid operations that could use a lot of stack.
+ LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
+ "Attempt to attach a thread with a too-small stack");
+ return false;
}
// Set stack_end_ to the bottom of the stack saving space of stack overflows
@@ -542,6 +559,8 @@ void Thread::InitStackHwm() {
// Sanity check.
int stack_variable;
CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
+
+ return true;
}
void Thread::ShortDump(std::ostream& os) const {
@@ -1042,7 +1061,8 @@ void Thread::Startup() {
}
// Allocate a TLS slot.
- CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
+ CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
+ "self key");
// Double-check the TLS slot allocation.
if (pthread_getspecific(pthread_key_self_) != nullptr) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 89aee04e5d..7e567fb77c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -893,14 +893,14 @@ class Thread {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+ bool Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
void InitCardTable();
void InitCpu();
void CleanupCpu();
void InitTlsEntryPoints();
void InitTid();
void InitPthreadKeySelf();
- void InitStackHwm();
+ bool InitStackHwm();
void SetUpAlternateSignalStack();
void TearDownAlternateSignalStack();
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 675ce9acbc..5ff90d6392 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -530,6 +530,12 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
{
MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
if (request_suspension) {
+ if (self->GetSuspendCount() > 0) {
+ // We hold the suspend count lock but another thread is trying to suspend us. It's not
+ // safe to try to suspend another thread in case we get a cycle. Start the loop again
+ // which will allow this thread to be suspended.
+ continue;
+ }
thread->ModifySuspendCount(self, +1, debug_suspension);
request_suspension = false;
did_suspend_request = true;
@@ -608,6 +614,12 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe
{
MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
if (suspended_thread == nullptr) {
+ if (self->GetSuspendCount() > 0) {
+ // We hold the suspend count lock but another thread is trying to suspend us. It's not
+ // safe to try to suspend another thread in case we get a cycle. Start the loop again
+ // which will allow this thread to be suspended.
+ continue;
+ }
thread->ModifySuspendCount(self, +1, debug_suspension);
suspended_thread = thread;
} else {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index a7f2c539af..13684c7668 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,7 +68,6 @@ class ThreadList {
// is set to true.
Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
bool* timed_out)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
@@ -78,7 +77,6 @@ class ThreadList {
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, that may be terminating. If the suspension times out then *timeout is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/runtime/utils.cc b/runtime/utils.cc
index f2d710dc4a..9a4c8759b8 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1120,7 +1120,8 @@ std::string GetSchedulerGroupName(pid_t tid) {
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
mirror::ArtMethod* current_method) {
-#ifdef __linux__
+ // TODO: enable on __linux__ b/15446488.
+#if 0
// b/18119146
if (RUNNING_ON_VALGRIND != 0) {
return;
diff --git a/test/411-optimizing-arith/src/Main.java b/test/411-optimizing-arith/src/Main.java
index a22c516ff4..3a5d7c05c9 100644
--- a/test/411-optimizing-arith/src/Main.java
+++ b/test/411-optimizing-arith/src/Main.java
@@ -101,7 +101,7 @@ public class Main {
expectEquals(0L, $opt$Mul(3L, 0L));
expectEquals(-3L, $opt$Mul(1L, -3L));
expectEquals(36L, $opt$Mul(-12L, -3L));
- expectEquals(33L, $opt$Mul(1L, 3L) * 11F);
+ expectEquals(33L, $opt$Mul(1L, 3L) * 11L);
expectEquals(240518168583L, $opt$Mul(34359738369L, 7L)); // (2^35 + 1) * 7
}
diff --git a/test/415-optimizing-arith-neg/src/Main.java b/test/415-optimizing-arith-neg/src/Main.java
index e2850ca760..d9f8bcf0c2 100644
--- a/test/415-optimizing-arith-neg/src/Main.java
+++ b/test/415-optimizing-arith-neg/src/Main.java
@@ -71,8 +71,8 @@ public class Main {
assertEquals(0, $opt$NegInt(0));
assertEquals(51, $opt$NegInt(-51));
assertEquals(-51, $opt$NegInt(51));
- assertEquals(2147483647, $opt$NegInt(-2147483647)); // (2^31 - 1)
- assertEquals(-2147483647, $opt$NegInt(2147483647)); // -(2^31 - 1)
+ assertEquals(2147483647, $opt$NegInt(-2147483647)); // -(2^31 - 1)
+ assertEquals(-2147483647, $opt$NegInt(2147483647)); // 2^31 - 1
// From the Java 7 SE Edition specification:
// http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.15.4
//
@@ -98,13 +98,13 @@ public class Main {
assertEquals(51L, $opt$NegLong(-51L));
assertEquals(-51L, $opt$NegLong(51L));
- assertEquals(2147483647L, $opt$NegLong(-2147483647L)); // (2^31 - 1)
- assertEquals(-2147483647L, $opt$NegLong(2147483647L)); // -(2^31 - 1)
- assertEquals(2147483648L, $opt$NegLong(-2147483648L)); // 2^31
- assertEquals(-2147483648L, $opt$NegLong(2147483648L)); // -(2^31)
+ assertEquals(2147483647L, $opt$NegLong(-2147483647L)); // -(2^31 - 1)
+ assertEquals(-2147483647L, $opt$NegLong(2147483647L)); // (2^31 - 1)
+ assertEquals(2147483648L, $opt$NegLong(-2147483648L)); // -(2^31)
+ assertEquals(-2147483648L, $opt$NegLong(2147483648L)); // 2^31
- assertEquals(9223372036854775807L, $opt$NegLong(-9223372036854775807L)); // (2^63 - 1)
- assertEquals(-9223372036854775807L, $opt$NegLong(9223372036854775807L)); // -(2^63 - 1)
+ assertEquals(9223372036854775807L, $opt$NegLong(-9223372036854775807L)); // -(2^63 - 1)
+ assertEquals(-9223372036854775807L, $opt$NegLong(9223372036854775807L)); // 2^63 - 1
// See remark regarding the negation of the maximum negative
// (long) value in negInt().
assertEquals(-9223372036854775808L, $opt$NegLong(-9223372036854775808L)); // -(2^63)
diff --git a/test/416-optimizing-arith-not/src/Main.java b/test/416-optimizing-arith-not/src/Main.java
index 26e206c94d..44c7d3cfb0 100644
--- a/test/416-optimizing-arith-not/src/Main.java
+++ b/test/416-optimizing-arith-not/src/Main.java
@@ -40,10 +40,10 @@ public class Main {
expectEquals(0, smaliNotInt(-1));
expectEquals(-1, smaliNotInt(0));
expectEquals(-2, smaliNotInt(1));
- expectEquals(2147483647, smaliNotInt(-2147483648)); // (2^31) - 1
- expectEquals(2147483646, smaliNotInt(-2147483647)); // (2^31) - 2
- expectEquals(-2147483647, smaliNotInt(2147483646)); // -(2^31) - 1
- expectEquals(-2147483648, smaliNotInt(2147483647)); // -(2^31)
+ expectEquals(2147483647, smaliNotInt(-2147483648)); // -(2^31)
+ expectEquals(2147483646, smaliNotInt(-2147483647)); // -(2^31 - 1)
+ expectEquals(-2147483647, smaliNotInt(2147483646)); // 2^31 - 2
+ expectEquals(-2147483648, smaliNotInt(2147483647)); // 2^31 - 1
}
private static void notLong() throws Exception {
@@ -51,14 +51,14 @@ public class Main {
expectEquals(0L, smaliNotLong(-1L));
expectEquals(-1L, smaliNotLong(0L));
expectEquals(-2L, smaliNotLong(1L));
- expectEquals(2147483647L, smaliNotLong(-2147483648L)); // (2^31) - 1
- expectEquals(2147483646L, smaliNotLong(-2147483647L)); // (2^31) - 2
- expectEquals(-2147483647L, smaliNotLong(2147483646L)); // -(2^31) - 1
- expectEquals(-2147483648L, smaliNotLong(2147483647L)); // -(2^31)
- expectEquals(9223372036854775807L, smaliNotLong(-9223372036854775808L)); // (2^63) - 1
- expectEquals(9223372036854775806L, smaliNotLong(-9223372036854775807L)); // (2^63) - 2
- expectEquals(-9223372036854775807L, smaliNotLong(9223372036854775806L)); // -(2^63) - 1
- expectEquals(-9223372036854775808L, smaliNotLong(9223372036854775807L)); // -(2^63)
+ expectEquals(2147483647L, smaliNotLong(-2147483648L)); // -(2^31)
+ expectEquals(2147483646L, smaliNotLong(-2147483647L)); // -(2^31 - 1)
+ expectEquals(-2147483647L, smaliNotLong(2147483646L)); // 2^31 - 2
+ expectEquals(-2147483648L, smaliNotLong(2147483647L)); // 2^31 - 1
+ expectEquals(9223372036854775807L, smaliNotLong(-9223372036854775808L)); // -(2^63)
+ expectEquals(9223372036854775806L, smaliNotLong(-9223372036854775807L)); // -(2^63 - 1)
+ expectEquals(-9223372036854775807L, smaliNotLong(9223372036854775806L)); // 2^63 - 2
+ expectEquals(-9223372036854775808L, smaliNotLong(9223372036854775807L)); // 2^63 - 1
}
// Wrappers around methods located in file not.smali.
diff --git a/test/417-optimizing-arith-div/src/Main.java b/test/417-optimizing-arith-div/src/Main.java
index 5825d24dda..a5dea15559 100644
--- a/test/417-optimizing-arith-div/src/Main.java
+++ b/test/417-optimizing-arith-div/src/Main.java
@@ -78,18 +78,33 @@ public class Main {
} catch (java.lang.RuntimeException e) {
}
}
+
+ public static void expectDivisionByZero(long value) {
+ try {
+ $opt$Div(value, 0L);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ try {
+ $opt$DivZero(value);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ }
+
public static void main(String[] args) {
div();
}
public static void div() {
divInt();
+ divLong();
divFloat();
divDouble();
}
private static void divInt() {
- expectEquals(2, $opt$DivLit(6));
+ expectEquals(2, $opt$DivConst(6));
expectEquals(2, $opt$Div(6, 3));
expectEquals(6, $opt$Div(6, 1));
expectEquals(-2, $opt$Div(6, -3));
@@ -111,6 +126,35 @@ public class Main {
expectDivisionByZero(Integer.MIN_VALUE);
}
+ private static void divLong() {
+ expectEquals(2L, $opt$DivConst(6L));
+ expectEquals(2L, $opt$Div(6L, 3L));
+ expectEquals(6L, $opt$Div(6L, 1L));
+ expectEquals(-2L, $opt$Div(6L, -3L));
+ expectEquals(1L, $opt$Div(4L, 3L));
+ expectEquals(-1L, $opt$Div(4L, -3L));
+ expectEquals(5L, $opt$Div(23L, 4L));
+ expectEquals(-5L, $opt$Div(-23L, 4L));
+
+ expectEquals(-Integer.MAX_VALUE, $opt$Div(Integer.MAX_VALUE, -1L));
+ expectEquals(2147483648L, $opt$Div(Integer.MIN_VALUE, -1L));
+ expectEquals(-1073741824L, $opt$Div(Integer.MIN_VALUE, 2L));
+
+ expectEquals(-Long.MAX_VALUE, $opt$Div(Long.MAX_VALUE, -1L));
+ expectEquals(Long.MIN_VALUE, $opt$Div(Long.MIN_VALUE, -1L)); // overflow
+
+ expectEquals(11111111111111L, $opt$Div(33333333333333L, 3L));
+ expectEquals(3L, $opt$Div(33333333333333L, 11111111111111L));
+
+ expectEquals(0L, $opt$Div(0L, Long.MAX_VALUE));
+ expectEquals(0L, $opt$Div(0L, Long.MIN_VALUE));
+
+ expectDivisionByZero(0L);
+ expectDivisionByZero(1L);
+ expectDivisionByZero(Long.MAX_VALUE);
+ expectDivisionByZero(Long.MIN_VALUE);
+ }
+
private static void divFloat() {
expectApproxEquals(1.6666666F, $opt$Div(5F, 3F));
expectApproxEquals(0F, $opt$Div(0F, 3F));
@@ -178,10 +222,22 @@ public class Main {
}
// Division by literals != 0 should not generate checks.
- static int $opt$DivLit(int a) {
+ static int $opt$DivConst(int a) {
return a / 3;
}
+ static long $opt$DivConst(long a) {
+ return a / 3L;
+ }
+
+ static long $opt$Div(long a, long b) {
+ return a / b;
+ }
+
+ static long $opt$DivZero(long a) {
+ return a / 0L;
+ }
+
static float $opt$Div(float a, float b) {
return a / b;
}
diff --git a/test/422-type-conversion/src/Main.java b/test/422-type-conversion/src/Main.java
index d2ffc5bdfc..7c5ddbab13 100644
--- a/test/422-type-conversion/src/Main.java
+++ b/test/422-type-conversion/src/Main.java
@@ -18,7 +18,19 @@
// it does compile the method.
public class Main {
- public static void assertEquals(long expected, long result) {
+ public static void assertByteEquals(byte expected, byte result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertLongEquals(long expected, long result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
}
@@ -28,53 +40,153 @@ public class Main {
byteToLong();
shortToLong();
intToLong();
+ charToLong();
+
+ longToInt();
+
+ shortToByte();
+ intToByte();
+ charToByte();
}
private static void byteToLong() {
- assertEquals(1L, $opt$ByteToLong((byte)1));
- assertEquals(0L, $opt$ByteToLong((byte)0));
- assertEquals(-1L, $opt$ByteToLong((byte)-1));
- assertEquals(51L, $opt$ByteToLong((byte)51));
- assertEquals(-51L, $opt$ByteToLong((byte)-51));
- assertEquals(127L, $opt$ByteToLong((byte)127)); // (2^7) - 1
- assertEquals(-127L, $opt$ByteToLong((byte)-127)); // -(2^7) - 1
- assertEquals(-128L, $opt$ByteToLong((byte)-128)); // -(2^7)
+ assertLongEquals(1L, $opt$ByteToLong((byte)1));
+ assertLongEquals(0L, $opt$ByteToLong((byte)0));
+ assertLongEquals(-1L, $opt$ByteToLong((byte)-1));
+ assertLongEquals(51L, $opt$ByteToLong((byte)51));
+ assertLongEquals(-51L, $opt$ByteToLong((byte)-51));
+ assertLongEquals(127L, $opt$ByteToLong((byte)127)); // 2^7 - 1
+ assertLongEquals(-127L, $opt$ByteToLong((byte)-127)); // -(2^7 - 1)
+ assertLongEquals(-128L, $opt$ByteToLong((byte)-128)); // -(2^7)
}
private static void shortToLong() {
- assertEquals(1L, $opt$ShortToLong((short)1));
- assertEquals(0L, $opt$ShortToLong((short)0));
- assertEquals(-1L, $opt$ShortToLong((short)-1));
- assertEquals(51L, $opt$ShortToLong((short)51));
- assertEquals(-51L, $opt$ShortToLong((short)-51));
- assertEquals(32767L, $opt$ShortToLong((short)32767)); // (2^15) - 1
- assertEquals(-32767L, $opt$ShortToLong((short)-32767)); // -(2^15) - 1
- assertEquals(-32768L, $opt$ShortToLong((short)-32768)); // -(2^15)
+ assertLongEquals(1L, $opt$ShortToLong((short)1));
+ assertLongEquals(0L, $opt$ShortToLong((short)0));
+ assertLongEquals(-1L, $opt$ShortToLong((short)-1));
+ assertLongEquals(51L, $opt$ShortToLong((short)51));
+ assertLongEquals(-51L, $opt$ShortToLong((short)-51));
+ assertLongEquals(32767L, $opt$ShortToLong((short)32767)); // 2^15 - 1
+ assertLongEquals(-32767L, $opt$ShortToLong((short)-32767)); // -(2^15 - 1)
+ assertLongEquals(-32768L, $opt$ShortToLong((short)-32768)); // -(2^15)
}
private static void intToLong() {
- assertEquals(1L, $opt$IntToLong(1));
- assertEquals(0L, $opt$IntToLong(0));
- assertEquals(-1L, $opt$IntToLong(-1));
- assertEquals(51L, $opt$IntToLong(51));
- assertEquals(-51L, $opt$IntToLong(-51));
- assertEquals(2147483647L, $opt$IntToLong(2147483647)); // (2^31) - 1
- assertEquals(-2147483647L, $opt$IntToLong(-2147483647)); // -(2^31) - 1
- assertEquals(-2147483648L, $opt$IntToLong(-2147483648)); // -(2^31)
+ assertLongEquals(1L, $opt$IntToLong(1));
+ assertLongEquals(0L, $opt$IntToLong(0));
+ assertLongEquals(-1L, $opt$IntToLong(-1));
+ assertLongEquals(51L, $opt$IntToLong(51));
+ assertLongEquals(-51L, $opt$IntToLong(-51));
+ assertLongEquals(2147483647L, $opt$IntToLong(2147483647)); // 2^31 - 1
+ assertLongEquals(-2147483647L, $opt$IntToLong(-2147483647)); // -(2^31 - 1)
+ assertLongEquals(-2147483648L, $opt$IntToLong(-2147483648)); // -(2^31)
+ }
+
+ private static void charToLong() {
+ assertLongEquals(1L, $opt$CharToLong((char)1));
+ assertLongEquals(0L, $opt$CharToLong((char)0));
+ assertLongEquals(51L, $opt$CharToLong((char)51));
+ assertLongEquals(32767L, $opt$CharToLong((char)32767)); // 2^15 - 1
+ assertLongEquals(65535L, $opt$CharToLong((char)65535)); // 2^16 - 1
+
+ assertLongEquals(0L, $opt$CharToLong('\u0000'));
+ assertLongEquals(65535L, $opt$CharToLong('\uFFFF')); // 2^16 - 1
+
+ assertLongEquals(65535L, $opt$CharToLong((char)-1));
+ assertLongEquals(65485L, $opt$CharToLong((char)-51));
+ assertLongEquals(32769L, $opt$CharToLong((char)-32767)); // -(2^15 - 1)
+ assertLongEquals(32768L, $opt$CharToLong((char)-32768)); // -(2^15)
}
- static long $opt$ByteToLong(byte a) {
- // Translates to an int-to-long Dex instruction.
- return a;
+ private static void longToInt() {
+ assertIntEquals(1, $opt$LongToInt(1L));
+ assertIntEquals(0, $opt$LongToInt(0L));
+ assertIntEquals(-1, $opt$LongToInt(-1L));
+ assertIntEquals(51, $opt$LongToInt(51L));
+ assertIntEquals(-51, $opt$LongToInt(-51L));
+ assertIntEquals(2147483647, $opt$LongToInt(2147483647L)); // 2^31 - 1
+ assertIntEquals(-2147483647, $opt$LongToInt(-2147483647L)); // -(2^31 - 1)
+ assertIntEquals(-2147483648, $opt$LongToInt(-2147483648L)); // -(2^31)
+ assertIntEquals(-2147483648, $opt$LongToInt(2147483648L)); // 2^31
+ assertIntEquals(2147483647, $opt$LongToInt(-2147483649L)); // -(2^31 + 1)
+ assertIntEquals(-1, $opt$LongToInt(9223372036854775807L)); // 2^63 - 1
+ assertIntEquals(1, $opt$LongToInt(-9223372036854775807L)); // -(2^63 - 1)
+ assertIntEquals(0, $opt$LongToInt(-9223372036854775808L)); // -(2^63)
+
+ assertIntEquals(42, $opt$LongLiteralToInt());
+
+ // Ensure long-to-int conversions truncate values as expected.
+ assertLongEquals(1L, $opt$IntToLong($opt$LongToInt(4294967297L))); // 2^32 + 1
+ assertLongEquals(0L, $opt$IntToLong($opt$LongToInt(4294967296L))); // 2^32
+ assertLongEquals(-1L, $opt$IntToLong($opt$LongToInt(4294967295L))); // 2^32 - 1
+ assertLongEquals(0L, $opt$IntToLong($opt$LongToInt(0L)));
+ assertLongEquals(1L, $opt$IntToLong($opt$LongToInt(-4294967295L))); // -(2^32 - 1)
+ assertLongEquals(0L, $opt$IntToLong($opt$LongToInt(-4294967296L))); // -(2^32)
+ assertLongEquals(-1, $opt$IntToLong($opt$LongToInt(-4294967297L))); // -(2^32 + 1)
}
- static long $opt$ShortToLong(short a) {
- // Translates to an int-to-long Dex instruction.
- return a;
+ private static void shortToByte() {
+ assertByteEquals((byte)1, $opt$ShortToByte((short)1));
+ assertByteEquals((byte)0, $opt$ShortToByte((short)0));
+ assertByteEquals((byte)-1, $opt$ShortToByte((short)-1));
+ assertByteEquals((byte)51, $opt$ShortToByte((short)51));
+ assertByteEquals((byte)-51, $opt$ShortToByte((short)-51));
+ assertByteEquals((byte)127, $opt$ShortToByte((short)127)); // 2^7 - 1
+ assertByteEquals((byte)-127, $opt$ShortToByte((short)-127)); // -(2^7 - 1)
+ assertByteEquals((byte)-128, $opt$ShortToByte((short)-128)); // -(2^7)
+ assertByteEquals((byte)-128, $opt$ShortToByte((short)128)); // 2^7
+ assertByteEquals((byte)127, $opt$ShortToByte((short)-129)); // -(2^7 + 1)
+ assertByteEquals((byte)-1, $opt$ShortToByte((short)32767)); // 2^15 - 1
+ assertByteEquals((byte)0, $opt$ShortToByte((short)-32768)); // -(2^15)
}
- static long $opt$IntToLong(int a) {
- // Translates to an int-to-long Dex instruction.
- return a;
+ private static void intToByte() {
+ assertByteEquals((byte)1, $opt$IntToByte(1));
+ assertByteEquals((byte)0, $opt$IntToByte(0));
+ assertByteEquals((byte)-1, $opt$IntToByte(-1));
+ assertByteEquals((byte)51, $opt$IntToByte(51));
+ assertByteEquals((byte)-51, $opt$IntToByte(-51));
+ assertByteEquals((byte)127, $opt$IntToByte(127)); // 2^7 - 1
+ assertByteEquals((byte)-127, $opt$IntToByte(-127)); // -(2^7 - 1)
+ assertByteEquals((byte)-128, $opt$IntToByte(-128)); // -(2^7)
+ assertByteEquals((byte)-128, $opt$IntToByte(128)); // 2^7
+ assertByteEquals((byte)127, $opt$IntToByte(-129)); // -(2^7 + 1)
+ assertByteEquals((byte)-1, $opt$IntToByte(2147483647)); // 2^31 - 1
+ assertByteEquals((byte)0, $opt$IntToByte(-2147483648)); // -(2^31)
}
+
+ private static void charToByte() {
+ assertByteEquals((byte)1, $opt$CharToByte((char)1));
+ assertByteEquals((byte)0, $opt$CharToByte((char)0));
+ assertByteEquals((byte)51, $opt$CharToByte((char)51));
+ assertByteEquals((byte)127, $opt$CharToByte((char)127)); // 2^7 - 1
+ assertByteEquals((byte)-128, $opt$CharToByte((char)128)); // 2^7
+ assertByteEquals((byte)-1, $opt$CharToByte((char)32767)); // 2^15 - 1
+ assertByteEquals((byte)-1, $opt$CharToByte((char)65535)); // 2^16 - 1
+
+ assertByteEquals((byte)0, $opt$CharToByte('\u0000'));
+ assertByteEquals((byte)-1, $opt$CharToByte('\uFFFF')); // 2^16 - 1
+
+ assertByteEquals((byte)-1, $opt$CharToByte((char)-1));
+ assertByteEquals((byte)-51, $opt$CharToByte((char)-51));
+ assertByteEquals((byte)-127, $opt$CharToByte((char)-127)); // -(2^7 - 1)
+ assertByteEquals((byte)-128, $opt$CharToByte((char)-128)); // -(2^7)
+ assertByteEquals((byte)127, $opt$CharToByte((char)-129)); // -(2^7 + 1)
+ }
+
+
+ // These methods produce int-to-long Dex instructions.
+ static long $opt$ByteToLong(byte a) { return a; }
+ static long $opt$ShortToLong(short a) { return a; }
+ static long $opt$IntToLong(int a) { return a; }
+ static long $opt$CharToLong(int a) { return a; }
+
+ // These methods produce long-to-int Dex instructions.
+ static int $opt$LongToInt(long a){ return (int)a; }
+ static int $opt$LongLiteralToInt(){ return (int)42L; }
+
+ // These methods produce int-to-byte Dex instructions.
+ static byte $opt$ShortToByte(short a){ return (byte)a; }
+ static byte $opt$IntToByte(int a){ return (byte)a; }
+ static byte $opt$CharToByte(char a){ return (byte)a; }
}
diff --git a/test/424-checkcast/expected.txt b/test/424-checkcast/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/424-checkcast/expected.txt
diff --git a/test/424-checkcast/info.txt b/test/424-checkcast/info.txt
new file mode 100644
index 0000000000..b50b082651
--- /dev/null
+++ b/test/424-checkcast/info.txt
@@ -0,0 +1 @@
+Simple tests for the checkcast opcode.
diff --git a/test/424-checkcast/src/Main.java b/test/424-checkcast/src/Main.java
new file mode 100644
index 0000000000..791b166609
--- /dev/null
+++ b/test/424-checkcast/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static Object a;
+
+ public static Object $opt$CheckCastMain() {
+ return (Main)a;
+ }
+
+ public static Object $opt$CheckCastFinalClass() {
+ return (FinalClass)a;
+ }
+
+ public static void main(String[] args) {
+ $opt$TestMain();
+ $opt$TestFinalClass();
+ }
+
+ public static void $opt$TestMain() {
+ a = new Main();
+ $opt$CheckCastMain();
+
+ a = null;
+ $opt$CheckCastMain();
+
+ a = new MainChild();
+ $opt$CheckCastMain();
+
+ a = new Object();
+ try {
+ $opt$CheckCastMain();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+ }
+
+ public static void $opt$TestFinalClass() {
+ a = new FinalClass();
+ $opt$CheckCastFinalClass();
+
+ a = null;
+ $opt$CheckCastFinalClass();
+
+ a = new Main();
+ try {
+ $opt$CheckCastFinalClass();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+
+ a = new Object();
+ try {
+ $opt$CheckCastFinalClass();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+ }
+
+ static class MainChild extends Main {}
+
+ static final class FinalClass {}
+}
diff --git a/test/425-invoke-super/expected.txt b/test/425-invoke-super/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/425-invoke-super/expected.txt
diff --git a/test/425-invoke-super/info.txt b/test/425-invoke-super/info.txt
new file mode 100644
index 0000000000..ad99030a85
--- /dev/null
+++ b/test/425-invoke-super/info.txt
@@ -0,0 +1 @@
+Tests the invoke-super opcode.
diff --git a/test/425-invoke-super/smali/invokesuper.smali b/test/425-invoke-super/smali/invokesuper.smali
new file mode 100644
index 0000000000..ab1309161e
--- /dev/null
+++ b/test/425-invoke-super/smali/invokesuper.smali
@@ -0,0 +1,40 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+.class public LInvokeSuper;
+.super LSuperClass;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, LSuperClass;-><init>()V
+ return-void
+.end method
+
+
+.method public run()I
+.registers 2
+ # Do an invoke super on a non-super class to force slow path.
+ invoke-super {v1}, LInvokeSuper;->returnInt()I
+ move-result v0
+ return v0
+.end method
+
+
+.method public returnInt()I
+.registers 2
+ const v0, 777
+ return v0
+.end method
diff --git a/test/425-invoke-super/smali/subclass.smali b/test/425-invoke-super/smali/subclass.smali
new file mode 100644
index 0000000000..54e3474078
--- /dev/null
+++ b/test/425-invoke-super/smali/subclass.smali
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSubClass;
+.super LInvokeSuper;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, LInvokeSuper;-><init>()V
+ return-void
+.end method
+
+.method public returnInt()I
+.registers 2
+ const v0, 0
+ return v0
+.end method
diff --git a/test/425-invoke-super/smali/superclass.smali b/test/425-invoke-super/smali/superclass.smali
new file mode 100644
index 0000000000..b366aa7a91
--- /dev/null
+++ b/test/425-invoke-super/smali/superclass.smali
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSuperClass;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public returnInt()I
+.registers 2
+ const v0, 42
+ return v0
+.end method
diff --git a/test/425-invoke-super/src/Main.java b/test/425-invoke-super/src/Main.java
new file mode 100644
index 0000000000..1fb62d0871
--- /dev/null
+++ b/test/425-invoke-super/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ static class A {
+ public int foo() { return 1; }
+ }
+
+ static class B extends A {
+ public int $opt$bar() { return super.foo(); }
+ }
+
+ static class C extends B {
+ public int foo() { return 42; }
+ }
+
+ static class D extends C {
+ }
+
+ static void assertEquals(int expected, int value) {
+ if (expected != value) {
+ throw new Error("Expected " + expected + ", got " + value);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ assertEquals(1, new B().$opt$bar());
+ assertEquals(1, new C().$opt$bar());
+ assertEquals(1, new D().$opt$bar());
+
+ Class<?> c = Class.forName("InvokeSuper");
+ Method m = c.getMethod("run");
+ assertEquals(42, ((Integer)m.invoke(c.newInstance(), new Object[0])).intValue());
+
+ c = Class.forName("SubClass");
+ assertEquals(42, ((Integer)m.invoke(c.newInstance(), new Object[0])).intValue());
+ }
+}
diff --git a/test/426-monitor/expected.txt b/test/426-monitor/expected.txt
new file mode 100644
index 0000000000..2ffeff4be3
--- /dev/null
+++ b/test/426-monitor/expected.txt
@@ -0,0 +1,5 @@
+In static method
+In instance method
+In synchronized block
+In second instance method
+In second static method
diff --git a/test/426-monitor/info.txt b/test/426-monitor/info.txt
new file mode 100644
index 0000000000..1b093ea44e
--- /dev/null
+++ b/test/426-monitor/info.txt
@@ -0,0 +1 @@
+Simple tests for monitorenter/monitorexit.
diff --git a/test/426-monitor/src/Main.java b/test/426-monitor/src/Main.java
new file mode 100644
index 0000000000..a073a95918
--- /dev/null
+++ b/test/426-monitor/src/Main.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ $opt$StaticSynchronizedMethod();
+ new Main().$opt$InstanceSynchronizedMethod();
+ $opt$SynchronizedBlock();
+ new Main().$opt$DoubleInstanceSynchronized();
+ $opt$DoubleStaticSynchronized();
+ }
+
+ public static synchronized void $opt$StaticSynchronizedMethod() {
+ System.out.println("In static method");
+ }
+
+ public synchronized void $opt$InstanceSynchronizedMethod() {
+ System.out.println("In instance method");
+ }
+
+ public static void $opt$SynchronizedBlock() {
+ Object o = new Object();
+ synchronized(o) {
+ System.out.println("In synchronized block");
+ }
+ }
+
+ public synchronized void $opt$DoubleInstanceSynchronized() {
+ synchronized (this) {
+ System.out.println("In second instance method");
+ }
+ }
+
+ public synchronized static void $opt$DoubleStaticSynchronized() {
+ synchronized (Main.class) {
+ System.out.println("In second static method");
+ }
+ }
+}
diff --git a/test/427-bitwise/expected.txt b/test/427-bitwise/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/427-bitwise/expected.txt
diff --git a/test/427-bitwise/info.txt b/test/427-bitwise/info.txt
new file mode 100644
index 0000000000..47628479c7
--- /dev/null
+++ b/test/427-bitwise/info.txt
@@ -0,0 +1 @@
+Tests for the and/or/xor opcodes.
diff --git a/test/427-bitwise/src/Main.java b/test/427-bitwise/src/Main.java
new file mode 100644
index 0000000000..e9840669dd
--- /dev/null
+++ b/test/427-bitwise/src/Main.java
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ andInt();
+ andLong();
+
+ orInt();
+ orLong();
+
+ xorInt();
+ xorLong();
+ }
+
+ private static void andInt() {
+ expectEquals(1, $opt$And(5, 3));
+ expectEquals(0, $opt$And(0, 0));
+ expectEquals(0, $opt$And(0, 3));
+ expectEquals(0, $opt$And(3, 0));
+ expectEquals(1, $opt$And(1, -3));
+ expectEquals(-12, $opt$And(-12, -3));
+
+ expectEquals(1, $opt$AndLit8(1));
+ expectEquals(0, $opt$AndLit8(0));
+ expectEquals(0, $opt$AndLit8(0));
+ expectEquals(3, $opt$AndLit8(3));
+ expectEquals(4, $opt$AndLit8(-12));
+
+ expectEquals(0, $opt$AndLit16(1));
+ expectEquals(0, $opt$AndLit16(0));
+ expectEquals(0, $opt$AndLit16(0));
+ expectEquals(0, $opt$AndLit16(3));
+ expectEquals(65280, $opt$AndLit16(-12));
+ }
+
+ private static void andLong() {
+ expectEquals(1L, $opt$And(5L, 3L));
+ expectEquals(0L, $opt$And(0L, 0L));
+ expectEquals(0L, $opt$And(0L, 3L));
+ expectEquals(0L, $opt$And(3L, 0L));
+ expectEquals(1L, $opt$And(1L, -3L));
+ expectEquals(-12L, $opt$And(-12L, -3L));
+
+ expectEquals(1L, $opt$AndLit8(1L));
+ expectEquals(0L, $opt$AndLit8(0L));
+ expectEquals(0L, $opt$AndLit8(0L));
+ expectEquals(3L, $opt$AndLit8(3L));
+ expectEquals(4L, $opt$AndLit8(-12L));
+
+ expectEquals(0L, $opt$AndLit16(1L));
+ expectEquals(0L, $opt$AndLit16(0L));
+ expectEquals(0L, $opt$AndLit16(0L));
+ expectEquals(0L, $opt$AndLit16(3L));
+ expectEquals(65280L, $opt$AndLit16(-12L));
+ }
+
+ static int $opt$And(int a, int b) {
+ return a & b;
+ }
+
+ static int $opt$AndLit8(int a) {
+ return a & 0xF;
+ }
+
+ static int $opt$AndLit16(int a) {
+ return a & 0xFF00;
+ }
+
+ static long $opt$And(long a, long b) {
+ return a & b;
+ }
+
+ static long $opt$AndLit8(long a) {
+ return a & 0xF;
+ }
+
+ static long $opt$AndLit16(long a) {
+ return a & 0xFF00;
+ }
+
+ private static void orInt() {
+ expectEquals(7, $opt$Or(5, 3));
+ expectEquals(0, $opt$Or(0, 0));
+ expectEquals(3, $opt$Or(0, 3));
+ expectEquals(3, $opt$Or(3, 0));
+ expectEquals(-3, $opt$Or(1, -3));
+ expectEquals(-3, $opt$Or(-12, -3));
+
+ expectEquals(15, $opt$OrLit8(1));
+ expectEquals(15, $opt$OrLit8(0));
+ expectEquals(15, $opt$OrLit8(3));
+ expectEquals(-1, $opt$OrLit8(-12));
+
+ expectEquals(0xFF01, $opt$OrLit16(1));
+ expectEquals(0xFF00, $opt$OrLit16(0));
+ expectEquals(0xFF03, $opt$OrLit16(3));
+ expectEquals(-12, $opt$OrLit16(-12));
+ }
+
+ private static void orLong() {
+ expectEquals(7L, $opt$Or(5L, 3L));
+ expectEquals(0L, $opt$Or(0L, 0L));
+ expectEquals(3L, $opt$Or(0L, 3L));
+ expectEquals(3L, $opt$Or(3L, 0L));
+ expectEquals(-3L, $opt$Or(1L, -3L));
+ expectEquals(-3L, $opt$Or(-12L, -3L));
+
+ expectEquals(15L, $opt$OrLit8(1L));
+ expectEquals(15L, $opt$OrLit8(0L));
+ expectEquals(15L, $opt$OrLit8(3L));
+ expectEquals(-1L, $opt$OrLit8(-12L));
+
+ expectEquals(0xFF01L, $opt$OrLit16(1L));
+ expectEquals(0xFF00L, $opt$OrLit16(0L));
+ expectEquals(0xFF03L, $opt$OrLit16(3L));
+ expectEquals(-12L, $opt$OrLit16(-12L));
+ }
+
+ static int $opt$Or(int a, int b) {
+ return a | b;
+ }
+
+ static int $opt$OrLit8(int a) {
+ return a | 0xF;
+ }
+
+ static int $opt$OrLit16(int a) {
+ return a | 0xFF00;
+ }
+
+ static long $opt$Or(long a, long b) {
+ return a | b;
+ }
+
+ static long $opt$OrLit8(long a) {
+ return a | 0xF;
+ }
+
+ static long $opt$OrLit16(long a) {
+ return a | 0xFF00;
+ }
+
+ private static void xorInt() {
+ expectEquals(6, $opt$Xor(5, 3));
+ expectEquals(0, $opt$Xor(0, 0));
+ expectEquals(3, $opt$Xor(0, 3));
+ expectEquals(3, $opt$Xor(3, 0));
+ expectEquals(-4, $opt$Xor(1, -3));
+ expectEquals(9, $opt$Xor(-12, -3));
+
+ expectEquals(14, $opt$XorLit8(1));
+ expectEquals(15, $opt$XorLit8(0));
+ expectEquals(12, $opt$XorLit8(3));
+ expectEquals(-5, $opt$XorLit8(-12));
+
+ expectEquals(0xFF01, $opt$XorLit16(1));
+ expectEquals(0xFF00, $opt$XorLit16(0));
+ expectEquals(0xFF03, $opt$XorLit16(3));
+ expectEquals(-0xFF0c, $opt$XorLit16(-12));
+ }
+
+ private static void xorLong() {
+ expectEquals(6L, $opt$Xor(5L, 3L));
+ expectEquals(0L, $opt$Xor(0L, 0L));
+ expectEquals(3L, $opt$Xor(0L, 3L));
+ expectEquals(3L, $opt$Xor(3L, 0L));
+ expectEquals(-4L, $opt$Xor(1L, -3L));
+ expectEquals(9L, $opt$Xor(-12L, -3L));
+
+ expectEquals(14L, $opt$XorLit8(1L));
+ expectEquals(15L, $opt$XorLit8(0L));
+ expectEquals(12L, $opt$XorLit8(3L));
+ expectEquals(-5L, $opt$XorLit8(-12L));
+
+ expectEquals(0xFF01L, $opt$XorLit16(1L));
+ expectEquals(0xFF00L, $opt$XorLit16(0L));
+ expectEquals(0xFF03L, $opt$XorLit16(3L));
+ expectEquals(-0xFF0cL, $opt$XorLit16(-12L));
+ }
+
+ static int $opt$Xor(int a, int b) {
+ return a ^ b;
+ }
+
+ static int $opt$XorLit8(int a) {
+ return a ^ 0xF;
+ }
+
+ static int $opt$XorLit16(int a) {
+ return a ^ 0xFF00;
+ }
+
+ static long $opt$Xor(long a, long b) {
+ return a ^ b;
+ }
+
+ static long $opt$XorLit8(long a) {
+ return a ^ 0xF;
+ }
+
+ static long $opt$XorLit16(long a) {
+ return a ^ 0xFF00;
+ }
+}
diff --git a/test/427-bounds/expected.txt b/test/427-bounds/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/427-bounds/expected.txt
diff --git a/test/427-bounds/info.txt b/test/427-bounds/info.txt
new file mode 100644
index 0000000000..8b8b95778e
--- /dev/null
+++ b/test/427-bounds/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler that used to incorrectly pass
+index and/or length to the pThrowArrayBounds entrypoint.
diff --git a/test/427-bounds/src/Main.java b/test/427-bounds/src/Main.java
new file mode 100644
index 0000000000..a2d84d23ca
--- /dev/null
+++ b/test/427-bounds/src/Main.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ Exception exception = null;
+ try {
+ $opt$Throw(new int[1]);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ exception = e;
+ }
+
+ String exceptionMessage = exception.getMessage();
+
+ // Note that it's ART specific to emit the length.
+ if (exceptionMessage.contains("length")) {
+ if (!exceptionMessage.contains("length=1")) {
+ throw new Error("Wrong length in exception message");
+ }
+ }
+
+ // Note that it's ART specific to emit the index.
+ if (exceptionMessage.contains("index")) {
+ if (!exceptionMessage.contains("index=2")) {
+ throw new Error("Wrong index in exception message");
+ }
+ }
+ }
+
+ static void $opt$Throw(int[] array) {
+ // We fetch the length first, to ensure it is in EAX (on x86).
+ // The pThrowArrayBounds entrypoint expects the index in EAX and the
+ // length in ECX, and the optimizing compiler used to write to EAX
+ // before putting the length in ECX.
+ int length = array.length;
+ array[2] = 42;
+ }
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 4002fbf7a5..3e3955b106 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -1,3 +1,4 @@
b/17790197
FloatBadArgReg
+negLong
Done!
diff --git a/test/800-smali/smali/negLong.smali b/test/800-smali/smali/negLong.smali
new file mode 100755
index 0000000000..29d416e066
--- /dev/null
+++ b/test/800-smali/smali/negLong.smali
@@ -0,0 +1,186 @@
+.class public LnegLong;
+.super Ljava/lang/Object;
+.source "negLong.java"
+# static fields
+.field public static final N:I = 0x64
+.field public static i:I
+# direct methods
+.method static constructor <clinit>()V
+ .registers 1
+ .prologue
+ .line 5
+ const/16 v0, 0x44da
+ sput v0, LnegLong;->i:I
+ return-void
+.end method
+.method public constructor <init>()V
+ .registers 1
+ .prologue
+ .line 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+.method public static checkSum1([S)J
+ .registers 7
+ .prologue
+ .line 14
+ array-length v3, p0
+ .line 15
+ const-wide/16 v0, 0x0
+ .line 16
+ const/4 v2, 0x0
+ :goto_4
+ if-ge v2, v3, :cond_d
+ .line 17
+ aget-short v4, p0, v2
+ int-to-long v4, v4
+ add-long/2addr v0, v4
+ .line 16
+ add-int/lit8 v2, v2, 0x1
+ goto :goto_4
+ .line 18
+ :cond_d
+ return-wide v0
+.end method
+.method public static init1([SS)V
+ .registers 4
+ .prologue
+ .line 8
+ array-length v1, p0
+ .line 9
+ const/4 v0, 0x0
+ :goto_2
+ if-ge v0, v1, :cond_9
+ .line 10
+ aput-short p1, p0, v0
+ .line 9
+ add-int/lit8 v0, v0, 0x1
+ goto :goto_2
+ .line 11
+ :cond_9
+ return-void
+.end method
+.method public static main([Ljava/lang/String;)V
+ .registers 6
+ .prologue
+ .line 50
+ invoke-static {}, LnegLong;->negLong()J
+ move-result-wide v0
+ .line 51
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ new-instance v3, Ljava/lang/StringBuilder;
+ invoke-direct {v3}, Ljava/lang/StringBuilder;-><init>()V
+ const-string v4, "nbp ztw p = "
+ invoke-virtual {v3, v4}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+ move-result-object v3
+ invoke-virtual {v3, v0, v1}, Ljava/lang/StringBuilder;->append(J)Ljava/lang/StringBuilder;
+ move-result-object v0
+ invoke-virtual {v0}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v2, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ .line 52
+ return-void
+.end method
+.method public static negLong()J
+ .registers 17
+ .prologue
+ .line 23
+ const-wide v1, -0x4c4a1f4aa9b1db83L
+ .line 24
+ const v7, -0x3f727efa
+ .line 25
+ const/16 v4, -0x284b
+ const v3, 0xdc01
+ .line 26
+ const/16 v0, 0x64
+ new-array v8, v0, [S
+ .line 28
+ const/16 v0, 0x1c60
+ invoke-static {v8, v0}, LnegLong;->init1([SS)V
+ .line 29
+ const/4 v0, 0x2
+ move v6, v0
+ :goto_18
+ const/16 v0, 0x56
+ if-ge v6, v0, :cond_64
+ .line 30
+ const/4 v0, 0x1
+ move v5, v0
+ move v0, v3
+ move-wide v15, v1
+ move-wide v2, v15
+ :goto_21
+ if-ge v5, v6, :cond_5d
+ .line 31
+ int-to-float v0, v4
+ neg-float v1, v7
+ add-float/2addr v0, v1
+ float-to-int v1, v0
+ .line 32
+ const/4 v0, 0x1
+ move v4, v1
+ move-wide v15, v2
+ move-wide v1, v15
+ .line 33
+ :goto_2b
+ add-int/lit8 v3, v0, 0x1
+ const/16 v0, 0x1b
+ if-ge v3, v0, :cond_3a
+ .line 35
+ int-to-long v9, v5
+ mul-long v0, v9, v1
+ neg-long v1, v0
+ .line 38
+ sget v0, LnegLong;->i:I
+ move v4, v0
+ move v0, v3
+ goto :goto_2b
+ .line 40
+ :cond_3a
+ aget-short v0, v8, v6
+ int-to-double v9, v0
+ long-to-double v11, v1
+ const-wide v13, 0x403f9851eb851eb8L
+ sub-double/2addr v11, v13
+ add-double/2addr v9, v11
+ double-to-int v0, v9
+ int-to-short v0, v0
+ aput-short v0, v8, v6
+ .line 41
+ const/4 v0, 0x2
+ :goto_4a
+ const/16 v9, 0x43
+ if-ge v0, v9, :cond_56
+ .line 42
+ neg-long v9, v1
+ const-wide/16 v11, 0x1
+ or-long/2addr v9, v11
+ add-long/2addr v1, v9
+ .line 41
+ add-int/lit8 v0, v0, 0x1
+ goto :goto_4a
+ .line 30
+ :cond_56
+ add-int/lit8 v0, v5, 0x1
+ move v5, v0
+ move v0, v3
+ move-wide v15, v1
+ move-wide v2, v15
+ goto :goto_21
+ .line 29
+ :cond_5d
+ add-int/lit8 v1, v6, 0x1
+ move v6, v1
+ move-wide v15, v2
+ move-wide v1, v15
+ move v3, v0
+ goto :goto_18
+ .line 45
+ :cond_64
+ invoke-static {v8}, LnegLong;->checkSum1([S)J
+ move-result-wide v0
+ int-to-long v2, v3
+ add-long/2addr v0, v2
+ .line 46
+ return-wide v0
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index c86470ce67..87549d9fdb 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -51,6 +51,7 @@ public class Main {
testCases.add(new TestCase("b/17790197", "B17790197", "getInt", null, null, 100));
testCases.add(new TestCase("FloatBadArgReg", "FloatBadArgReg", "getInt",
new Object[]{100}, null, 100));
+ testCases.add(new TestCase("negLong", "negLong", "negLong", null, null, 122142L));
}
public void runTests() {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index e7a04391b2..3b949d6ad0 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -177,15 +177,6 @@ endif
TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
-TEST_ART_BROKEN_RUN_TESTS := \
- 004-ThreadStress
-
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-
-TEST_ART_BROKEN_RUN_TESTS :=
-
# Note 116-nodex2oat is not broken per-se it just doesn't (and isn't meant to) work with --prebuild.
TEST_ART_BROKEN_PREBUILD_RUN_TESTS := \
116-nodex2oat
@@ -295,8 +286,7 @@ endif
TEST_ART_BROKEN_NDEBUG_TESTS :=
# Known broken tests for the default compiler (Quick).
-TEST_ART_BROKEN_DEFAULT_RUN_TESTS := \
- 412-new-array
+TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
ifneq (,$(filter default,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -313,6 +303,7 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
003-omnibus-opcodes \
004-InterfaceTest \
004-JniTest \
+ 004-NativeAllocations \
004-ReferenceMap \
004-SignalTest \
004-StackWalk \
@@ -321,8 +312,11 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
006-args \
007-count10 \
008-exceptions \
+ 009-instanceof \
+ 010-instance \
011-array-copy \
013-math2 \
+ 014-math3 \
016-intern \
017-float \
018-stack-overflow \
@@ -332,6 +326,7 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
022-interface \
023-many-interfaces \
024-illegal-access \
+ 025-access-controller \
026-access \
028-array-write \
029-assert \
@@ -339,6 +334,7 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
031-class-attributes \
032-concrete-sub \
033-class-init-deadlock \
+ 034-call-null \
035-enum \
036-finalizer \
037-inherit \
@@ -358,7 +354,9 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
054-uncaught \
055-enum-performance \
056-const-string-jumbo \
+ 058-enum-order \
061-out-of-memory \
+ 062-character-encodings \
063-process-manager \
064-field-access \
065-mismatched-implements \
@@ -399,14 +397,17 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
105-invoke \
106-exceptions2 \
107-int-math2 \
+ 108-check-cast \
109-suspend-check \
110-field-access \
111-unresolvable-exception \
112-double-math \
113-multidex \
+ 114-ParallelGC \
117-nopatchoat \
118-noimage-dex2oat \
119-noimage-patchoat \
+ 120-hashcode \
121-modifiers \
121-simple-suspend-check \
122-npe \
@@ -419,6 +420,7 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
300-package-override \
301-abstract-protected \
303-verification-stress \
+ 304-method-tracing \
401-optimizing-compiler \
402-optimizing-control-flow \
403-optimizing-long \
@@ -444,6 +446,10 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
422-instanceof \
422-type-conversion \
423-invoke-interface \
+ 424-checkcast \
+ 426-monitor \
+ 427-bitwise \
+ 427-bounds \
700-LoadArgRegs \
701-easy-div-rem \
702-LargeBranchOffset \