summary refs log tree commit diff stats
path: root/compiler
diff options
context:
space:
mode:
Diffstat (limited to 'compiler')
-rw-r--r--compiler/Android.mk5
-rw-r--r--compiler/compiled_method.cc16
-rw-r--r--compiler/dex/frontend.cc3
-rw-r--r--compiler/dex/quick/gen_common.cc30
-rw-r--r--compiler/dex/quick/x86/target_x86.cc46
-rw-r--r--compiler/driver/compiler_driver.cc3
-rw-r--r--compiler/driver/compiler_driver.h1
-rw-r--r--compiler/image_writer.cc6
-rw-r--r--compiler/image_writer.h2
-rw-r--r--compiler/jni/quick/jni_compiler.cc19
-rw-r--r--compiler/llvm/llvm_compilation_unit.cc19
-rw-r--r--compiler/oat_writer.cc42
-rw-r--r--compiler/optimizing/builder.cc256
-rw-r--r--compiler/optimizing/builder.h42
-rw-r--r--compiler/optimizing/code_generator.h159
-rw-r--r--compiler/optimizing/code_generator_arm.cc491
-rw-r--r--compiler/optimizing/code_generator_arm.h48
-rw-r--r--compiler/optimizing/code_generator_x86.cc521
-rw-r--r--compiler/optimizing/code_generator_x86.h48
-rw-r--r--compiler/optimizing/nodes.h72
-rw-r--r--compiler/utils/managed_register.h4
-rw-r--r--compiler/utils/x86/managed_register_x86.cc3
-rw-r--r--compiler/utils/x86/managed_register_x86.h9
23 files changed, 1380 insertions, 465 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index b17cd52fad..6d656e63f1 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -158,11 +158,10 @@ define build-libart-compiler
art_ndebug_or_debug := $(2)
include $(CLEAR_VARS)
- ifeq ($$(art_target_or_host),target)
- include external/stlport/libstlport.mk
- else
+ ifeq ($$(art_target_or_host),host)
LOCAL_IS_HOST_MODULE := true
endif
+ include art/build/Android.libcxx.mk
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart-compiler
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 8e013c1ece..59ed8277d0 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -82,21 +82,7 @@ uint32_t CompiledCode::AlignCode(uint32_t offset) const {
}
uint32_t CompiledCode::AlignCode(uint32_t offset, InstructionSet instruction_set) {
- switch (instruction_set) {
- case kArm:
- case kThumb2:
- return RoundUp(offset, kArmAlignment);
- case kArm64:
- return RoundUp(offset, kArm64Alignment);
- case kMips:
- return RoundUp(offset, kMipsAlignment);
- case kX86: // Fall-through.
- case kX86_64:
- return RoundUp(offset, kX86Alignment);
- default:
- LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return 0;
- }
+ return RoundUp(offset, GetInstructionSetAlignment(instruction_set));
}
size_t CompiledCode::CodeDelta() const {
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 201dc47bf7..1bf5fce989 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -158,7 +158,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
if (cu.instruction_set == kArm) {
cu.instruction_set = kThumb2;
}
- cu.target64 = (cu.instruction_set == kX86_64) || (cu.instruction_set == kArm64);
+ cu.target64 = Is64BitInstructionSet(cu.instruction_set);
cu.compiler = compiler;
// TODO: x86_64 & arm64 are not yet implemented.
CHECK((cu.instruction_set == kThumb2) ||
@@ -166,7 +166,6 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
(cu.instruction_set == kX86_64) ||
(cu.instruction_set == kMips));
-
/* Adjust this value accordingly once inlining is performed */
cu.num_dalvik_registers = code_item->registers_size_;
// TODO: set this from command line
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 0d938d91cd..055f60c1c8 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -520,7 +520,12 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
}
// rBase now holds static storage base
if (is_long_or_double) {
- rl_src = LoadValueWide(rl_src, kAnyReg);
+ RegisterClass register_kind = kAnyReg;
+ if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
+ // Force long/double volatile stores into SSE registers to avoid tearing.
+ register_kind = kFPReg;
+ }
+ rl_src = LoadValueWide(rl_src, register_kind);
} else {
rl_src = LoadValue(rl_src, kAnyReg);
}
@@ -601,7 +606,12 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
FreeTemp(r_method);
}
// r_base now holds static storage base
- RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
+ RegisterClass result_reg_kind = kAnyReg;
+ if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
+ // Force long/double volatile loads into SSE registers to avoid tearing.
+ result_reg_kind = kFPReg;
+ }
+ RegLocation rl_result = EvalLoc(rl_dest, result_reg_kind, true);
if (is_long_or_double) {
LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
@@ -755,9 +765,12 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
DCHECK(rl_dest.wide);
GenNullCheck(rl_obj.reg, opt_flags);
if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
- rl_result = EvalLoc(rl_dest, reg_class, true);
- // FIXME? duplicate null check?
- GenNullCheck(rl_obj.reg, opt_flags);
+ RegisterClass result_reg_kind = kAnyReg;
+ if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
+ // Force long/double volatile loads into SSE registers to avoid tearing.
+ result_reg_kind = kFPReg;
+ }
+ rl_result = EvalLoc(rl_dest, result_reg_kind, true);
LoadBaseDispWide(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg,
rl_obj.s_reg_low);
MarkPossibleNullPointerException(opt_flags);
@@ -822,7 +835,12 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
- rl_src = LoadValueWide(rl_src, kAnyReg);
+ RegisterClass src_reg_kind = kAnyReg;
+ if (field_info.IsVolatile() && cu_->instruction_set == kX86) {
+ // Force long/double volatile stores into SSE registers to avoid tearing.
+ src_reg_kind = kFPReg;
+ }
+ rl_src = LoadValueWide(rl_src, src_reg_kind);
GenNullCheck(rl_obj.reg, opt_flags);
RegStorage reg_ptr = AllocTemp();
OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value());
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index dcc5d9b73e..5a8ad7a2b4 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1064,6 +1064,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
LoadWordDisp(rs_rDX, count_offset, rs_rCX);
LIR *length_compare = nullptr;
int start_value = 0;
+ bool is_index_on_stack = false;
if (zero_based) {
// We have to handle an empty string. Use special instruction JECXZ.
length_compare = NewLIR0(kX86Jecxz8);
@@ -1084,14 +1085,32 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// Runtime start index.
rl_start = UpdateLoc(rl_start);
if (rl_start.location == kLocPhysReg) {
+ // Handle "start index < 0" case.
+ OpRegReg(kOpXor, rs_rBX, rs_rBX);
+ OpRegReg(kOpCmp, rl_start.reg, rs_rBX);
+ OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX);
+
+ // The length of the string should be greater than the start index.
length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
OpRegReg(kOpSub, rs_rCX, rl_start.reg);
+ if (rl_start.reg == rs_rDI) {
+ // The special case. We will use EDI further, so lets put start index to stack.
+ NewLIR1(kX86Push32R, rDI);
+ is_index_on_stack = true;
+ }
} else {
- // Compare to memory to avoid a register load. Handle pushed EDI.
+ // Load the start index from stack, remembering that we pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
- OpRegMem(kOpCmp, rs_rCX, rs_rX86_SP, displacement);
- length_compare = NewLIR2(kX86Jcc8, 0, kX86CondLe);
- OpRegMem(kOpSub, rs_rCX, rs_rX86_SP, displacement);
+ LoadWordDisp(rs_rX86_SP, displacement, rs_rBX);
+ OpRegReg(kOpXor, rs_rDI, rs_rDI);
+ OpRegReg(kOpCmp, rs_rBX, rs_rDI);
+ OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);
+
+ length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
+ OpRegReg(kOpSub, rs_rCX, rs_rBX);
+ // Put the start index to stack.
+ NewLIR1(kX86Push32R, rBX);
+ is_index_on_stack = true;
}
}
}
@@ -1113,21 +1132,12 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
NewLIR3(kX86Lea32RM, rDI, rBX, 2 * start_value);
}
} else {
- if (rl_start.location == kLocPhysReg) {
- if (rl_start.reg.GetReg() == rDI) {
- // We have a slight problem here. We are already using RDI!
- // Grab the value from the stack.
- LoadWordDisp(rs_rX86_SP, 0, rs_rDX);
- OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
- } else {
- OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
- }
- } else {
- OpRegCopy(rs_rDI, rs_rBX);
- // Load the start index from stack, remembering that we pushed EDI.
- int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
- LoadWordDisp(rs_rX86_SP, displacement, rs_rDX);
+ if (is_index_on_stack == true) {
+ // Load the start index from stack.
+ NewLIR1(kX86Pop32R, rDX);
OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
+ } else {
+ OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
}
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 2b20c6fc75..0ad30be3fe 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -339,7 +339,6 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
compiler_(Compiler::Create(compiler_kind)),
instruction_set_(instruction_set),
instruction_set_features_(instruction_set_features),
- instruction_set_is_64_bit_(instruction_set == kX86_64 || instruction_set == kArm64),
freezing_constructor_lock_("freezing constructor lock"),
compiled_classes_lock_("compiled classes lock"),
compiled_methods_lock_("compiled method lock"),
@@ -448,7 +447,7 @@ CompilerTls* CompilerDriver::GetTls() {
}
#define CREATE_TRAMPOLINE(type, abi, offset) \
- if (instruction_set_is_64_bit_) { \
+ if (Is64BitInstructionSet(instruction_set_)) { \
return CreateTrampoline64(instruction_set_, abi, \
type ## _ENTRYPOINT_OFFSET(8, offset)); \
} else { \
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index d49523a172..d7d40d554a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -692,7 +692,6 @@ class CompilerDriver {
const InstructionSet instruction_set_;
const InstructionSetFeatures instruction_set_features_;
- const bool instruction_set_is_64_bit_;
// All class references that require
mutable ReaderWriterMutex freezing_constructor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 0405198350..f76587a26e 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -235,8 +235,8 @@ bool ImageWriter::AllocMemory() {
}
// Create the image bitmap.
- image_bitmap_.reset(gc::accounting::SpaceBitmap::Create("image bitmap", image_->Begin(),
- length));
+ image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create("image bitmap", image_->Begin(),
+ length));
if (image_bitmap_.get() == nullptr) {
LOG(ERROR) << "Failed to allocate memory for image bitmap";
return false;
@@ -525,7 +525,7 @@ void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_d
// Return to write header at start of image with future location of image_roots. At this point,
// image_end_ is the size of the image (excluding bitmaps).
- const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * gc::accounting::SpaceBitmap::kAlignment;
+ const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * kObjectAlignment;
const size_t bitmap_bytes = RoundUp(image_end_, heap_bytes_per_bitmap_byte) /
heap_bytes_per_bitmap_byte;
ImageHeader image_header(PointerToLowMemUInt32(image_begin_),
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 92b24f6067..ee241cb02f 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -173,7 +173,7 @@ class ImageWriter {
const byte* oat_data_begin_;
// Image bitmap which lets us know where the objects inside of the image reside.
- UniquePtr<gc::accounting::SpaceBitmap> image_bitmap_;
+ UniquePtr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
// Offset from oat_data_begin_ to the stubs.
uint32_t interpreter_to_interpreter_bridge_offset_;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 64508d10fc..93b1b5a155 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -64,6 +64,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
if (instruction_set == kThumb2) {
instruction_set = kArm;
}
+ const bool is_64_bit_target = Is64BitInstructionSet(instruction_set);
// Calling conventions used to iterate over parameters to method
UniquePtr<JniCallingConvention> main_jni_conv(
JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
@@ -109,7 +110,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
main_jni_conv->ReferenceCount(),
mr_conv->InterproceduralScratchRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ CopyRawPtrFromThread64(main_jni_conv->SirtLinkOffset(),
Thread::TopSirtOffset<8>(),
mr_conv->InterproceduralScratchRegister());
@@ -171,7 +172,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
}
// 4. Write out the end of the quick frames.
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
__ StoreImmediateToThread64(Thread::TopOfManagedStackPcOffset<8>(), 0,
mr_conv->InterproceduralScratchRegister());
@@ -216,7 +217,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
}
if (main_jni_conv->IsCurrentParamInRegister()) {
__ GetCurrentThread(main_jni_conv->CurrentParamRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start64),
main_jni_conv->InterproceduralScratchRegister());
} else {
@@ -226,7 +227,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
} else {
__ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
main_jni_conv->InterproceduralScratchRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ CallFromThread64(jni_start64, main_jni_conv->InterproceduralScratchRegister());
} else {
__ CallFromThread32(jni_start32, main_jni_conv->InterproceduralScratchRegister());
@@ -292,14 +293,14 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
if (main_jni_conv->IsCurrentParamInRegister()) {
ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>());
} else {
__ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
}
} else {
FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
main_jni_conv->InterproceduralScratchRegister());
} else {
@@ -331,7 +332,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
if (instruction_set == kMips && main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
- return_save_location = FrameOffset(return_save_location.Uint32Value() + kPointerSize);
+ return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
}
CHECK_LT(return_save_location.Uint32Value(), frame_size+main_out_arg_size);
__ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
@@ -380,7 +381,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
}
if (end_jni_conv->IsCurrentParamInRegister()) {
__ GetCurrentThread(end_jni_conv->CurrentParamRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end64),
end_jni_conv->InterproceduralScratchRegister());
} else {
@@ -390,7 +391,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
} else {
__ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
end_jni_conv->InterproceduralScratchRegister());
- if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ if (is_64_bit_target) {
__ CallFromThread64(ThreadOffset<8>(jni_end64), end_jni_conv->InterproceduralScratchRegister());
} else {
__ CallFromThread32(ThreadOffset<4>(jni_end32), end_jni_conv->InterproceduralScratchRegister());
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index 1d027f9d3b..fe609593dc 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -314,23 +314,8 @@ bool LlvmCompilationUnit::MaterializeToRawOStream(::llvm::raw_ostream& out_strea
// section if the section alignment is greater than kArchAlignment.
void LlvmCompilationUnit::CheckCodeAlign(uint32_t align) const {
InstructionSet insn_set = GetInstructionSet();
- switch (insn_set) {
- case kThumb2:
- case kArm:
- CHECK_LE(align, static_cast<uint32_t>(kArmAlignment));
- break;
-
- case kX86:
- CHECK_LE(align, static_cast<uint32_t>(kX86Alignment));
- break;
-
- case kMips:
- CHECK_LE(align, static_cast<uint32_t>(kMipsAlignment));
- break;
-
- default:
- LOG(FATAL) << "Unknown instruction set: " << insn_set;
- }
+ size_t insn_set_align = GetInstructionSetAlignment(insn_set);
+ CHECK_LE(align, static_cast<uint32_t>(insn_set_align));
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index eff2425bb7..dc66e9c108 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -345,36 +345,6 @@ size_t OatWriter::InitOatCodeClassDef(size_t offset,
return offset;
}
-static void DCheckCodeAlignment(size_t offset, InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- DCHECK_ALIGNED(offset, kArmAlignment);
- break;
-
- case kArm64:
- DCHECK_ALIGNED(offset, kArm64Alignment);
- break;
-
- case kMips:
- DCHECK_ALIGNED(offset, kMipsAlignment);
- break;
-
- case kX86_64:
- // Fall-through.
- case kX86:
- DCHECK_ALIGNED(offset, kX86Alignment);
- break;
-
- case kNone:
- // Use a DCHECK instead of FATAL so that in the non-debug case the whole switch can
- // be optimized away.
- DCHECK(false);
- break;
- }
-}
-
size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
size_t __attribute__((unused)) class_def_index,
size_t class_def_method_index,
@@ -406,7 +376,8 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
} else {
CHECK(quick_code != nullptr);
offset = compiled_method->AlignCode(offset);
- DCheckCodeAlignment(offset, compiled_method->GetInstructionSet());
+ DCHECK_ALIGNED_PARAM(offset,
+ GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
uint32_t code_size = quick_code->size() * sizeof(uint8_t);
CHECK_NE(code_size, 0U);
@@ -539,11 +510,7 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
refs++;
}
}
- InstructionSet trg_isa = compiler_driver_->GetInstructionSet();
- size_t pointer_size = 4;
- if (trg_isa == kArm64 || trg_isa == kX86_64) {
- pointer_size = 8;
- }
+ size_t pointer_size = GetInstructionSetPointerSize(compiler_driver_->GetInstructionSet());
size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(pointer_size, refs);
// Get the generic spill masks and base frame size.
@@ -857,7 +824,8 @@ size_t OatWriter::WriteCodeMethod(OutputStream* out, const size_t file_offset,
relative_offset += aligned_code_delta;
DCHECK_OFFSET();
}
- DCheckCodeAlignment(relative_offset, compiled_method->GetInstructionSet());
+ DCHECK_ALIGNED_PARAM(relative_offset,
+ GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
uint32_t code_size = quick_code->size() * sizeof(uint8_t);
CHECK_NE(code_size, 0U);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index beccf01872..1efdd389d8 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -48,7 +48,8 @@ bool HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
- HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++);
+ HParameterValue* parameter =
+ new (arena_) HParameterValue(parameter_index++, Primitive::kPrimNot);
entry_block_->AddInstruction(parameter);
HLocal* local = GetLocalAt(locals_index++);
entry_block_->AddInstruction(new (arena_) HStoreLocal(local, parameter));
@@ -59,19 +60,24 @@ bool HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
for (int i = 0; i < number_of_parameters; i++) {
switch (shorty[pos++]) {
case 'F':
- case 'D':
- case 'J': {
+ case 'D': {
return false;
}
default: {
// integer and reference parameters.
- HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++);
+ HParameterValue* parameter =
+ new (arena_) HParameterValue(parameter_index++, Primitive::GetType(shorty[pos - 1]));
entry_block_->AddInstruction(parameter);
HLocal* local = GetLocalAt(locals_index++);
// Store the parameter value in the local that the dex code will use
// to reference that parameter.
entry_block_->AddInstruction(new (arena_) HStoreLocal(local, parameter));
+ if (parameter->GetType() == Primitive::kPrimLong) {
+ i++;
+ locals_index++;
+ parameter_index++;
+ }
break;
}
}
@@ -88,8 +94,8 @@ static bool CanHandleCodeItem(const DexFile::CodeItem& code_item) {
template<typename T>
void HGraphBuilder::If_22t(const Instruction& instruction, int32_t dex_offset, bool is_not) {
- HInstruction* first = LoadLocal(instruction.VRegA());
- HInstruction* second = LoadLocal(instruction.VRegB());
+ HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
+ HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
current_block_->AddInstruction(new (arena_) T(first, second));
if (is_not) {
current_block_->AddInstruction(new (arena_) HNot(current_block_->GetLastInstruction()));
@@ -205,25 +211,25 @@ HBasicBlock* HGraphBuilder::FindBlockStartingAt(int32_t index) const {
}
template<typename T>
-void HGraphBuilder::Binop_32x(const Instruction& instruction) {
- HInstruction* first = LoadLocal(instruction.VRegB());
- HInstruction* second = LoadLocal(instruction.VRegC());
- current_block_->AddInstruction(new (arena_) T(Primitive::kPrimInt, first, second));
+void HGraphBuilder::Binop_32x(const Instruction& instruction, Primitive::Type type) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), type);
+ HInstruction* second = LoadLocal(instruction.VRegC(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
template<typename T>
-void HGraphBuilder::Binop_12x(const Instruction& instruction) {
- HInstruction* first = LoadLocal(instruction.VRegA());
- HInstruction* second = LoadLocal(instruction.VRegB());
- current_block_->AddInstruction(new (arena_) T(Primitive::kPrimInt, first, second));
+void HGraphBuilder::Binop_12x(const Instruction& instruction, Primitive::Type type) {
+ HInstruction* first = LoadLocal(instruction.VRegA(), type);
+ HInstruction* second = LoadLocal(instruction.VRegB(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
template<typename T>
void HGraphBuilder::Binop_22s(const Instruction& instruction, bool reverse) {
- HInstruction* first = LoadLocal(instruction.VRegB());
- HInstruction* second = GetConstant(instruction.VRegC_22s());
+ HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
+ HInstruction* second = GetIntConstant(instruction.VRegC_22s());
if (reverse) {
std::swap(first, second);
}
@@ -233,8 +239,8 @@ void HGraphBuilder::Binop_22s(const Instruction& instruction, bool reverse) {
template<typename T>
void HGraphBuilder::Binop_22b(const Instruction& instruction, bool reverse) {
- HInstruction* first = LoadLocal(instruction.VRegB());
- HInstruction* second = GetConstant(instruction.VRegC_22b());
+ HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
+ HInstruction* second = GetIntConstant(instruction.VRegC_22b());
if (reverse) {
std::swap(first, second);
}
@@ -242,6 +248,74 @@ void HGraphBuilder::Binop_22b(const Instruction& instruction, bool reverse) {
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
+void HGraphBuilder::BuildReturn(const Instruction& instruction, Primitive::Type type) {
+ if (type == Primitive::kPrimVoid) {
+ current_block_->AddInstruction(new (arena_) HReturnVoid());
+ } else {
+ HInstruction* value = LoadLocal(instruction.VRegA(), type);
+ current_block_->AddInstruction(new (arena_) HReturn(value));
+ }
+ current_block_->AddSuccessor(exit_block_);
+ current_block_ = nullptr;
+}
+
+bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
+ uint32_t dex_offset,
+ uint32_t method_idx,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index) {
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(method_id.proto_idx_);
+ const char* descriptor = dex_file_->StringDataByIdx(proto_id.shorty_idx_);
+ Primitive::Type return_type = Primitive::GetType(descriptor[0]);
+ bool is_instance_call =
+ instruction.Opcode() != Instruction::INVOKE_STATIC
+ && instruction.Opcode() != Instruction::INVOKE_STATIC_RANGE;
+ const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
+
+ // Treat invoke-direct like static calls for now.
+ HInvoke* invoke = new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, return_type, dex_offset, method_idx);
+
+ size_t start_index = 0;
+ if (is_instance_call) {
+ HInstruction* arg = LoadLocal(is_range ? register_index : args[0], Primitive::kPrimNot);
+ invoke->SetArgumentAt(0, arg);
+ start_index = 1;
+ }
+
+ uint32_t descriptor_index = 1;
+ uint32_t argument_index = start_index;
+ for (size_t i = start_index; i < number_of_vreg_arguments; i++, argument_index++) {
+ Primitive::Type type = Primitive::GetType(descriptor[descriptor_index++]);
+ switch (type) {
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ return false;
+
+ default: {
+ if (!is_range && type == Primitive::kPrimLong && args[i] + 1 != args[i + 1]) {
+ LOG(WARNING) << "Non sequential register pair in " << dex_compilation_unit_->GetSymbol()
+ << " at " << dex_offset;
+ // We do not implement non sequential register pair.
+ return false;
+ }
+ HInstruction* arg = LoadLocal(is_range ? register_index + i : args[i], type);
+ invoke->SetArgumentAt(argument_index, arg);
+ if (type == Primitive::kPrimLong) {
+ i++;
+ }
+ }
+ }
+ }
+
+ DCHECK_EQ(argument_index, number_of_arguments);
+ current_block_->AddInstruction(invoke);
+ return true;
+}
+
bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_t dex_offset) {
if (current_block_ == nullptr) {
return true; // Dead code
@@ -250,28 +324,47 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
switch (instruction.Opcode()) {
case Instruction::CONST_4: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = GetConstant(instruction.VRegB_11n());
+ HIntConstant* constant = GetIntConstant(instruction.VRegB_11n());
UpdateLocal(register_index, constant);
break;
}
case Instruction::CONST_16: {
int32_t register_index = instruction.VRegA();
- HIntConstant* constant = GetConstant(instruction.VRegB_21s());
+ HIntConstant* constant = GetIntConstant(instruction.VRegB_21s());
+ UpdateLocal(register_index, constant);
+ break;
+ }
+
+ case Instruction::CONST_WIDE_16: {
+ int32_t register_index = instruction.VRegA();
+ HLongConstant* constant = GetLongConstant(instruction.VRegB_21s());
+ UpdateLocal(register_index, constant);
+ break;
+ }
+
+ case Instruction::CONST_WIDE_32: {
+ int32_t register_index = instruction.VRegA();
+ HLongConstant* constant = GetLongConstant(instruction.VRegB_31i());
+ UpdateLocal(register_index, constant);
+ break;
+ }
+
+ case Instruction::CONST_WIDE: {
+ int32_t register_index = instruction.VRegA();
+ HLongConstant* constant = GetLongConstant(instruction.VRegB_51l());
UpdateLocal(register_index, constant);
break;
}
case Instruction::MOVE: {
- HInstruction* value = LoadLocal(instruction.VRegB());
+ HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
UpdateLocal(instruction.VRegA(), value);
break;
}
case Instruction::RETURN_VOID: {
- current_block_->AddInstruction(new (arena_) HReturnVoid());
- current_block_->AddSuccessor(exit_block_);
- current_block_ = nullptr;
+ BuildReturn(instruction, Primitive::kPrimVoid);
break;
}
@@ -296,88 +389,82 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
break;
}
- case Instruction::RETURN:
+ case Instruction::RETURN: {
+ BuildReturn(instruction, Primitive::kPrimInt);
+ break;
+ }
+
case Instruction::RETURN_OBJECT: {
- HInstruction* value = LoadLocal(instruction.VRegA());
- current_block_->AddInstruction(new (arena_) HReturn(value));
- current_block_->AddSuccessor(exit_block_);
- current_block_ = nullptr;
+ BuildReturn(instruction, Primitive::kPrimNot);
+ break;
+ }
+
+ case Instruction::RETURN_WIDE: {
+ BuildReturn(instruction, Primitive::kPrimLong);
break;
}
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_DIRECT: {
uint32_t method_idx = instruction.VRegB_35c();
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
- uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
- const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
- const size_t number_of_arguments = instruction.VRegA_35c();
-
- if (Primitive::GetType(descriptor[0]) != Primitive::kPrimVoid) {
- return false;
- }
-
- // Treat invoke-direct like static calls for now.
- HInvokeStatic* invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, dex_offset, method_idx);
-
+ uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
instruction.GetArgs(args);
-
- for (size_t i = 0; i < number_of_arguments; i++) {
- HInstruction* arg = LoadLocal(args[i]);
- HInstruction* push = new (arena_) HPushArgument(arg, i);
- current_block_->AddInstruction(push);
- invoke->SetArgumentAt(i, push);
+ if (!BuildInvoke(instruction, dex_offset, method_idx, number_of_vreg_arguments, false, args, -1)) {
+ return false;
}
-
- current_block_->AddInstruction(invoke);
break;
}
case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_DIRECT_RANGE: {
uint32_t method_idx = instruction.VRegB_3rc();
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
- uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
- const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
- const size_t number_of_arguments = instruction.VRegA_3rc();
-
- if (Primitive::GetType(descriptor[0]) != Primitive::kPrimVoid) {
+ uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
+ uint32_t register_index = instruction.VRegC();
+ if (!BuildInvoke(instruction, dex_offset, method_idx,
+ number_of_vreg_arguments, true, nullptr, register_index)) {
return false;
}
-
- // Treat invoke-direct like static calls for now.
- HInvokeStatic* invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, dex_offset, method_idx);
- int32_t register_index = instruction.VRegC();
- for (size_t i = 0; i < number_of_arguments; i++) {
- HInstruction* arg = LoadLocal(register_index + i);
- HInstruction* push = new (arena_) HPushArgument(arg, i);
- current_block_->AddInstruction(push);
- invoke->SetArgumentAt(i, push);
- }
- current_block_->AddInstruction(invoke);
break;
}
case Instruction::ADD_INT: {
- Binop_32x<HAdd>(instruction);
+ Binop_32x<HAdd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::ADD_LONG: {
+ Binop_32x<HAdd>(instruction, Primitive::kPrimLong);
break;
}
case Instruction::SUB_INT: {
- Binop_32x<HSub>(instruction);
+ Binop_32x<HSub>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::SUB_LONG: {
+ Binop_32x<HSub>(instruction, Primitive::kPrimLong);
break;
}
case Instruction::ADD_INT_2ADDR: {
- Binop_12x<HAdd>(instruction);
+ Binop_12x<HAdd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::ADD_LONG_2ADDR: {
+ Binop_12x<HAdd>(instruction, Primitive::kPrimLong);
break;
}
case Instruction::SUB_INT_2ADDR: {
- Binop_12x<HSub>(instruction);
+ Binop_12x<HSub>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::SUB_LONG_2ADDR: {
+ Binop_12x<HSub>(instruction, Primitive::kPrimLong);
break;
}
@@ -408,6 +495,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
break;
}
+ case Instruction::MOVE_RESULT_WIDE: {
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ break;
+ }
+
case Instruction::NOP:
break;
@@ -417,7 +509,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
return true;
}
-HIntConstant* HGraphBuilder::GetConstant0() {
+HIntConstant* HGraphBuilder::GetIntConstant0() {
if (constant0_ != nullptr) {
return constant0_;
}
@@ -426,7 +518,7 @@ HIntConstant* HGraphBuilder::GetConstant0() {
return constant0_;
}
-HIntConstant* HGraphBuilder::GetConstant1() {
+HIntConstant* HGraphBuilder::GetIntConstant1() {
if (constant1_ != nullptr) {
return constant1_;
}
@@ -435,10 +527,10 @@ HIntConstant* HGraphBuilder::GetConstant1() {
return constant1_;
}
-HIntConstant* HGraphBuilder::GetConstant(int constant) {
+HIntConstant* HGraphBuilder::GetIntConstant(int32_t constant) {
switch (constant) {
- case 0: return GetConstant0();
- case 1: return GetConstant1();
+ case 0: return GetIntConstant0();
+ case 1: return GetIntConstant1();
default: {
HIntConstant* instruction = new (arena_) HIntConstant(constant);
entry_block_->AddInstruction(instruction);
@@ -447,6 +539,12 @@ HIntConstant* HGraphBuilder::GetConstant(int constant) {
}
}
+HLongConstant* HGraphBuilder::GetLongConstant(int64_t constant) {
+ HLongConstant* instruction = new (arena_) HLongConstant(constant);
+ entry_block_->AddInstruction(instruction);
+ return instruction;
+}
+
HLocal* HGraphBuilder::GetLocalAt(int register_index) const {
return locals_.Get(register_index);
}
@@ -456,9 +554,9 @@ void HGraphBuilder::UpdateLocal(int register_index, HInstruction* instruction) c
current_block_->AddInstruction(new (arena_) HStoreLocal(local, instruction));
}
-HInstruction* HGraphBuilder::LoadLocal(int register_index) const {
+HInstruction* HGraphBuilder::LoadLocal(int register_index, Primitive::Type type) const {
HLocal* local = GetLocalAt(register_index);
- current_block_->AddInstruction(new (arena_) HLoadLocal(local));
+ current_block_->AddInstruction(new (arena_) HLoadLocal(local, type));
return current_block_->GetLastInstruction();
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 60d998224c..108514a632 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -19,6 +19,7 @@
#include "dex_file.h"
#include "driver/dex_compilation_unit.h"
+#include "primitive.h"
#include "utils/allocation.h"
#include "utils/growable_array.h"
@@ -29,13 +30,14 @@ class Instruction;
class HBasicBlock;
class HGraph;
class HIntConstant;
+class HLongConstant;
class HInstruction;
class HLocal;
class HGraphBuilder : public ValueObject {
public:
HGraphBuilder(ArenaAllocator* arena,
- const DexCompilationUnit* dex_compilation_unit = nullptr,
+ DexCompilationUnit* dex_compilation_unit = nullptr,
const DexFile* dex_file = nullptr)
: arena_(arena),
branch_targets_(arena, 0),
@@ -63,24 +65,44 @@ class HGraphBuilder : public ValueObject {
void MaybeUpdateCurrentBlock(size_t index);
HBasicBlock* FindBlockStartingAt(int32_t index) const;
- HIntConstant* GetConstant0();
- HIntConstant* GetConstant1();
- HIntConstant* GetConstant(int constant);
+ HIntConstant* GetIntConstant0();
+ HIntConstant* GetIntConstant1();
+ HIntConstant* GetIntConstant(int32_t constant);
+ HLongConstant* GetLongConstant(int64_t constant);
void InitializeLocals(uint16_t count);
HLocal* GetLocalAt(int register_index) const;
void UpdateLocal(int register_index, HInstruction* instruction) const;
- HInstruction* LoadLocal(int register_index) const;
+ HInstruction* LoadLocal(int register_index, Primitive::Type type) const;
// Temporarily returns whether the compiler supports the parameters
// of the method.
bool InitializeParameters(uint16_t number_of_parameters);
- template<typename T> void Binop_32x(const Instruction& instruction);
- template<typename T> void Binop_12x(const Instruction& instruction);
- template<typename T> void Binop_22b(const Instruction& instruction, bool reverse);
- template<typename T> void Binop_22s(const Instruction& instruction, bool reverse);
+ template<typename T>
+ void Binop_32x(const Instruction& instruction, Primitive::Type type);
+
+ template<typename T>
+ void Binop_12x(const Instruction& instruction, Primitive::Type type);
+
+ template<typename T>
+ void Binop_22b(const Instruction& instruction, bool reverse);
+
+ template<typename T>
+ void Binop_22s(const Instruction& instruction, bool reverse);
+
template<typename T> void If_22t(const Instruction& instruction, int32_t dex_offset, bool is_not);
+ void BuildReturn(const Instruction& instruction, Primitive::Type type);
+
+ // Builds an invocation node and returns whether the instruction is supported.
+ bool BuildInvoke(const Instruction& instruction,
+ uint32_t dex_offset,
+ uint32_t method_idx,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index);
+
ArenaAllocator* const arena_;
// A list of the size of the dex code holding block information for
@@ -99,7 +121,7 @@ class HGraphBuilder : public ValueObject {
HIntConstant* constant1_;
const DexFile* const dex_file_;
- const DexCompilationUnit* const dex_compilation_unit_;
+ DexCompilationUnit* const dex_compilation_unit_;
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 66485989e7..5c7cac1e5c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
+#include "base/bit_field.h"
#include "globals.h"
#include "instruction_set.h"
#include "memory_region.h"
@@ -25,6 +26,8 @@
namespace art {
+static size_t constexpr kVRegSize = 4;
+
class DexCompilationUnit;
class CodeAllocator {
@@ -49,30 +52,149 @@ struct PcInfo {
*/
class Location : public ValueObject {
public:
- template<typename T>
- T reg() const { return static_cast<T>(reg_); }
+ enum Kind {
+ kInvalid = 0,
+ kStackSlot = 1, // Word size slot.
+ kDoubleStackSlot = 2, // 64bit stack slot.
+ kRegister = 3,
+ // On 32bits architectures, quick can pass a long where the
+ // low bits are in the last parameter register, and the high
+ // bits are in a stack slot. The kQuickParameter kind is for
+ // handling this special case.
+ kQuickParameter = 4,
+ };
+
+ Location() : value_(kInvalid) {
+ DCHECK(!IsValid());
+ }
+
+ Location(const Location& other) : ValueObject(), value_(other.value_) {}
+
+ Location& operator=(const Location& other) {
+ value_ = other.value_;
+ return *this;
+ }
- Location() : reg_(kInvalid) { }
- explicit Location(uword reg) : reg_(reg) { }
+ bool IsValid() const {
+ return value_ != kInvalid;
+ }
- static Location RegisterLocation(uword reg) {
- return Location(reg);
+ // Register locations.
+ static Location RegisterLocation(ManagedRegister reg) {
+ return Location(kRegister, reg.RegId());
}
- bool IsValid() const { return reg_ != kInvalid; }
+ bool IsRegister() const {
+ return GetKind() == kRegister;
+ }
- Location(const Location& other) : reg_(other.reg_) { }
+ ManagedRegister reg() const {
+ DCHECK(IsRegister());
+ return static_cast<ManagedRegister>(GetPayload());
+ }
- Location& operator=(const Location& other) {
- reg_ = other.reg_;
- return *this;
+ static uword EncodeStackIndex(intptr_t stack_index) {
+ DCHECK(-kStackIndexBias <= stack_index);
+ DCHECK(stack_index < kStackIndexBias);
+ return static_cast<uword>(kStackIndexBias + stack_index);
+ }
+
+ static Location StackSlot(intptr_t stack_index) {
+ uword payload = EncodeStackIndex(stack_index);
+ Location loc(kStackSlot, payload);
+ // Ensure that sign is preserved.
+ DCHECK_EQ(loc.GetStackIndex(), stack_index);
+ return loc;
+ }
+
+ bool IsStackSlot() const {
+ return GetKind() == kStackSlot;
+ }
+
+ static Location DoubleStackSlot(intptr_t stack_index) {
+ uword payload = EncodeStackIndex(stack_index);
+ Location loc(kDoubleStackSlot, payload);
+ // Ensure that sign is preserved.
+ DCHECK_EQ(loc.GetStackIndex(), stack_index);
+ return loc;
+ }
+
+ bool IsDoubleStackSlot() const {
+ return GetKind() == kDoubleStackSlot;
+ }
+
+ intptr_t GetStackIndex() const {
+ DCHECK(IsStackSlot() || IsDoubleStackSlot());
+ // Decode stack index manually to preserve sign.
+ return GetPayload() - kStackIndexBias;
+ }
+
+ intptr_t GetHighStackIndex(uintptr_t word_size) const {
+ DCHECK(IsDoubleStackSlot());
+ // Decode stack index manually to preserve sign.
+ return GetPayload() - kStackIndexBias + word_size;
+ }
+
+ static Location QuickParameter(uint32_t parameter_index) {
+ return Location(kQuickParameter, parameter_index);
+ }
+
+ uint32_t GetQuickParameterIndex() const {
+ DCHECK(IsQuickParameter());
+ return GetPayload();
+ }
+
+ bool IsQuickParameter() const {
+ return GetKind() == kQuickParameter;
+ }
+
+ arm::ArmManagedRegister AsArm() const;
+ x86::X86ManagedRegister AsX86() const;
+
+ Kind GetKind() const {
+ return KindField::Decode(value_);
+ }
+
+ bool Equals(Location other) const {
+ return value_ == other.value_;
+ }
+
+ const char* DebugString() const {
+ switch (GetKind()) {
+ case kInvalid: return "?";
+ case kRegister: return "R";
+ case kStackSlot: return "S";
+ case kDoubleStackSlot: return "DS";
+ case kQuickParameter: return "Q";
+ }
+ return "?";
}
private:
- // The target register for that location.
- // TODO: Support stack location.
- uword reg_;
- static const uword kInvalid = -1;
+ // Number of bits required to encode Kind value.
+ static constexpr uint32_t kBitsForKind = 4;
+ static constexpr uint32_t kBitsForPayload = kWordSize * kBitsPerByte - kBitsForKind;
+
+ explicit Location(uword value) : value_(value) {}
+
+ Location(Kind kind, uword payload)
+ : value_(KindField::Encode(kind) | PayloadField::Encode(payload)) {}
+
+ uword GetPayload() const {
+ return PayloadField::Decode(value_);
+ }
+
+ typedef BitField<Kind, 0, kBitsForKind> KindField;
+ typedef BitField<uword, kBitsForKind, kBitsForPayload> PayloadField;
+
+ // Layout for stack slots.
+ static const intptr_t kStackIndexBias =
+ static_cast<intptr_t>(1) << (kBitsForPayload - 1);
+
+ // Location either contains kind and payload fields or a tagged handle for
+ // a constant locations. Values of enumeration Kind are selected in such a
+ // way that none of them can be interpreted as a kConstant tag.
+ uword value_;
};
/**
@@ -203,11 +325,10 @@ class CallingConvention {
return registers_[index];
}
- uint8_t GetStackOffsetOf(size_t index) const {
- DCHECK_GE(index, number_of_registers_);
+ uint8_t GetStackOffsetOf(size_t index, size_t word_size) const {
// We still reserve the space for parameters passed by registers.
- // Add kWordSize for the method pointer.
- return index * kWordSize + kWordSize;
+ // Add word_size for the method pointer.
+ return index * kVRegSize + word_size;
}
private:
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 4e88765e2c..27691ac080 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -17,6 +17,7 @@
#include "code_generator_arm.h"
#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
+#include "utils/arm/managed_register_arm.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
@@ -24,11 +25,20 @@
#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
namespace art {
+
+arm::ArmManagedRegister Location::AsArm() const {
+ return reg().AsArm();
+}
+
namespace arm {
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
+static Location ArmCoreLocation(Register reg) {
+ return Location::RegisterLocation(ArmManagedRegister::FromCoreRegister(reg));
+}
+
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
: HGraphVisitor(graph),
assembler_(codegen->GetAssembler()),
@@ -38,9 +48,11 @@ void CodeGeneratorARM::GenerateFrameEntry() {
core_spill_mask_ |= (1 << LR);
__ PushList((1 << LR));
- // Add the current ART method to the frame size, the return PC, and the filler.
- SetFrameSize(RoundUp((
- GetGraph()->GetMaximumNumberOfOutVRegs() + GetGraph()->GetNumberOfVRegs() + 3) * kArmWordSize,
+ SetFrameSize(RoundUp(
+ (GetGraph()->GetMaximumNumberOfOutVRegs() + GetGraph()->GetNumberOfVRegs()) * kVRegSize
+ + kVRegSize // filler
+ + kArmWordSize // Art method
+ + kNumberOfPushedRegistersAtEntry * kArmWordSize,
kStackAlignment));
// The return PC has already been pushed on the stack.
__ AddConstant(SP, -(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kArmWordSize));
@@ -63,28 +75,204 @@ int32_t CodeGeneratorARM::GetStackSlot(HLocal* local) const {
if (reg_number >= number_of_vregs - number_of_in_vregs) {
// Local is a parameter of the method. It is stored in the caller's frame.
return GetFrameSize() + kArmWordSize // ART method
- + (reg_number - number_of_vregs + number_of_in_vregs) * kArmWordSize;
+ + (reg_number - number_of_vregs + number_of_in_vregs) * kVRegSize;
} else {
// Local is a temporary in this method. It is stored in this method's frame.
return GetFrameSize() - (kNumberOfPushedRegistersAtEntry * kArmWordSize)
- - kArmWordSize // filler.
- - (number_of_vregs * kArmWordSize)
- + (reg_number * kArmWordSize);
+ - kVRegSize // filler.
+ - (number_of_vregs * kVRegSize)
+ + (reg_number * kVRegSize);
+ }
+}
+
+Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ uint32_t index = gp_index_++;
+ if (index < calling_convention.GetNumberOfRegisters()) {
+ return ArmCoreLocation(calling_convention.GetRegisterAt(index));
+ } else {
+ return Location::StackSlot(calling_convention.GetStackOffsetOf(index, kArmWordSize));
+ }
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t index = gp_index_;
+ gp_index_ += 2;
+ if (index + 1 < calling_convention.GetNumberOfRegisters()) {
+ return Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(
+ calling_convention.GetRegisterPairAt(index)));
+ } else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
+ return Location::QuickParameter(index);
+ } else {
+ return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index, kArmWordSize));
+ }
+ }
+
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Unimplemented parameter type " << type;
+ break;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected parameter type " << type;
+ break;
+ }
+ return Location();
+}
+
+void CodeGeneratorARM::Move32(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ if (source.IsRegister()) {
+ __ Mov(destination.AsArm().AsCoreRegister(), source.AsArm().AsCoreRegister());
+ } else {
+ __ ldr(destination.AsArm().AsCoreRegister(), Address(SP, source.GetStackIndex()));
+ }
+ } else {
+ DCHECK(destination.IsStackSlot());
+ if (source.IsRegister()) {
+ __ str(source.AsArm().AsCoreRegister(), Address(SP, destination.GetStackIndex()));
+ } else {
+ __ ldr(R0, Address(SP, source.GetStackIndex()));
+ __ str(R0, Address(SP, destination.GetStackIndex()));
+ }
+ }
+}
+
+void CodeGeneratorARM::Move64(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ if (source.IsRegister()) {
+ __ Mov(destination.AsArm().AsRegisterPairLow(), source.AsArm().AsRegisterPairLow());
+ __ Mov(destination.AsArm().AsRegisterPairHigh(), source.AsArm().AsRegisterPairHigh());
+ } else if (source.IsQuickParameter()) {
+ uint32_t argument_index = source.GetQuickParameterIndex();
+ InvokeDexCallingConvention calling_convention;
+ __ Mov(destination.AsArm().AsRegisterPairLow(),
+ calling_convention.GetRegisterAt(argument_index));
+ __ ldr(destination.AsArm().AsRegisterPairHigh(),
+ Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1, kArmWordSize) + GetFrameSize()));
+ } else {
+ DCHECK(source.IsDoubleStackSlot());
+ if (destination.AsArm().AsRegisterPair() == R1_R2) {
+ __ ldr(R1, Address(SP, source.GetStackIndex()));
+ __ ldr(R2, Address(SP, source.GetHighStackIndex(kArmWordSize)));
+ } else {
+ __ LoadFromOffset(kLoadWordPair, destination.AsArm().AsRegisterPairLow(),
+ SP, source.GetStackIndex());
+ }
+ }
+ } else if (destination.IsQuickParameter()) {
+ InvokeDexCallingConvention calling_convention;
+ uint32_t argument_index = destination.GetQuickParameterIndex();
+ if (source.IsRegister()) {
+ __ Mov(calling_convention.GetRegisterAt(argument_index), source.AsArm().AsRegisterPairLow());
+ __ str(source.AsArm().AsRegisterPairHigh(),
+ Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1, kArmWordSize)));
+ } else {
+ DCHECK(source.IsDoubleStackSlot());
+ __ ldr(calling_convention.GetRegisterAt(argument_index), Address(SP, source.GetStackIndex()));
+ __ ldr(R0, Address(SP, source.GetHighStackIndex(kArmWordSize)));
+ __ str(R0, Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1, kArmWordSize)));
+ }
+ } else {
+ DCHECK(destination.IsDoubleStackSlot());
+ if (source.IsRegister()) {
+ if (source.AsArm().AsRegisterPair() == R1_R2) {
+ __ str(R1, Address(SP, destination.GetStackIndex()));
+ __ str(R2, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
+ } else {
+ __ StoreToOffset(kStoreWordPair, source.AsArm().AsRegisterPairLow(),
+ SP, destination.GetStackIndex());
+ }
+ } else if (source.IsQuickParameter()) {
+ InvokeDexCallingConvention calling_convention;
+ uint32_t argument_index = source.GetQuickParameterIndex();
+ __ str(calling_convention.GetRegisterAt(argument_index),
+ Address(SP, destination.GetStackIndex()));
+ __ ldr(R0,
+ Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1, kArmWordSize) + GetFrameSize()));
+ __ str(R0, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
+ } else {
+ DCHECK(source.IsDoubleStackSlot());
+ __ ldr(R0, Address(SP, source.GetStackIndex()));
+ __ str(R0, Address(SP, destination.GetStackIndex()));
+ __ ldr(R0, Address(SP, source.GetHighStackIndex(kArmWordSize)));
+ __ str(R0, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
+ }
}
}
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
if (instruction->AsIntConstant() != nullptr) {
- __ LoadImmediate(location.reg<Register>(), instruction->AsIntConstant()->GetValue());
+ int32_t value = instruction->AsIntConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ LoadImmediate(location.AsArm().AsCoreRegister(), value);
+ } else {
+ __ LoadImmediate(R0, value);
+ __ str(R0, Address(SP, location.GetStackIndex()));
+ }
+ } else if (instruction->AsLongConstant() != nullptr) {
+ int64_t value = instruction->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ LoadImmediate(location.AsArm().AsRegisterPairLow(), Low32Bits(value));
+ __ LoadImmediate(location.AsArm().AsRegisterPairHigh(), High32Bits(value));
+ } else {
+ __ LoadImmediate(R0, Low32Bits(value));
+ __ str(R0, Address(SP, location.GetStackIndex()));
+ __ LoadImmediate(R0, High32Bits(value));
+ __ str(R0, Address(SP, location.GetHighStackIndex(kArmWordSize)));
+ }
} else if (instruction->AsLoadLocal() != nullptr) {
- __ LoadFromOffset(kLoadWord, location.reg<Register>(),
- SP, GetStackSlot(instruction->AsLoadLocal()->GetLocal()));
+ uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ Move32(location, Location::StackSlot(stack_slot));
+ break;
+
+ case Primitive::kPrimLong:
+ Move64(location, Location::DoubleStackSlot(stack_slot));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type " << instruction->GetType();
+ }
} else {
// This can currently only happen when the instruction that requests the move
// is the next to be compiled.
DCHECK_EQ(instruction->GetNext(), move_for);
- __ mov(location.reg<Register>(),
- ShifterOperand(instruction->GetLocations()->Out().reg<Register>()));
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ Move32(location, instruction->GetLocations()->Out());
+ break;
+
+ case Primitive::kPrimLong:
+ Move64(location, instruction->GetLocations()->Out());
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type " << instruction->GetType();
+ }
}
}
@@ -114,13 +302,13 @@ void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
void LocationsBuilderARM::VisitIf(HIf* if_instr) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
- locations->SetInAt(0, Location(R0));
+ locations->SetInAt(0, ArmCoreLocation(R0));
if_instr->SetLocations(locations);
}
void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
// TODO: Generate the input as a condition, instead of materializing in a register.
- __ cmp(if_instr->GetLocations()->InAt(0).reg<Register>(), ShifterOperand(0));
+ __ cmp(if_instr->GetLocations()->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(0));
__ b(codegen_->GetLabelOf(if_instr->IfFalseSuccessor()), EQ);
if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) {
__ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
@@ -129,18 +317,18 @@ void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
void LocationsBuilderARM::VisitEqual(HEqual* equal) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(equal);
- locations->SetInAt(0, Location(R0));
- locations->SetInAt(1, Location(R1));
- locations->SetOut(Location(R0));
+ locations->SetInAt(0, ArmCoreLocation(R0));
+ locations->SetInAt(1, ArmCoreLocation(R1));
+ locations->SetOut(ArmCoreLocation(R0));
equal->SetLocations(locations);
}
void InstructionCodeGeneratorARM::VisitEqual(HEqual* equal) {
LocationSummary* locations = equal->GetLocations();
- __ teq(locations->InAt(0).reg<Register>(),
- ShifterOperand(locations->InAt(1).reg<Register>()));
- __ mov(locations->Out().reg<Register>(), ShifterOperand(1), EQ);
- __ mov(locations->Out().reg<Register>(), ShifterOperand(0), NE);
+ __ teq(locations->InAt(0).AsArm().AsCoreRegister(),
+ ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(1), EQ);
+ __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(0), NE);
}
void LocationsBuilderARM::VisitLocal(HLocal* local) {
@@ -161,14 +349,27 @@ void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
- locations->SetInAt(1, Location(R0));
+ switch (store->InputAt(1)->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << store->InputAt(1)->GetType();
+ }
store->SetLocations(locations);
}
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
- LocationSummary* locations = store->GetLocations();
- __ StoreToOffset(kStoreWord, locations->InAt(1).reg<Register>(),
- SP, codegen_->GetStackSlot(store->GetLocal()));
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -179,6 +380,14 @@ void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
}
+void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
+ constant->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
+ // Will be generated at use site.
+}
+
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
@@ -189,56 +398,83 @@ void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
- locations->SetInAt(0, Location(R0));
+ switch (ret->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ locations->SetInAt(0, ArmCoreLocation(R0));
+ break;
+
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented return type " << ret->InputAt(0)->GetType();
+ }
+
ret->SetLocations(locations);
}
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
- DCHECK_EQ(ret->GetLocations()->InAt(0).reg<Register>(), R0);
+ if (kIsDebugBuild) {
+ switch (ret->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsArm().AsCoreRegister(), R0);
+ break;
+
+ case Primitive::kPrimLong:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsArm().AsRegisterPair(), R0_R1);
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented return type " << ret->InputAt(0)->GetType();
+ }
+ }
codegen_->GenerateFrameExit();
}
-static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
-static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
+ locations->AddTemp(ArmCoreLocation(R0));
-class InvokeDexCallingConvention : public CallingConvention<Register> {
- public:
- InvokeDexCallingConvention()
- : CallingConvention(kParameterCoreRegisters, kParameterCoreRegistersLength) {}
+ InvokeDexCallingConventionVisitor calling_convention_visitor;
+ for (int i = 0; i < invoke->InputCount(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
+ }
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
-};
+ switch (invoke->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ locations->SetOut(ArmCoreLocation(R0));
+ break;
-void LocationsBuilderARM::VisitPushArgument(HPushArgument* argument) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(argument);
- InvokeDexCallingConvention calling_convention;
- if (argument->GetArgumentIndex() < calling_convention.GetNumberOfRegisters()) {
- Location location = Location(calling_convention.GetRegisterAt(argument->GetArgumentIndex()));
- locations->SetInAt(0, location);
- locations->SetOut(location);
- } else {
- locations->SetInAt(0, Location(R0));
- }
- argument->SetLocations(locations);
-}
+ case Primitive::kPrimLong:
+ locations->SetOut(Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ break;
-void InstructionCodeGeneratorARM::VisitPushArgument(HPushArgument* argument) {
- uint8_t argument_index = argument->GetArgumentIndex();
- InvokeDexCallingConvention calling_convention;
- size_t parameter_registers = calling_convention.GetNumberOfRegisters();
- LocationSummary* locations = argument->GetLocations();
- if (argument_index >= parameter_registers) {
- uint8_t offset = calling_convention.GetStackOffsetOf(argument_index);
- __ StoreToOffset(kStoreWord, locations->InAt(0).reg<Register>(), SP, offset);
- } else {
- DCHECK_EQ(locations->Out().reg<Register>(), locations->InAt(0).reg<Register>());
+ case Primitive::kPrimVoid:
+ break;
+
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Unimplemented return type " << invoke->GetType();
+ break;
}
-}
-void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
- locations->AddTemp(Location(R0));
invoke->SetLocations(locations);
}
@@ -247,7 +483,7 @@ void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
}
void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).reg<Register>();
+ Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
size_t index_in_cache = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
invoke->GetIndexInDexCache() * kArmWordSize;
@@ -277,13 +513,30 @@ void LocationsBuilderARM::VisitAdd(HAdd* add) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(add);
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
- locations->SetInAt(0, Location(R0));
- locations->SetInAt(1, Location(R1));
- locations->SetOut(Location(R0));
+ locations->SetInAt(0, ArmCoreLocation(R0));
+ locations->SetInAt(1, ArmCoreLocation(R1));
+ locations->SetOut(ArmCoreLocation(R0));
break;
}
+
+ case Primitive::kPrimLong: {
+ locations->SetInAt(
+ 0, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ locations->SetInAt(
+ 1, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R2_R3)));
+ locations->SetOut(Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected add type " << add->GetResultType();
+ break;
+
default:
- LOG(FATAL) << "Unimplemented";
+ LOG(FATAL) << "Unimplemented add type " << add->GetResultType();
}
add->SetLocations(locations);
}
@@ -292,12 +545,29 @@ void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
LocationSummary* locations = add->GetLocations();
switch (add->GetResultType()) {
case Primitive::kPrimInt:
- __ add(locations->Out().reg<Register>(),
- locations->InAt(0).reg<Register>(),
- ShifterOperand(locations->InAt(1).reg<Register>()));
+ __ add(locations->Out().AsArm().AsCoreRegister(),
+ locations->InAt(0).AsArm().AsCoreRegister(),
+ ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
break;
+
+ case Primitive::kPrimLong:
+ __ adds(locations->Out().AsArm().AsRegisterPairLow(),
+ locations->InAt(0).AsArm().AsRegisterPairLow(),
+ ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairLow()));
+ __ adc(locations->Out().AsArm().AsRegisterPairHigh(),
+ locations->InAt(0).AsArm().AsRegisterPairHigh(),
+ ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairHigh()));
+ break;
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected add type " << add->GetResultType();
+ break;
+
default:
- LOG(FATAL) << "Unimplemented";
+ LOG(FATAL) << "Unimplemented add type " << add->GetResultType();
}
}
@@ -305,13 +575,30 @@ void LocationsBuilderARM::VisitSub(HSub* sub) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(sub);
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- locations->SetInAt(0, Location(R0));
- locations->SetInAt(1, Location(R1));
- locations->SetOut(Location(R0));
+ locations->SetInAt(0, ArmCoreLocation(R0));
+ locations->SetInAt(1, ArmCoreLocation(R1));
+ locations->SetOut(ArmCoreLocation(R0));
break;
}
+
+ case Primitive::kPrimLong: {
+ locations->SetInAt(
+ 0, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ locations->SetInAt(
+ 1, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R2_R3)));
+ locations->SetOut(Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ break;
+
default:
- LOG(FATAL) << "Unimplemented";
+ LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
}
sub->SetLocations(locations);
}
@@ -320,12 +607,29 @@ void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
switch (sub->GetResultType()) {
case Primitive::kPrimInt:
- __ sub(locations->Out().reg<Register>(),
- locations->InAt(0).reg<Register>(),
- ShifterOperand(locations->InAt(1).reg<Register>()));
+ __ sub(locations->Out().AsArm().AsCoreRegister(),
+ locations->InAt(0).AsArm().AsCoreRegister(),
+ ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ break;
+
+ case Primitive::kPrimLong:
+ __ subs(locations->Out().AsArm().AsRegisterPairLow(),
+ locations->InAt(0).AsArm().AsRegisterPairLow(),
+ ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairLow()));
+ __ sbc(locations->Out().AsArm().AsRegisterPairHigh(),
+ locations->InAt(0).AsArm().AsRegisterPairHigh(),
+ ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairHigh()));
break;
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ break;
+
default:
- LOG(FATAL) << "Unimplemented";
+ LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
}
}
@@ -345,7 +649,7 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- locations->SetOut(Location(R0));
+ locations->SetOut(ArmCoreLocation(R0));
instruction->SetLocations(locations);
}
@@ -363,36 +667,31 @@ void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = instruction->GetIndex();
- if (argument_index < calling_convention.GetNumberOfRegisters()) {
- locations->SetOut(Location(calling_convention.GetRegisterAt(argument_index)));
- } else {
- locations->SetOut(Location(R0));
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
}
+ locations->SetOut(location);
instruction->SetLocations(locations);
}
void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = instruction->GetLocations();
- InvokeDexCallingConvention calling_convention;
- uint8_t argument_index = instruction->GetIndex();
- if (argument_index >= calling_convention.GetNumberOfRegisters()) {
- uint8_t offset = calling_convention.GetStackOffsetOf(argument_index);
- __ ldr(locations->Out().reg<Register>(), Address(SP, offset + codegen_->GetFrameSize()));
- }
+ // Nothing to do, the parameter is already at its location.
}
void LocationsBuilderARM::VisitNot(HNot* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- locations->SetInAt(0, Location(R0));
- locations->SetOut(Location(R0));
+ locations->SetInAt(0, ArmCoreLocation(R0));
+ locations->SetOut(ArmCoreLocation(R0));
instruction->SetLocations(locations);
}
void InstructionCodeGeneratorARM::VisitNot(HNot* instruction) {
LocationSummary* locations = instruction->GetLocations();
- __ eor(locations->Out().reg<Register>(), locations->InAt(0).reg<Register>(), ShifterOperand(1));
+ __ eor(locations->Out().AsArm().AsCoreRegister(),
+ locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(1));
}
} // namespace arm
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index a51d85e40c..ed35f94e2b 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -24,11 +24,45 @@
namespace art {
namespace arm {
+class CodeGeneratorARM;
+
static constexpr size_t kArmWordSize = 4;
+static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
+static constexpr RegisterPair kParameterCorePairRegisters[] = { R1_R2, R2_R3 };
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+class InvokeDexCallingConvention : public CallingConvention<Register> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters, kParameterCoreRegistersLength) {}
+
+ RegisterPair GetRegisterPairAt(size_t argument_index) {
+ DCHECK_LT(argument_index + 1, GetNumberOfRegisters());
+ return kParameterCorePairRegisters[argument_index];
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitor() : gp_index_(0) {}
+
+ Location GetNextLocation(Primitive::Type type);
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+ uint32_t gp_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+};
+
class LocationsBuilderARM : public HGraphVisitor {
public:
- explicit LocationsBuilderARM(HGraph* graph) : HGraphVisitor(graph) { }
+ explicit LocationsBuilderARM(HGraph* graph, CodeGeneratorARM* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
@@ -38,11 +72,12 @@ class LocationsBuilderARM : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
private:
+ CodeGeneratorARM* const codegen_;
+ InvokeDexCallingConventionVisitor parameter_visitor_;
+
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM);
};
-class CodeGeneratorARM;
-
class InstructionCodeGeneratorARM : public HGraphVisitor {
public:
InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen);
@@ -68,7 +103,7 @@ class CodeGeneratorARM : public CodeGenerator {
public:
explicit CodeGeneratorARM(HGraph* graph)
: CodeGenerator(graph),
- location_builder_(graph),
+ location_builder_(graph, this),
instruction_visitor_(graph, this) { }
virtual ~CodeGeneratorARM() { }
@@ -96,6 +131,11 @@ class CodeGeneratorARM : public CodeGenerator {
int32_t GetStackSlot(HLocal* local) const;
private:
+ // Helper method to move a 32bits value between two locations.
+ void Move32(Location destination, Location source);
+ // Helper method to move a 64bits value between two locations.
+ void Move64(Location destination, Location source);
+
LocationsBuilderARM location_builder_;
InstructionCodeGeneratorARM instruction_visitor_;
ArmAssembler assembler_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 88198dc471..114263161d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -17,6 +17,7 @@
#include "code_generator_x86.h"
#include "utils/assembler.h"
#include "utils/x86/assembler_x86.h"
+#include "utils/x86/managed_register_x86.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
@@ -24,11 +25,20 @@
#define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
namespace art {
+
+x86::X86ManagedRegister Location::AsX86() const {
+ return reg().AsX86();
+}
+
namespace x86 {
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
+static Location X86CpuLocation(Register reg) {
+ return Location::RegisterLocation(X86ManagedRegister::FromCpuRegister(reg));
+}
+
InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen)
: HGraphVisitor(graph),
assembler_(codegen->GetAssembler()),
@@ -39,10 +49,13 @@ void CodeGeneratorX86::GenerateFrameEntry() {
static const int kFakeReturnRegister = 8;
core_spill_mask_ |= (1 << kFakeReturnRegister);
- // Add the current ART method to the frame size, the return PC, and the filler.
- SetFrameSize(RoundUp((
- GetGraph()->GetMaximumNumberOfOutVRegs() + GetGraph()->GetNumberOfVRegs() + 3) * kX86WordSize,
+ SetFrameSize(RoundUp(
+ (GetGraph()->GetMaximumNumberOfOutVRegs() + GetGraph()->GetNumberOfVRegs()) * kVRegSize
+ + kVRegSize // filler
+ + kX86WordSize // Art method
+ + kNumberOfPushedRegistersAtEntry * kX86WordSize,
kStackAlignment));
+
// The return PC has already been pushed on the stack.
__ subl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
__ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
@@ -67,28 +80,208 @@ int32_t CodeGeneratorX86::GetStackSlot(HLocal* local) const {
if (reg_number >= number_of_vregs - number_of_in_vregs) {
// Local is a parameter of the method. It is stored in the caller's frame.
return GetFrameSize() + kX86WordSize // ART method
- + (reg_number - number_of_vregs + number_of_in_vregs) * kX86WordSize;
+ + (reg_number - number_of_vregs + number_of_in_vregs) * kVRegSize;
} else {
// Local is a temporary in this method. It is stored in this method's frame.
return GetFrameSize() - (kNumberOfPushedRegistersAtEntry * kX86WordSize)
- - kX86WordSize // filler.
- - (number_of_vregs * kX86WordSize)
- + (reg_number * kX86WordSize);
+ - kVRegSize // filler.
+ - (number_of_vregs * kVRegSize)
+ + (reg_number * kVRegSize);
+ }
+}
+
+static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ uint32_t index = gp_index_++;
+ if (index < calling_convention.GetNumberOfRegisters()) {
+ return X86CpuLocation(calling_convention.GetRegisterAt(index));
+ } else {
+ return Location::StackSlot(calling_convention.GetStackOffsetOf(index, kX86WordSize));
+ }
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t index = gp_index_;
+ gp_index_ += 2;
+ if (index + 1 < calling_convention.GetNumberOfRegisters()) {
+ return Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(
+ calling_convention.GetRegisterPairAt(index)));
+ } else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
+ return Location::QuickParameter(index);
+ } else {
+ return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index, kX86WordSize));
+ }
+ }
+
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Unimplemented parameter type " << type;
+ break;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected parameter type " << type;
+ break;
+ }
+ return Location();
+}
+
+void CodeGeneratorX86::Move32(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ if (source.IsRegister()) {
+ __ movl(destination.AsX86().AsCpuRegister(), source.AsX86().AsCpuRegister());
+ } else {
+ DCHECK(source.IsStackSlot());
+ __ movl(destination.AsX86().AsCpuRegister(), Address(ESP, source.GetStackIndex()));
+ }
+ } else {
+ if (source.IsRegister()) {
+ __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsCpuRegister());
+ } else {
+ DCHECK(source.IsStackSlot());
+ __ movl(EAX, Address(ESP, source.GetStackIndex()));
+ __ movl(Address(ESP, destination.GetStackIndex()), EAX);
+ }
+ }
+}
+
+void CodeGeneratorX86::Move64(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ if (source.IsRegister()) {
+ __ movl(destination.AsX86().AsRegisterPairLow(), source.AsX86().AsRegisterPairLow());
+ __ movl(destination.AsX86().AsRegisterPairHigh(), source.AsX86().AsRegisterPairHigh());
+ } else if (source.IsQuickParameter()) {
+ uint32_t argument_index = source.GetQuickParameterIndex();
+ InvokeDexCallingConvention calling_convention;
+ __ movl(destination.AsX86().AsRegisterPairLow(),
+ calling_convention.GetRegisterAt(argument_index));
+ __ movl(destination.AsX86().AsRegisterPairHigh(), Address(ESP,
+ calling_convention.GetStackOffsetOf(argument_index + 1, kX86WordSize) + GetFrameSize()));
+ } else {
+ DCHECK(source.IsDoubleStackSlot());
+ __ movl(destination.AsX86().AsRegisterPairLow(), Address(ESP, source.GetStackIndex()));
+ __ movl(destination.AsX86().AsRegisterPairHigh(),
+ Address(ESP, source.GetHighStackIndex(kX86WordSize)));
+ }
+ } else if (destination.IsQuickParameter()) {
+ InvokeDexCallingConvention calling_convention;
+ uint32_t argument_index = destination.GetQuickParameterIndex();
+ if (source.IsRegister()) {
+ __ movl(calling_convention.GetRegisterAt(argument_index), source.AsX86().AsRegisterPairLow());
+ __ movl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1, kX86WordSize)),
+ source.AsX86().AsRegisterPairHigh());
+ } else {
+ DCHECK(source.IsDoubleStackSlot());
+ __ movl(calling_convention.GetRegisterAt(argument_index),
+ Address(ESP, source.GetStackIndex()));
+ __ movl(EAX, Address(ESP, source.GetHighStackIndex(kX86WordSize)));
+ __ movl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1, kX86WordSize)), EAX);
+ }
+ } else {
+ if (source.IsRegister()) {
+ __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsRegisterPairLow());
+ __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
+ source.AsX86().AsRegisterPairHigh());
+ } else if (source.IsQuickParameter()) {
+ InvokeDexCallingConvention calling_convention;
+ uint32_t argument_index = source.GetQuickParameterIndex();
+ __ movl(Address(ESP, destination.GetStackIndex()),
+ calling_convention.GetRegisterAt(argument_index));
+ __ movl(EAX, Address(ESP,
+ calling_convention.GetStackOffsetOf(argument_index + 1, kX86WordSize) + GetFrameSize()));
+ __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), EAX);
+ } else {
+ DCHECK(source.IsDoubleStackSlot());
+ __ movl(EAX, Address(ESP, source.GetStackIndex()));
+ __ movl(Address(ESP, destination.GetStackIndex()), EAX);
+ __ movl(EAX, Address(ESP, source.GetHighStackIndex(kX86WordSize)));
+ __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), EAX);
+ }
}
}
void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
if (instruction->AsIntConstant() != nullptr) {
- __ movl(location.reg<Register>(), Immediate(instruction->AsIntConstant()->GetValue()));
+ Immediate imm(instruction->AsIntConstant()->GetValue());
+ if (location.IsRegister()) {
+ __ movl(location.AsX86().AsCpuRegister(), imm);
+ } else {
+ __ movl(Address(ESP, location.GetStackIndex()), imm);
+ }
+ } else if (instruction->AsLongConstant() != nullptr) {
+ int64_t value = instruction->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ movl(location.AsX86().AsRegisterPairLow(), Immediate(Low32Bits(value)));
+ __ movl(location.AsX86().AsRegisterPairHigh(), Immediate(High32Bits(value)));
+ } else {
+ __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
+ __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+ }
} else if (instruction->AsLoadLocal() != nullptr) {
- __ movl(location.reg<Register>(),
- Address(ESP, GetStackSlot(instruction->AsLoadLocal()->GetLocal())));
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ Move32(location, Location::StackSlot(GetStackSlot(instruction->AsLoadLocal()->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ Move64(location, Location::DoubleStackSlot(
+ GetStackSlot(instruction->AsLoadLocal()->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << instruction->GetType();
+ }
} else {
// This can currently only happen when the instruction that requests the move
// is the next to be compiled.
DCHECK_EQ(instruction->GetNext(), move_for);
- __ movl(location.reg<Register>(),
- instruction->GetLocations()->Out().reg<Register>());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ Move32(location, instruction->GetLocations()->Out());
+ break;
+
+ case Primitive::kPrimLong:
+ Move64(location, instruction->GetLocations()->Out());
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type " << instruction->GetType();
+ }
}
}
@@ -118,13 +311,13 @@ void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
void LocationsBuilderX86::VisitIf(HIf* if_instr) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
- locations->SetInAt(0, Location(EAX));
+ locations->SetInAt(0, X86CpuLocation(EAX));
if_instr->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
// TODO: Generate the input as a condition, instead of materializing in a register.
- __ cmpl(if_instr->GetLocations()->InAt(0).reg<Register>(), Immediate(0));
+ __ cmpl(if_instr->GetLocations()->InAt(0).AsX86().AsCpuRegister(), Immediate(0));
__ j(kEqual, codegen_->GetLabelOf(if_instr->IfFalseSuccessor()));
if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) {
__ jmp(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
@@ -147,29 +340,43 @@ void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
}
-void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* local) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(local);
- locations->SetInAt(1, Location(EAX));
- local->SetLocations(locations);
+void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+ switch (store->InputAt(1)->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << store->InputAt(1)->GetType();
+ }
+ store->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
- __ movl(Address(ESP, codegen_->GetStackSlot(store->GetLocal())),
- store->GetLocations()->InAt(1).reg<Register>());
}
void LocationsBuilderX86::VisitEqual(HEqual* equal) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(equal);
- locations->SetInAt(0, Location(EAX));
- locations->SetInAt(1, Location(ECX));
- locations->SetOut(Location(EAX));
+ locations->SetInAt(0, X86CpuLocation(EAX));
+ locations->SetInAt(1, X86CpuLocation(ECX));
+ locations->SetOut(X86CpuLocation(EAX));
equal->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitEqual(HEqual* equal) {
- __ cmpl(equal->GetLocations()->InAt(0).reg<Register>(),
- equal->GetLocations()->InAt(1).reg<Register>());
- __ setb(kEqual, equal->GetLocations()->Out().reg<Register>());
+ __ cmpl(equal->GetLocations()->InAt(0).AsX86().AsCpuRegister(),
+ equal->GetLocations()->InAt(1).AsX86().AsCpuRegister());
+ __ setb(kEqual, equal->GetLocations()->Out().AsX86().AsCpuRegister());
}
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
@@ -180,6 +387,14 @@ void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
}
+void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
+ constant->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
+ // Will be generated at use site.
+}
+
void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
@@ -191,78 +406,89 @@ void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
void LocationsBuilderX86::VisitReturn(HReturn* ret) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
- locations->SetInAt(0, Location(EAX));
+ switch (ret->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ locations->SetInAt(0, X86CpuLocation(EAX));
+ break;
+
+ case Primitive::kPrimLong:
+ locations->SetInAt(
+ 0, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented return type " << ret->InputAt(0)->GetType();
+ }
ret->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
- DCHECK_EQ(ret->GetLocations()->InAt(0).reg<Register>(), EAX);
+ if (kIsDebugBuild) {
+ switch (ret->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86().AsCpuRegister(), EAX);
+ break;
+
+ case Primitive::kPrimLong:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86().AsRegisterPair(), EAX_EDX);
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented return type " << ret->InputAt(0)->GetType();
+ }
+ }
codegen_->GenerateFrameExit();
__ ret();
}
-static constexpr Register kParameterCoreRegisters[] = { ECX, EDX, EBX };
-static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
-
-class InvokeDexCallingConvention : public CallingConvention<Register> {
- public:
- InvokeDexCallingConvention()
- : CallingConvention(kParameterCoreRegisters, kParameterCoreRegistersLength) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
-};
-
-static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
- arraysize(kRuntimeParameterCoreRegisters);
+void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
+ locations->AddTemp(X86CpuLocation(EAX));
-class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
- public:
- InvokeRuntimeCallingConvention()
- : CallingConvention(kRuntimeParameterCoreRegisters,
- kRuntimeParameterCoreRegistersLength) {}
+ InvokeDexCallingConventionVisitor calling_convention_visitor;
+ for (int i = 0; i < invoke->InputCount(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
+ }
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
+ switch (invoke->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ locations->SetOut(X86CpuLocation(EAX));
+ break;
-void LocationsBuilderX86::VisitPushArgument(HPushArgument* argument) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(argument);
- InvokeDexCallingConvention calling_convention;
- if (argument->GetArgumentIndex() < calling_convention.GetNumberOfRegisters()) {
- Location location = Location(calling_convention.GetRegisterAt(argument->GetArgumentIndex()));
- locations->SetInAt(0, location);
- locations->SetOut(location);
- } else {
- locations->SetInAt(0, Location(EAX));
- }
- argument->SetLocations(locations);
-}
+ case Primitive::kPrimLong:
+ locations->SetOut(Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ break;
-void InstructionCodeGeneratorX86::VisitPushArgument(HPushArgument* argument) {
- uint8_t argument_index = argument->GetArgumentIndex();
- InvokeDexCallingConvention calling_convention;
- size_t parameter_registers = calling_convention.GetNumberOfRegisters();
- if (argument_index >= parameter_registers) {
- uint8_t offset = calling_convention.GetStackOffsetOf(argument_index);
- __ movl(Address(ESP, offset),
- argument->GetLocations()->InAt(0).reg<Register>());
+ case Primitive::kPrimVoid:
+ break;
- } else {
- DCHECK_EQ(argument->GetLocations()->Out().reg<Register>(),
- argument->GetLocations()->InAt(0).reg<Register>());
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Unimplemented return type " << invoke->GetType();
+ break;
}
-}
-void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
- locations->AddTemp(Location(EAX));
invoke->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).reg<Register>();
+ Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
size_t index_in_cache = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
invoke->GetIndexInDexCache() * kX86WordSize;
@@ -289,13 +515,29 @@ void LocationsBuilderX86::VisitAdd(HAdd* add) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(add);
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
- locations->SetInAt(0, Location(EAX));
- locations->SetInAt(1, Location(ECX));
- locations->SetOut(Location(EAX));
+ locations->SetInAt(0, X86CpuLocation(EAX));
+ locations->SetInAt(1, X86CpuLocation(ECX));
+ locations->SetOut(X86CpuLocation(EAX));
break;
}
+ case Primitive::kPrimLong: {
+ locations->SetInAt(
+ 0, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ locations->SetInAt(
+ 1, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(ECX_EBX)));
+ locations->SetOut(Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected add type " << add->GetResultType();
+ break;
+
default:
- LOG(FATAL) << "Unimplemented";
+ LOG(FATAL) << "Unimplemented add type " << add->GetResultType();
}
add->SetLocations(locations);
}
@@ -303,12 +545,33 @@ void LocationsBuilderX86::VisitAdd(HAdd* add) {
void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
LocationSummary* locations = add->GetLocations();
switch (add->GetResultType()) {
- case Primitive::kPrimInt:
- DCHECK_EQ(locations->InAt(0).reg<Register>(), locations->Out().reg<Register>());
- __ addl(locations->InAt(0).reg<Register>(), locations->InAt(1).reg<Register>());
+ case Primitive::kPrimInt: {
+ DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(),
+ locations->Out().AsX86().AsCpuRegister());
+ __ addl(locations->InAt(0).AsX86().AsCpuRegister(),
+ locations->InAt(1).AsX86().AsCpuRegister());
break;
+ }
+
+ case Primitive::kPrimLong: {
+ DCHECK_EQ(locations->InAt(0).AsX86().AsRegisterPair(),
+ locations->Out().AsX86().AsRegisterPair());
+ __ addl(locations->InAt(0).AsX86().AsRegisterPairLow(),
+ locations->InAt(1).AsX86().AsRegisterPairLow());
+ __ adcl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
+ locations->InAt(1).AsX86().AsRegisterPairHigh());
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected add type " << add->GetResultType();
+ break;
+
default:
- LOG(FATAL) << "Unimplemented";
+ LOG(FATAL) << "Unimplemented add type " << add->GetResultType();
}
}
@@ -316,13 +579,30 @@ void LocationsBuilderX86::VisitSub(HSub* sub) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(sub);
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- locations->SetInAt(0, Location(EAX));
- locations->SetInAt(1, Location(ECX));
- locations->SetOut(Location(EAX));
+ locations->SetInAt(0, X86CpuLocation(EAX));
+ locations->SetInAt(1, X86CpuLocation(ECX));
+ locations->SetOut(X86CpuLocation(EAX));
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ locations->SetInAt(
+ 0, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ locations->SetInAt(
+ 1, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(ECX_EBX)));
+ locations->SetOut(Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
break;
}
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ break;
+
default:
- LOG(FATAL) << "Unimplemented";
+ LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
}
sub->SetLocations(locations);
}
@@ -330,18 +610,39 @@ void LocationsBuilderX86::VisitSub(HSub* sub) {
void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
switch (sub->GetResultType()) {
- case Primitive::kPrimInt:
- DCHECK_EQ(locations->InAt(0).reg<Register>(), locations->Out().reg<Register>());
- __ subl(locations->InAt(0).reg<Register>(), locations->InAt(1).reg<Register>());
+ case Primitive::kPrimInt: {
+ DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(),
+ locations->Out().AsX86().AsCpuRegister());
+ __ subl(locations->InAt(0).AsX86().AsCpuRegister(),
+ locations->InAt(1).AsX86().AsCpuRegister());
break;
+ }
+
+ case Primitive::kPrimLong: {
+ DCHECK_EQ(locations->InAt(0).AsX86().AsRegisterPair(),
+ locations->Out().AsX86().AsRegisterPair());
+ __ subl(locations->InAt(0).AsX86().AsRegisterPairLow(),
+ locations->InAt(1).AsX86().AsRegisterPairLow());
+ __ sbbl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
+ locations->InAt(1).AsX86().AsRegisterPairHigh());
+ break;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ break;
+
default:
- LOG(FATAL) << "Unimplemented";
+ LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
}
}
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- locations->SetOut(Location(EAX));
+ locations->SetOut(X86CpuLocation(EAX));
instruction->SetLocations(locations);
}
@@ -359,37 +660,31 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = instruction->GetIndex();
- if (argument_index < calling_convention.GetNumberOfRegisters()) {
- locations->SetOut(Location(calling_convention.GetRegisterAt(argument_index)));
- } else {
- locations->SetOut(Location(EAX));
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
}
+ locations->SetOut(location);
instruction->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = instruction->GetLocations();
- InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = instruction->GetIndex();
- if (argument_index >= calling_convention.GetNumberOfRegisters()) {
- uint8_t offset = calling_convention.GetStackOffsetOf(argument_index);
- __ movl(locations->Out().reg<Register>(), Address(ESP, offset + codegen_->GetFrameSize()));
- }
+ // Nothing to do, the parameter is already at its location.
}
void LocationsBuilderX86::VisitNot(HNot* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- locations->SetInAt(0, Location(EAX));
- locations->SetOut(Location(EAX));
+ locations->SetInAt(0, X86CpuLocation(EAX));
+ locations->SetOut(X86CpuLocation(EAX));
instruction->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitNot(HNot* instruction) {
LocationSummary* locations = instruction->GetLocations();
- DCHECK_EQ(locations->InAt(0).reg<Register>(), locations->Out().reg<Register>());
- __ xorl(locations->Out().reg<Register>(), Immediate(1));
+ DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(), locations->Out().AsX86().AsCpuRegister());
+ __ xorl(locations->Out().AsX86().AsCpuRegister(), Immediate(1));
}
} // namespace x86
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index bba81c0894..f22890e708 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -26,9 +26,43 @@ namespace x86 {
static constexpr size_t kX86WordSize = 4;
+class CodeGeneratorX86;
+
+static constexpr Register kParameterCoreRegisters[] = { ECX, EDX, EBX };
+static constexpr RegisterPair kParameterCorePairRegisters[] = { ECX_EDX, EDX_EBX };
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+class InvokeDexCallingConvention : public CallingConvention<Register> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters, kParameterCoreRegistersLength) {}
+
+ RegisterPair GetRegisterPairAt(size_t argument_index) {
+ DCHECK_LT(argument_index + 1, GetNumberOfRegisters());
+ return kParameterCorePairRegisters[argument_index];
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitor() : gp_index_(0) {}
+
+ Location GetNextLocation(Primitive::Type type);
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+ uint32_t gp_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+};
+
class LocationsBuilderX86 : public HGraphVisitor {
public:
- explicit LocationsBuilderX86(HGraph* graph) : HGraphVisitor(graph) { }
+ LocationsBuilderX86(HGraph* graph, CodeGeneratorX86* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
@@ -38,11 +72,12 @@ class LocationsBuilderX86 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
private:
+ CodeGeneratorX86* const codegen_;
+ InvokeDexCallingConventionVisitor parameter_visitor_;
+
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86);
};
-class CodeGeneratorX86;
-
class InstructionCodeGeneratorX86 : public HGraphVisitor {
public:
InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
@@ -69,7 +104,7 @@ class CodeGeneratorX86 : public CodeGenerator {
public:
explicit CodeGeneratorX86(HGraph* graph)
: CodeGenerator(graph),
- location_builder_(graph),
+ location_builder_(graph, this),
instruction_visitor_(graph, this) { }
virtual ~CodeGeneratorX86() { }
@@ -97,6 +132,11 @@ class CodeGeneratorX86 : public CodeGenerator {
int32_t GetStackSlot(HLocal* local) const;
private:
+ // Helper method to move a 32bits value between two locations.
+ void Move32(Location destination, Location source);
+ // Helper method to move a 64bits value between two locations.
+ void Move64(Location destination, Location source);
+
LocationsBuilderX86 location_builder_;
InstructionCodeGeneratorX86 instruction_visitor_;
X86Assembler assembler_;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index adea0baa2d..3da9ed9461 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -226,10 +226,10 @@ class HBasicBlock : public ArenaObject {
M(InvokeStatic) \
M(LoadLocal) \
M(Local) \
+ M(LongConstant) \
M(NewInstance) \
M(Not) \
M(ParameterValue) \
- M(PushArgument) \
M(Return) \
M(ReturnVoid) \
M(StoreLocal) \
@@ -283,6 +283,8 @@ class HInstruction : public ArenaObject {
virtual void Accept(HGraphVisitor* visitor) = 0;
virtual const char* DebugName() const = 0;
+ virtual Primitive::Type GetType() const { return Primitive::kPrimVoid; }
+
void AddUse(HInstruction* user) {
uses_ = new (block_->GetGraph()->GetArena()) HUseListNode(user, uses_);
}
@@ -534,6 +536,7 @@ class HBinaryOperation : public HTemplateInstruction<2> {
Primitive::Type GetResultType() const { return result_type_; }
virtual bool IsCommutative() { return false; }
+ virtual Primitive::Type GetType() const { return GetResultType(); }
private:
const Primitive::Type result_type_;
@@ -550,6 +553,8 @@ class HEqual : public HBinaryOperation {
virtual bool IsCommutative() { return true; }
+ virtual Primitive::Type GetType() const { return Primitive::kPrimBoolean; }
+
DECLARE_INSTRUCTION(Equal)
private:
@@ -575,15 +580,19 @@ class HLocal : public HTemplateInstruction<0> {
// Load a given local. The local is an input of this instruction.
class HLoadLocal : public HTemplateInstruction<1> {
public:
- explicit HLoadLocal(HLocal* local) {
+ explicit HLoadLocal(HLocal* local, Primitive::Type type) : type_(type) {
SetRawInputAt(0, local);
}
+ virtual Primitive::Type GetType() const { return type_; }
+
HLocal* GetLocal() const { return reinterpret_cast<HLocal*>(InputAt(0)); }
DECLARE_INSTRUCTION(LoadLocal)
private:
+ const Primitive::Type type_;
+
DISALLOW_COPY_AND_ASSIGN(HLoadLocal);
};
@@ -611,6 +620,7 @@ class HIntConstant : public HTemplateInstruction<0> {
explicit HIntConstant(int32_t value) : value_(value) { }
int32_t GetValue() const { return value_; }
+ virtual Primitive::Type GetType() const { return Primitive::kPrimInt; }
DECLARE_INSTRUCTION(IntConstant)
@@ -620,10 +630,30 @@ class HIntConstant : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HIntConstant);
};
+class HLongConstant : public HTemplateInstruction<0> {
+ public:
+ explicit HLongConstant(int64_t value) : value_(value) { }
+
+ int64_t GetValue() const { return value_; }
+
+ virtual Primitive::Type GetType() const { return Primitive::kPrimLong; }
+
+ DECLARE_INSTRUCTION(LongConstant)
+
+ private:
+ const int64_t value_;
+
+ DISALLOW_COPY_AND_ASSIGN(HLongConstant);
+};
+
class HInvoke : public HInstruction {
public:
- HInvoke(ArenaAllocator* arena, uint32_t number_of_arguments, uint32_t dex_pc)
+ HInvoke(ArenaAllocator* arena,
+ uint32_t number_of_arguments,
+ Primitive::Type return_type,
+ uint32_t dex_pc)
: inputs_(arena, number_of_arguments),
+ return_type_(return_type),
dex_pc_(dex_pc) {
inputs_.SetSize(number_of_arguments);
}
@@ -635,10 +665,13 @@ class HInvoke : public HInstruction {
inputs_.Put(index, argument);
}
+ virtual Primitive::Type GetType() const { return return_type_; }
+
uint32_t GetDexPc() const { return dex_pc_; }
protected:
GrowableArray<HInstruction*> inputs_;
+ const Primitive::Type return_type_;
const uint32_t dex_pc_;
private:
@@ -649,9 +682,11 @@ class HInvokeStatic : public HInvoke {
public:
HInvokeStatic(ArenaAllocator* arena,
uint32_t number_of_arguments,
+ Primitive::Type return_type,
uint32_t dex_pc,
uint32_t index_in_dex_cache)
- : HInvoke(arena, number_of_arguments, dex_pc), index_in_dex_cache_(index_in_dex_cache) {}
+ : HInvoke(arena, number_of_arguments, return_type, dex_pc),
+ index_in_dex_cache_(index_in_dex_cache) {}
uint32_t GetIndexInDexCache() const { return index_in_dex_cache_; }
@@ -670,6 +705,8 @@ class HNewInstance : public HTemplateInstruction<0> {
uint32_t GetDexPc() const { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
+ virtual Primitive::Type GetType() const { return Primitive::kPrimNot; }
+
DECLARE_INSTRUCTION(NewInstance)
private:
@@ -679,24 +716,6 @@ class HNewInstance : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HNewInstance);
};
-// HPushArgument nodes are inserted after the evaluation of an argument
-// of a call. Their mere purpose is to ease the code generator's work.
-class HPushArgument : public HTemplateInstruction<1> {
- public:
- HPushArgument(HInstruction* argument, uint8_t argument_index) : argument_index_(argument_index) {
- SetRawInputAt(0, argument);
- }
-
- uint8_t GetArgumentIndex() const { return argument_index_; }
-
- DECLARE_INSTRUCTION(PushArgument)
-
- private:
- const uint8_t argument_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HPushArgument);
-};
-
class HAdd : public HBinaryOperation {
public:
HAdd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
@@ -727,10 +746,13 @@ class HSub : public HBinaryOperation {
// the calling convention.
class HParameterValue : public HTemplateInstruction<0> {
public:
- explicit HParameterValue(uint8_t index) : index_(index) {}
+ HParameterValue(uint8_t index, Primitive::Type parameter_type)
+ : index_(index), parameter_type_(parameter_type) {}
uint8_t GetIndex() const { return index_; }
+ virtual Primitive::Type GetType() const { return parameter_type_; }
+
DECLARE_INSTRUCTION(ParameterValue);
private:
@@ -738,6 +760,8 @@ class HParameterValue : public HTemplateInstruction<0> {
// than HGraph::number_of_in_vregs_;
const uint8_t index_;
+ const Primitive::Type parameter_type_;
+
DISALLOW_COPY_AND_ASSIGN(HParameterValue);
};
@@ -747,6 +771,8 @@ class HNot : public HTemplateInstruction<1> {
SetRawInputAt(0, input);
}
+ virtual Primitive::Type GetType() const { return Primitive::kPrimBoolean; }
+
DECLARE_INSTRUCTION(Not);
private:
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
index 06ce3b4514..bfb2829a32 100644
--- a/compiler/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -70,11 +70,13 @@ class ManagedRegister {
return ManagedRegister();
}
+ int RegId() const { return id_; }
+ explicit ManagedRegister(int reg_id) : id_(reg_id) { }
+
protected:
static const int kNoRegister = -1;
ManagedRegister() : id_(kNoRegister) { }
- explicit ManagedRegister(int reg_id) : id_(reg_id) { }
int id_;
};
diff --git a/compiler/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc
index 7fae7a8b6f..034a795622 100644
--- a/compiler/utils/x86/managed_register_x86.cc
+++ b/compiler/utils/x86/managed_register_x86.cc
@@ -33,7 +33,8 @@ namespace x86 {
P(EDX, EDI) \
P(ECX, EBX) \
P(ECX, EDI) \
- P(EBX, EDI)
+ P(EBX, EDI) \
+ P(ECX, EDX)
struct RegisterPairDescriptor {
diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
index 0201a96ad0..09d2b4919d 100644
--- a/compiler/utils/x86/managed_register_x86.h
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -37,7 +37,8 @@ enum RegisterPair {
ECX_EBX = 7,
ECX_EDI = 8,
EBX_EDI = 9,
- kNumberOfRegisterPairs = 10,
+ ECX_EDX = 10, // Dalvik style passing
+ kNumberOfRegisterPairs = 11,
kNoRegisterPair = -1,
};
@@ -121,6 +122,12 @@ class X86ManagedRegister : public ManagedRegister {
return FromRegId(AllocIdHigh()).AsCpuRegister();
}
+ RegisterPair AsRegisterPair() const {
+ CHECK(IsRegisterPair());
+ return static_cast<RegisterPair>(id_ -
+ (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds));
+ }
+
bool IsCpuRegister() const {
CHECK(IsValidManagedRegister());
return (0 <= id_) && (id_ < kNumberOfCpuRegIds);