Diffstat (limited to 'compiler/optimizing')

-rw-r--r--  compiler/optimizing/code_generator.cc        |  9
-rw-r--r--  compiler/optimizing/code_generator.h         |  5
-rw-r--r--  compiler/optimizing/code_generator_arm.cc    | 26
-rw-r--r--  compiler/optimizing/code_generator_arm.h     |  2
-rw-r--r--  compiler/optimizing/code_generator_arm64.h   |  5
-rw-r--r--  compiler/optimizing/code_generator_x86.cc    | 24
-rw-r--r--  compiler/optimizing/code_generator_x86.h     |  2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 27
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h  |  2

9 files changed, 58 insertions(+), 44 deletions(-)
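In short: the type-check slow paths on ARM, x86, and x86_64 each hand-rolled the same MoveOperands/HParallelMove sequence, and the bounds-check slow paths emitted two sequential moves that could clobber each other when the source and destination locations overlap. This change hoists the pattern into a shared CodeGenerator::EmitParallelMoves() helper, wired to each backend through a new virtual GetMoveResolver(); ARM64, which has no move resolver yet, gets a logging stub.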
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9d172638e1..6b5ec1d6ca 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -632,4 +632,13 @@ void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend
}
}
+void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
+ MoveOperands move1(from1, to1, nullptr);
+ MoveOperands move2(from2, to2, nullptr);
+ HParallelMove parallel_move(GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ GetMoveResolver()->EmitNativeCode(&parallel_move);
+}
+
} // namespace art
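Why a parallel move is needed here at all: the runtime calling-convention registers that the slow paths target can themselves hold the values being moved, so emitting the two moves one after the other can clobber a source before it is read. The following is a minimal standalone C++ sketch of the hazard (not ART code; the concrete register assignment is hypothetical), contrasting the naive sequence with the swap a move resolver falls back to:

    // Standalone sketch (not ART code) of the hazard EmitParallelMoves guards
    // against: each move's destination is the other move's source.
    #include <cassert>
    #include <utility>

    int main() {
      // Hypothetical starting assignment: length in r0, index in r1, while the
      // calling convention wants index in r0 and length in r1.
      int length = 8, index = 42;

      // Naive sequential emission, like the old back-to-back Move32 calls:
      int r0 = length, r1 = index;
      r0 = r1;  // index -> arg0, clobbering length before it is read
      r1 = r0;  // length -> arg1 now reads the already-clobbered value
      assert(r0 == 42 && r1 == 42);  // length (8) has been lost

      // A parallel move resolver detects the cycle and emits a swap instead:
      r0 = length, r1 = index;
      std::swap(r0, r1);
      assert(r0 == 42 && r1 == 8);  // both values survive
      return 0;
    }

Note also that move1 and move2 are stack-allocated while HParallelMove::AddMove() stores raw pointers to them; this is safe here because GetMoveResolver()->EmitNativeCode() consumes the moves before EmitParallelMoves() returns.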
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index fc4ea4b5d3..ac4fc67c2c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -33,6 +33,7 @@ static size_t constexpr kUninitializedFrameSize = 0;
class Assembler;
class CodeGenerator;
class DexCompilationUnit;
+class ParallelMoveResolver;
class SrcMap;
class CodeAllocator {
@@ -165,6 +166,8 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
// of the architecture.
static size_t GetCacheOffset(uint32_t index);
+ void EmitParallelMoves(Location from1, Location to1, Location from2, Location to2);
+
protected:
CodeGenerator(HGraph* graph,
size_t number_of_core_registers,
@@ -197,6 +200,8 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
+ virtual ParallelMoveResolver* GetMoveResolver() = 0;
+
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index c2e9a2e5da..b336c31edd 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -169,11 +169,14 @@ class BoundsCheckSlowPathARM : public SlowPathCodeARM {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- arm_codegen->Move32(
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- arm_codegen->Move32(
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
}
@@ -290,16 +293,11 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(class_to_check_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- arm_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
if (instruction_->IsInstanceOf()) {
arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
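Note that the ARM slow paths keep their arm_codegen down_cast: InvokeRuntime() is a CodeGeneratorARM member, so only the move emission itself goes through the architecture-independent helper.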
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 5d519937f4..e3a13a2b8d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -186,7 +186,7 @@ class CodeGeneratorARM : public CodeGenerator {
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverARM* GetMoveResolver() {
+ ParallelMoveResolverARM* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f2ead21e15..54e87f4d9c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -230,6 +230,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
void Load(Primitive::Type type, vixl::Register dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::Register rt, const vixl::MemOperand& dst);
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: MoveResolver";
+ return nullptr;
+ }
+
private:
// Labels for each block that will be compiled.
vixl::Label* block_labels_;
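ARM64 has no parallel move resolver yet, so its override merely logs and returns nullptr; until one lands, the ARM64 code generator must not reach EmitParallelMoves(), and accordingly none of its slow paths are rewritten in this change.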
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index bed44b2940..d537b1792d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -140,9 +140,14 @@ class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ x86_codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
}
@@ -291,16 +296,11 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(class_to_check_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- x86_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ x86_codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
if (instruction_->IsInstanceOf()) {
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 85fe21ca76..5ba05af2a8 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -181,7 +181,7 @@ class CodeGeneratorX86 : public CodeGenerator {
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverX86* GetMoveResolver() {
+ ParallelMoveResolverX86* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 112c17975a..7545e4dfd7 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -179,13 +179,15 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x64_codegen->Move(
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x64_codegen->Move(
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ gs()->call(Address::Absolute(
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
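With the moves routed through the base-class helper and the PC recorded via codegen->RecordPcInfo(), nothing in this slow path needs the architecture-specific type anymore, so the x64_codegen down_cast is dropped.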
@@ -305,16 +307,11 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(class_to_check_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- x64_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
if (instruction_->IsInstanceOf()) {
__ gs()->call(
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 9565b6f876..6112530dfb 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -171,7 +171,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return &assembler_;
}
- ParallelMoveResolverX86_64* GetMoveResolver() {
+ ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}