Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/quick/quick_cfi_test.cc            |   4
-rw-r--r--  compiler/optimizing/codegen_test.cc             | 109
-rw-r--r--  compiler/optimizing/live_interval_test.cc       |   6
-rw-r--r--  compiler/optimizing/register_allocator.cc       |  39
-rw-r--r--  compiler/optimizing/register_allocator_test.cc  |   9
-rw-r--r--  compiler/optimizing/ssa_builder.cc              |   6
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc    |  25
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h     | 159
-rw-r--r--  compiler/optimizing/stack_map_stream.h          |   3
9 files changed, 212 insertions, 148 deletions
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 2db5a366df..d276457d01 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -89,13 +89,13 @@ class QuickCFITest : public CFITest {
m2l->CompilerInitializeRegAlloc();
for (const auto& info : m2l->reg_pool_->core_regs_) {
if (m2l->num_core_spills_ < 2 && !info->IsTemp() && !info->InUse()) {
- m2l->core_spill_mask_ |= 1 << info->GetReg().GetReg();
+ m2l->core_spill_mask_ |= 1 << info->GetReg().GetRegNum();
m2l->num_core_spills_++;
}
}
for (const auto& info : m2l->reg_pool_->sp_regs_) {
if (m2l->num_fp_spills_ < 2 && !info->IsTemp() && !info->InUse()) {
- m2l->fp_spill_mask_ |= 1 << info->GetReg().GetReg();
+ m2l->fp_spill_mask_ |= 1 << info->GetReg().GetRegNum();
m2l->num_fp_spills_++;
}
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index afcff1ef65..94f56e5d3e 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -18,8 +18,10 @@
#include "arch/instruction_set.h"
#include "arch/arm/instruction_set_features_arm.h"
+#include "arch/arm/registers_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "arch/x86/instruction_set_features_x86.h"
+#include "arch/x86/registers_x86.h"
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "base/macros.h"
#include "builder.h"
@@ -37,6 +39,8 @@
#include "register_allocator.h"
#include "ssa_liveness_analysis.h"
#include "utils.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/x86/managed_register_x86.h"
#include "gtest/gtest.h"
@@ -53,17 +57,42 @@ class TestCodeGeneratorARM : public arm::CodeGeneratorARM {
const ArmInstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options)
: arm::CodeGeneratorARM(graph, isa_features, compiler_options) {
- AddAllocatedRegister(Location::RegisterLocation(6));
- AddAllocatedRegister(Location::RegisterLocation(7));
+ AddAllocatedRegister(Location::RegisterLocation(arm::R6));
+ AddAllocatedRegister(Location::RegisterLocation(arm::R7));
}

void SetupBlockedRegisters(bool is_baseline) const OVERRIDE {
arm::CodeGeneratorARM::SetupBlockedRegisters(is_baseline);
- blocked_core_registers_[4] = true;
- blocked_core_registers_[6] = false;
- blocked_core_registers_[7] = false;
+ blocked_core_registers_[arm::R4] = true;
+ blocked_core_registers_[arm::R6] = false;
+ blocked_core_registers_[arm::R7] = false;
// Makes pair R6-R7 available.
- blocked_register_pairs_[6 >> 1] = false;
+ blocked_register_pairs_[arm::R6_R7] = false;
+ }
+};
+
+class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 {
+ public:
+ TestCodeGeneratorX86(HGraph* graph,
+ const X86InstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options)
+ : x86::CodeGeneratorX86(graph, isa_features, compiler_options) {
+ // Save edi, we need it for getting enough registers for long multiplication.
+ AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
+ }
+
+ void SetupBlockedRegisters(bool is_baseline) const OVERRIDE {
+ x86::CodeGeneratorX86::SetupBlockedRegisters(is_baseline);
+ // ebx is a callee-save register in C, but caller-save for ART.
+ blocked_core_registers_[x86::EBX] = true;
+ blocked_register_pairs_[x86::EAX_EBX] = true;
+ blocked_register_pairs_[x86::EDX_EBX] = true;
+ blocked_register_pairs_[x86::ECX_EBX] = true;
+ blocked_register_pairs_[x86::EBX_EDI] = true;
+
+ // Make edi available.
+ blocked_core_registers_[x86::EDI] = false;
+ blocked_register_pairs_[x86::ECX_EDI] = false;
}
};
@@ -101,7 +130,7 @@ static void Run(const InternalCodeAllocator& allocator,
}
Expected result = f();
if (has_result) {
- ASSERT_EQ(result, expected);
+ ASSERT_EQ(expected, result);
}
}
@@ -112,7 +141,7 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
CompilerOptions compiler_options;
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
+ TestCodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
// We avoid doing a stack overflow check that requires the runtime being setup,
// by making sure the compiler knows the methods we are running are leaf methods.
codegenX86.CompileBaseline(&allocator, true);
@@ -520,29 +549,49 @@ TEST(CodegenTest, NonMaterializedCondition) {
RunCodeOptimized(graph, hook_before_codegen, true, 0);
}

-#define MUL_TEST(TYPE, TEST_NAME) \
- TEST(CodegenTest, Return ## TEST_NAME) { \
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
- Instruction::CONST_4 | 3 << 12 | 0, \
- Instruction::CONST_4 | 4 << 12 | 1 << 8, \
- Instruction::MUL_ ## TYPE, 1 << 8 | 0, \
- Instruction::RETURN); \
- \
- TestCode(data, true, 12); \
- } \
- \
- TEST(CodegenTest, Return ## TEST_NAME ## 2addr) { \
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
- Instruction::CONST_4 | 3 << 12 | 0, \
- Instruction::CONST_4 | 4 << 12 | 1 << 8, \
- Instruction::MUL_ ## TYPE ## _2ADDR | 1 << 12, \
- Instruction::RETURN); \
- \
- TestCode(data, true, 12); \
- }
+TEST(CodegenTest, ReturnMulInt) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 3 << 12 | 0,
+ Instruction::CONST_4 | 4 << 12 | 1 << 8,
+ Instruction::MUL_INT, 1 << 8 | 0,
+ Instruction::RETURN);
+
+ TestCode(data, true, 12);
+}
+
+TEST(CodegenTest, ReturnMulInt2addr) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 3 << 12 | 0,
+ Instruction::CONST_4 | 4 << 12 | 1 << 8,
+ Instruction::MUL_INT_2ADDR | 1 << 12,
+    Instruction::RETURN);

-MUL_TEST(INT, MulInt);
-MUL_TEST(LONG, MulLong);
+ TestCode(data, true, 12);
+}
+
+TEST(CodegenTest, ReturnMulLong) {
+ const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 3 << 12 | 0,
+ Instruction::CONST_4 | 0 << 12 | 1 << 8,
+ Instruction::CONST_4 | 4 << 12 | 2 << 8,
+ Instruction::CONST_4 | 0 << 12 | 3 << 8,
+ Instruction::MUL_LONG, 2 << 8 | 0,
+ Instruction::RETURN_WIDE);
+
+ TestCodeLong(data, true, 12);
+}
+
+TEST(CodegenTest, ReturnMulLong2addr) {
+ const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 3 << 12 | 0 << 8,
+ Instruction::CONST_4 | 0 << 12 | 1 << 8,
+ Instruction::CONST_4 | 4 << 12 | 2 << 8,
+ Instruction::CONST_4 | 0 << 12 | 3 << 8,
+ Instruction::MUL_LONG_2ADDR | 2 << 12,
+ Instruction::RETURN_WIDE);
+
+ TestCodeLong(data, true, 12);
+}

TEST(CodegenTest, ReturnMulIntLit8) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
diff --git a/compiler/optimizing/live_interval_test.cc b/compiler/optimizing/live_interval_test.cc
index 28000c18f8..405f261986 100644
--- a/compiler/optimizing/live_interval_test.cc
+++ b/compiler/optimizing/live_interval_test.cc
@@ -84,13 +84,13 @@ TEST(LiveInterval, Covers) {
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ ASSERT_FALSE(interval->Covers(0));
ASSERT_TRUE(interval->Covers(4));
ASSERT_TRUE(interval->Covers(11));
- ASSERT_TRUE(interval->Covers(14));
- ASSERT_TRUE(interval->Covers(15));
- ASSERT_FALSE(interval->Covers(0));
ASSERT_FALSE(interval->Covers(12));
ASSERT_FALSE(interval->Covers(13));
+ ASSERT_TRUE(interval->Covers(14));
+ ASSERT_TRUE(interval->Covers(15));
ASSERT_FALSE(interval->Covers(16));
}
}
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index a02b1dacf7..6350b35ca1 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -332,6 +332,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
}
current->AddSafepoint(safepoint);
}
+ current->ResetSearchCache();
// Some instructions define their output in fixed register/stack slot. We need
// to ensure we know these locations before doing register allocation. For a
@@ -666,6 +667,7 @@ static void FreeIfNotCoverAt(LiveInterval* interval, size_t position, size_t* fr
DCHECK(!interval->IsHighInterval());
// Note that the same instruction may occur multiple times in the input list,
// so `free_until` may have changed already.
+ // Since `position` is not the current scan position, we need to use CoversSlow.
if (interval->IsDeadAt(position)) {
// Set the register to be free. Note that inactive intervals might later
// update this.
@@ -674,12 +676,12 @@ static void FreeIfNotCoverAt(LiveInterval* interval, size_t position, size_t* fr
DCHECK(interval->GetHighInterval()->IsDeadAt(position));
free_until[interval->GetHighInterval()->GetRegister()] = kMaxLifetimePosition;
}
- } else if (!interval->Covers(position)) {
+ } else if (!interval->CoversSlow(position)) {
// The interval becomes inactive at `defined_by`. We make its register
// available only until the next use strictly after `defined_by`.
free_until[interval->GetRegister()] = interval->FirstUseAfter(position);
if (interval->HasHighInterval()) {
- DCHECK(!interval->GetHighInterval()->Covers(position));
+ DCHECK(!interval->GetHighInterval()->CoversSlow(position));
free_until[interval->GetHighInterval()->GetRegister()] = free_until[interval->GetRegister()];
}
}
@@ -720,7 +722,7 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
// the linear scan algorithm. So we use `defined_by`'s end lifetime
// position to check whether the input is dead or is inactive after
// `defined_by`.
- DCHECK(interval->Covers(defined_by->GetLifetimePosition()));
+ DCHECK(interval->CoversSlow(defined_by->GetLifetimePosition()));
size_t position = defined_by->GetLifetimePosition() + 1;
FreeIfNotCoverAt(interval, position, free_until);
}
@@ -1438,7 +1440,7 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
use = use->GetNext();
}
while (use != nullptr && use->GetPosition() <= range->GetEnd()) {
- DCHECK(current->Covers(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
+ DCHECK(current->CoversSlow(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
LocationSummary* locations = use->GetUser()->GetLocations();
if (use->GetIsEnvironment()) {
locations->SetEnvironmentAt(use->GetInputIndex(), source);
@@ -1475,7 +1477,7 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
safepoint_position != nullptr;
safepoint_position = safepoint_position->GetNext()) {
- DCHECK(current->Covers(safepoint_position->GetPosition()));
+ DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
LocationSummary* locations = safepoint_position->GetLocations();
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
@@ -1538,35 +1540,14 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
return;
}
- // Intervals end at the lifetime end of a block. The decrement by one
- // ensures the `Cover` call will return true.
- size_t from_position = from->GetLifetimeEnd() - 1;
- size_t to_position = to->GetLifetimeStart();
-
- LiveInterval* destination = nullptr;
- LiveInterval* source = nullptr;
-
- LiveInterval* current = interval;
-
- // Check the intervals that cover `from` and `to`.
- while ((current != nullptr) && (source == nullptr || destination == nullptr)) {
- if (current->Covers(from_position)) {
- DCHECK(source == nullptr);
- source = current;
- }
- if (current->Covers(to_position)) {
- DCHECK(destination == nullptr);
- destination = current;
- }
-
- current = current->GetNextSibling();
- }
+ // Find the intervals that cover `from` and `to`.
+ LiveInterval* destination = interval->GetSiblingAt(to->GetLifetimeStart());
+ LiveInterval* source = interval->GetSiblingAt(from->GetLifetimeEnd() - 1);
if (destination == source) {
// Interval was not split.
return;
}
-
DCHECK(destination != nullptr && source != nullptr);
if (!destination->HasRegister()) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index c307d98555..182cd0e833 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -433,18 +433,15 @@ TEST(RegisterAllocatorTest, FreeUntil) {
// Add three temps holding the same register, and starting at different positions.
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
- LiveInterval* interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
- interval->SetRegister(0);
+ LiveInterval* interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt);
interval->AddRange(40, 50);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
- interval->SetRegister(0);
+ interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt);
interval->AddRange(20, 30);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
- interval->SetRegister(0);
+ interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt);
interval->AddRange(60, 70);
register_allocator.inactive_.Add(interval);
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 5c3d9bf725..7a252af2ad 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -186,11 +186,9 @@ void SsaBuilder::FixNullConstantType() {
HInstruction* right = equality_instr->InputAt(1);
HInstruction* null_instr = nullptr;
- if ((left->GetType() == Primitive::kPrimNot)
- && (right->IsNullConstant() || right->IsIntConstant())) {
+ if ((left->GetType() == Primitive::kPrimNot) && right->IsIntConstant()) {
null_instr = right;
- } else if ((right->GetType() == Primitive::kPrimNot)
- && (left->IsNullConstant() || left->IsIntConstant())) {
+ } else if ((right->GetType() == Primitive::kPrimNot) && left->IsIntConstant()) {
null_instr = left;
} else {
continue;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 302df2a1d2..ea0e7c3712 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -402,11 +402,11 @@ int LiveInterval::FindHintAtDefinition() const {
for (size_t i = 0, e = defined_by_->InputCount(); i < e; ++i) {
HInstruction* input = defined_by_->InputAt(i);
size_t end = predecessors.Get(i)->GetLifetimeEnd();
- const LiveInterval& input_interval = input->GetLiveInterval()->GetIntervalAt(end - 1);
- if (input_interval.GetEnd() == end) {
+ LiveInterval* input_interval = input->GetLiveInterval()->GetSiblingAt(end - 1);
+ if (input_interval->GetEnd() == end) {
// If the input dies at the end of the predecessor, we know its register can
// be reused.
- Location input_location = input_interval.ToLocation();
+ Location input_location = input_interval->ToLocation();
if (input_location.IsRegisterKind()) {
DCHECK(SameRegisterKind(input_location));
return RegisterOrLowRegister(input_location);
@@ -418,12 +418,12 @@ int LiveInterval::FindHintAtDefinition() const {
Location out = locations->Out();
if (out.IsUnallocated() && out.GetPolicy() == Location::kSameAsFirstInput) {
// Try to use the same register as the first input.
- const LiveInterval& input_interval =
- GetDefinedBy()->InputAt(0)->GetLiveInterval()->GetIntervalAt(GetStart() - 1);
- if (input_interval.GetEnd() == GetStart()) {
+ LiveInterval* input_interval =
+ GetDefinedBy()->InputAt(0)->GetLiveInterval()->GetSiblingAt(GetStart() - 1);
+ if (input_interval->GetEnd() == GetStart()) {
// If the input dies at the start of this instruction, we know its register can
// be reused.
- Location location = input_interval.ToLocation();
+ Location location = input_interval->ToLocation();
if (location.IsRegisterKind()) {
DCHECK(SameRegisterKind(location));
return RegisterOrLowRegister(location);
@@ -487,16 +487,17 @@ Location LiveInterval::ToLocation() const {
}

Location LiveInterval::GetLocationAt(size_t position) {
- return GetIntervalAt(position).ToLocation();
+ LiveInterval* sibling = GetSiblingAt(position);
+ DCHECK(sibling != nullptr);
+ return sibling->ToLocation();
}

-const LiveInterval& LiveInterval::GetIntervalAt(size_t position) {
+LiveInterval* LiveInterval::GetSiblingAt(size_t position) {
LiveInterval* current = this;
- while (!current->Covers(position)) {
+ while (current != nullptr && !current->IsDefinedAt(position)) {
current = current->GetNextSibling();
- DCHECK(current != nullptr);
}
- return *current;
+ return current;
}

} // namespace art
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 98f98a29d1..8eb98a186b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -274,8 +274,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
size_t start_block_position = instruction->GetBlock()->GetLifetimeStart();
if (first_range_ == nullptr) {
// First time we see a use of that interval.
- first_range_ = last_range_ = new (allocator_) LiveRange(
- start_block_position, position, nullptr);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start_block_position, position, nullptr);
} else if (first_range_->GetStart() == start_block_position) {
// There is a use later in the same block or in a following block.
// Note that in such a case, `AddRange` for the whole blocks has been called
@@ -290,7 +290,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// predecessor/successor. When two blocks are predecessor/successor, the
// liveness algorithm has called `AddRange` before arriving in this method,
// and the check line 205 would succeed.
- first_range_ = new (allocator_) LiveRange(start_block_position, position, first_range_);
+ first_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start_block_position, position, first_range_);
}
}
@@ -302,7 +303,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
void AddRange(size_t start, size_t end) {
if (first_range_ == nullptr) {
- first_range_ = last_range_ = new (allocator_) LiveRange(start, end, first_range_);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start, end, first_range_);
} else if (first_range_->GetStart() == end) {
// There is a use in the following block.
first_range_->start_ = start;
@@ -311,7 +313,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
} else {
DCHECK_GT(first_range_->GetStart(), end);
// There is a hole in the interval. Create a new range.
- first_range_ = new (allocator_) LiveRange(start, end, first_range_);
+ first_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, first_range_);
}
}
@@ -328,15 +330,15 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
}
if (after_loop == nullptr) {
// Uses are only in the loop.
- first_range_ = last_range_ = new (allocator_) LiveRange(start, end, nullptr);
+ first_range_ = last_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, nullptr);
} else if (after_loop->GetStart() <= end) {
- first_range_ = after_loop;
+ first_range_ = range_search_start_ = after_loop;
// There are uses after the loop.
first_range_->start_ = start;
} else {
// The use after the loop is after a lifetime hole.
DCHECK(last_in_loop != nullptr);
- first_range_ = last_in_loop;
+ first_range_ = range_search_start_ = last_in_loop;
first_range_->start_ = start;
first_range_->end_ = end;
}
@@ -357,7 +359,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// Instruction without uses.
DCHECK(!defined_by_->HasNonEnvironmentUses());
DCHECK(from == defined_by_->GetLifetimePosition());
- first_range_ = last_range_ = new (allocator_) LiveRange(from, from + 2, nullptr);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(from, from + 2, nullptr);
}
}
@@ -372,21 +375,43 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
bool HasRegister() const { return register_ != kNoRegister; }
bool IsDeadAt(size_t position) const {
- return last_range_->GetEnd() <= position;
+ return GetEnd() <= position;
}
- bool Covers(size_t position) {
- return !IsDeadAt(position) && FindRangeAt(position) != nullptr;
+ bool IsDefinedAt(size_t position) const {
+ return GetStart() <= position && !IsDeadAt(position);
}
- /**
- * Returns the first intersection of this interval with `other`.
- */
- size_t FirstIntersectionWith(LiveInterval* other) const {
+ // Returns true if the interval contains a LiveRange covering `position`.
+ // The range at or immediately after the current position of linear scan
+ // is cached for better performance. If `position` can be smaller than
+ // that, CoversSlow should be used instead.
+ bool Covers(size_t position) {
+ LiveRange* candidate = FindRangeAtOrAfter(position, range_search_start_);
+ range_search_start_ = candidate;
+ return (candidate != nullptr && candidate->GetStart() <= position);
+ }
+
+ // Same as Covers but always tests all ranges.
+ bool CoversSlow(size_t position) const {
+ LiveRange* candidate = FindRangeAtOrAfter(position, first_range_);
+ return candidate != nullptr && candidate->GetStart() <= position;
+ }
+
+ // Returns the first intersection of this interval with `current`, which
+ // must be the interval currently being allocated by linear scan.
+ size_t FirstIntersectionWith(LiveInterval* current) const {
+ // Find the first range after the start of `current`. We use the search
+ // cache to improve performance.
+ DCHECK(GetStart() <= current->GetStart() || IsFixed());
+ LiveRange* other_range = current->first_range_;
+ LiveRange* my_range = FindRangeAtOrAfter(other_range->GetStart(), range_search_start_);
+ if (my_range == nullptr) {
+ return kNoLifetime;
+ }
+
// Advance both intervals and find the first matching range start in
// this interval.
- LiveRange* my_range = first_range_;
- LiveRange* other_range = other->first_range_;
do {
if (my_range->IsBefore(*other_range)) {
my_range = my_range->GetNext();
@@ -513,7 +538,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
DCHECK(!is_fixed_);
DCHECK_GT(position, GetStart());
- if (last_range_->GetEnd() <= position) {
+ if (GetEnd() <= position) {
// This range dies before `position`, no need to split.
return nullptr;
}
@@ -537,7 +562,6 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
new_interval->parent_ = parent_;
new_interval->first_use_ = first_use_;
- last_visited_range_ = nullptr;
LiveRange* current = first_range_;
LiveRange* previous = nullptr;
// Iterate over the ranges, and either find a range that covers this position, or
@@ -557,6 +581,12 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
last_range_ = previous;
previous->next_ = nullptr;
new_interval->first_range_ = current;
+ if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
+ // Search start point is inside `new_interval`. Change it to nullptr
+ // (i.e. the end of the interval) in the original interval.
+ range_search_start_ = nullptr;
+ }
+ new_interval->range_search_start_ = new_interval->first_range_;
return new_interval;
} else {
// This range covers position. We create a new last_range_ for this interval
@@ -572,6 +602,12 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
}
new_interval->first_range_ = current;
current->start_ = position;
+ if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
+ // Search start point is inside `new_interval`. Change it to `last_range`
+ // in the original interval. This is conservative but always correct.
+ range_search_start_ = last_range_;
+ }
+ new_interval->range_search_start_ = new_interval->first_range_;
return new_interval;
}
} while (current != nullptr);
@@ -643,8 +679,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// Returns the location of the interval following its siblings at `position`.
Location GetLocationAt(size_t position);
- // Finds the interval that covers `position`.
- const LiveInterval& GetIntervalAt(size_t position);
+ // Finds the sibling that is defined at `position`.
+ LiveInterval* GetSiblingAt(size_t position);
// Returns whether `other` and `this` share the same kind of register.
bool SameRegisterKind(Location other) const;
@@ -697,7 +733,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
high_or_low_interval_->high_or_low_interval_ = this;
if (first_range_ != nullptr) {
high_or_low_interval_->first_range_ = first_range_->Dup(allocator_);
- high_or_low_interval_->last_range_ = first_range_->GetLastRange();
+ high_or_low_interval_->last_range_ = high_or_low_interval_->first_range_->GetLastRange();
+ high_or_low_interval_->range_search_start_ = high_or_low_interval_->first_range_;
}
if (first_use_ != nullptr) {
high_or_low_interval_->first_use_ = first_use_->Dup(allocator_);
@@ -707,12 +744,14 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// Returns whether an interval, when it is non-split, is using
// the same register of one of its input.
bool IsUsingInputRegister() const {
+ CHECK(kIsDebugBuild) << "Function should be used only for DCHECKs";
if (defined_by_ != nullptr && !IsSplit()) {
for (HInputIterator it(defined_by_); !it.Done(); it.Advance()) {
LiveInterval* interval = it.Current()->GetLiveInterval();
- // Find the interval that covers `defined_by`_.
- while (interval != nullptr && !interval->Covers(defined_by_->GetLifetimePosition())) {
+ // Find the interval that covers `defined_by`_. Calls to this function
+ // are made outside the linear scan, hence we need to use CoversSlow.
+ while (interval != nullptr && !interval->CoversSlow(defined_by_->GetLifetimePosition())) {
interval = interval->GetNextSibling();
}
@@ -731,6 +770,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// the same register of one of its input. Note that this method requires
// IsUsingInputRegister() to be true.
bool CanUseInputRegister() const {
+ CHECK(kIsDebugBuild) << "Function should be used only for DCHECKs";
DCHECK(IsUsingInputRegister());
if (defined_by_ != nullptr && !IsSplit()) {
LocationSummary* locations = defined_by_->GetLocations();
@@ -740,8 +780,9 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
for (HInputIterator it(defined_by_); !it.Done(); it.Advance()) {
LiveInterval* interval = it.Current()->GetLiveInterval();
- // Find the interval that covers `defined_by`_.
- while (interval != nullptr && !interval->Covers(defined_by_->GetLifetimePosition())) {
+ // Find the interval that covers `defined_by`_. Calls to this function
+ // are made outside the linear scan, hence we need to use CoversSlow.
+ while (interval != nullptr && !interval->CoversSlow(defined_by_->GetLifetimePosition())) {
interval = interval->GetNextSibling();
}
@@ -750,7 +791,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
&& interval->GetRegister() == GetRegister()) {
// We found the input that has the same register. Check if it is live after
// `defined_by`_.
- return !interval->Covers(defined_by_->GetLifetimePosition() + 1);
+ return !interval->CoversSlow(defined_by_->GetLifetimePosition() + 1);
}
}
}
@@ -773,6 +814,12 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
return first_safepoint_;
}
+ // Resets the starting point for range-searching queries to the first range.
+ // Intervals must be reset prior to starting a new linear scan over them.
+ void ResetSearchCache() {
+ range_search_start_ = first_range_;
+ }
+
private:
LiveInterval(ArenaAllocator* allocator,
Primitive::Type type,
@@ -785,9 +832,9 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
: allocator_(allocator),
first_range_(nullptr),
last_range_(nullptr),
+ range_search_start_(nullptr),
first_safepoint_(nullptr),
last_safepoint_(nullptr),
- last_visited_range_(nullptr),
first_use_(nullptr),
type_(type),
next_sibling_(nullptr),
@@ -801,39 +848,29 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
high_or_low_interval_(nullptr),
defined_by_(defined_by) {}
- // Returns a LiveRange covering the given position or nullptr if no such range
- // exists in the interval.
- // This is a linear search optimized for multiple queries in a non-decreasing
- // position order typical for linear scan register allocation.
- LiveRange* FindRangeAt(size_t position) {
- // Make sure operations on the interval didn't leave us with a cached result
- // from a sibling.
+ // Searches for a LiveRange that either covers the given position or is the
+ // first next LiveRange. Returns nullptr if no such LiveRange exists. Ranges
+ // known to end before `position` can be skipped with `search_start`.
+ LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) const {
if (kIsDebugBuild) {
- if (last_visited_range_ != nullptr) {
- DCHECK_GE(last_visited_range_->GetStart(), GetStart());
- DCHECK_LE(last_visited_range_->GetEnd(), GetEnd());
+ if (search_start != first_range_) {
+ // If we are not searching the entire list of ranges, make sure we do
+ // not skip the range we are searching for.
+ if (search_start == nullptr) {
+ DCHECK(IsDeadAt(position));
+ } else if (search_start->GetStart() > position) {
+ DCHECK_EQ(search_start, FindRangeAtOrAfter(position, first_range_));
+ }
}
}
- // If this method was called earlier on a lower position, use that result as
- // a starting point to save time. However, linear scan performs 3 scans:
- // integers, floats, and resolution. Instead of resetting at the beginning
- // of a scan, we do it here.
- LiveRange* current;
- if (last_visited_range_ != nullptr && position >= last_visited_range_->GetStart()) {
- current = last_visited_range_;
- } else {
- current = first_range_;
- }
- while (current != nullptr && current->GetEnd() <= position) {
- current = current->GetNext();
- }
- last_visited_range_ = current;
- if (current != nullptr && position >= current->GetStart()) {
- return current;
- } else {
- return nullptr;
+ LiveRange* range;
+ for (range = search_start;
+ range != nullptr && range->GetEnd() <= position;
+ range = range->GetNext()) {
+ continue;
}
+ return range;
}
ArenaAllocator* const allocator_;
@@ -843,14 +880,14 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
LiveRange* first_range_;
LiveRange* last_range_;
+ // The first range at or after the current position of a linear scan. It is
+ // used to optimize range-searching queries.
+ LiveRange* range_search_start_;
+
// Safepoints where this interval is live.
SafepointPosition* first_safepoint_;
SafepointPosition* last_safepoint_;
- // Last visited range. This is a range search optimization leveraging the fact
- // that the register allocator does a linear scan through the intervals.
- LiveRange* last_visited_range_;
-
// Uses of this interval. Note that this linked list is shared amongst siblings.
UsePosition* first_use_;
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index a73c8d77f3..9a9e068a9b 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -386,7 +386,8 @@ class StackMapStream : public ValueObject {
}
entry.live_dex_registers_mask->SetBit(dex_register);
- entry.dex_register_map_hash += (1 << dex_register);
+ entry.dex_register_map_hash +=
+ (1 << (dex_register % (sizeof(entry.dex_register_map_hash) * kBitsPerByte)));
entry.dex_register_map_hash += static_cast<uint32_t>(value);
entry.dex_register_map_hash += static_cast<uint32_t>(kind);
stack_maps_.Put(stack_maps_.Size() - 1, entry);