author | Dave Allison <dallison@google.com> | 2014-07-25 16:15:27 -0700
---|---|---
committer | Dave Allison <dallison@google.com> | 2014-08-13 09:01:41 -0700
commit | 648d7112609dd19c38131b3e71c37bcbbd19d11e (patch) |
tree | 54062831327c660acb309e877e8d8df9ba0c2d5d /compiler |
parent | 99c251bbd225dd97d0deece29559a430b12a0b66 (diff) |
Reduce stack usage for overflow checks
This reduces the stack space reserved for overflow checks to 12K, split
into an 8K gap and a 4K protected region. The GC needs more than 8K of
stack when it runs in a stack overflow situation.
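
As a rough sketch of what this split implies (the constant names, `ProtectStackBottom`, and `ImplicitStackProbe` below are illustrative, not ART's actual API; the real per-ISA sizes come from `GetStackOverflowReservedBytes()`):

```cpp
#include <sys/mman.h>

#include <cstddef>

// Illustrative constants mirroring the 12K reservation described above.
constexpr size_t kStackOverflowGapBytes = 8 * 1024;        // usable headroom (GC etc.)
constexpr size_t kStackOverflowProtectedBytes = 4 * 1024;  // mapped PROT_NONE
constexpr size_t kStackOverflowReservedBytes =
    kStackOverflowGapBytes + kStackOverflowProtectedBytes;

// Arm a thread's stack: make its lowest bytes inaccessible so that any
// touch of the protected region faults. `stack_base` (the lowest address
// of the stack mapping) must be page-aligned for mprotect to succeed.
bool ProtectStackBottom(void* stack_base) {
  return mprotect(stack_base, kStackOverflowProtectedBytes, PROT_NONE) == 0;
}

// Conceptually, an implicit entry check is a single load at the reservation
// boundary below the current frame: if fewer than kStackOverflowReservedBytes
// of stack remain, the load lands in the PROT_NONE region and raises SIGSEGV,
// which the fault handler turns into a StackOverflowError.
void ImplicitStackProbe() {
  volatile char* probe =
      static_cast<char*>(__builtin_frame_address(0)) - kStackOverflowReservedBytes;
  (void)*probe;  // read-only probe; never writes below SP
}
```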
Also prevents signal runaway by detecting a signal that arrives inside code
which itself resulted from a signal handler invocation, and adds a maximum
signal count to the SignalTest to prevent it from running forever.
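
The runaway prevention can be pictured as a re-entry guard around the fault handler. This is a minimal sketch assuming a thread-local flag; ART's actual fault manager detects the condition by examining where the faulting code came from:

```cpp
#include <signal.h>

// Hypothetical guard against signal runaway: if a SIGSEGV arrives while we
// are already inside handler-invoked code, stop re-entering the handler.
static thread_local bool tls_in_fault_handler = false;

void FaultHandler(int sig, siginfo_t* info, void* context) {
  (void)info;
  (void)context;
  if (tls_in_fault_handler) {
    // A second signal came from code the handler itself invoked: restore
    // the default disposition and re-raise instead of looping forever.
    signal(sig, SIG_DFL);
    raise(sig);
    return;
  }
  tls_in_fault_handler = true;
  // ... dispatch to the stack-overflow / null-pointer handlers here ...
  tls_in_fault_handler = false;
}
```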
Also reduces the number of iterations for the InterfaceTest, since it was
taking (almost) forever with the --trace option on run-test.
Bug: 15435566
Change-Id: Id4fd46f22d52d42a9eb431ca07948673e8fda694
Diffstat (limited to 'compiler')
-rw-r--r-- | compiler/dex/quick/arm/call_arm.cc | 11
-rw-r--r-- | compiler/dex/quick/arm64/call_arm64.cc | 10
-rw-r--r-- | compiler/dex/quick/mips/call_mips.cc | 2
-rw-r--r-- | compiler/dex/quick/x86/call_x86.cc | 2
-rw-r--r-- | compiler/optimizing/code_generator_arm.cc | 2
-rw-r--r-- | compiler/optimizing/code_generator_x86.cc | 2
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 2
-rw-r--r-- | compiler/utils/stack_checks.h | 7

8 files changed, 21 insertions, 17 deletions
```diff
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index b1339916f0..4ba3c4b9bb 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -354,13 +354,14 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm);
+  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm);
   NewLIR0(kPseudoMethodEntry);
-  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm) -
-      Thread::kStackOverflowSignalReservedBytes;
+  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm);
   bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
+  bool generate_explicit_stack_overflow_check = large_frame ||
+      !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
   if (!skip_overflow_check) {
-    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+    if (generate_explicit_stack_overflow_check) {
       if (!large_frame) {
         /* Load stack limit */
         LockTemp(rs_r12);
@@ -399,7 +400,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
   const int spill_size = spill_count * 4;
   const int frame_size_without_spills = frame_size_ - spill_size;
   if (!skip_overflow_check) {
-    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+    if (generate_explicit_stack_overflow_check) {
       class StackOverflowSlowPath : public LIRSlowPath {
        public:
         StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 28b747b591..0538c31fb8 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -329,16 +329,20 @@ void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm64);
+  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm64);
   NewLIR0(kPseudoMethodEntry);
+  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm64);
+  const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
+  bool generate_explicit_stack_overflow_check = large_frame ||
+      !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
   const int spill_count = num_core_spills_ + num_fp_spills_;
   const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf;  // SP 16 byte alignment.
   const int frame_size_without_spills = frame_size_ - spill_size;
   if (!skip_overflow_check) {
-    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+    if (generate_explicit_stack_overflow_check) {
       // Load stack limit
       LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
     } else {
@@ -365,7 +369,7 @@ void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
   }
   if (!skip_overflow_check) {
-    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+    if (generate_explicit_stack_overflow_check) {
       class StackOverflowSlowPath: public LIRSlowPath {
        public:
         StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) :
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 4577a4c904..e8cb356186 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -303,7 +303,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kMips);
+  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kMips);
   NewLIR0(kPseudoMethodEntry);
   RegStorage check_reg = AllocTemp();
   RegStorage new_sp = AllocTemp();
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index f5f86717b4..996689a46c 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -219,7 +219,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
    * a leaf *and* our frame size < fudge factor.
    */
   InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
-  const bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, isa);
+  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);
   // If we doing an implicit stack overflow check, perform the load immediately
   // before the stack pointer is decremented and anything is saved.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index eccc970042..2c954a0502 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -265,7 +265,7 @@ InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGene
       codegen_(codegen) {}

 void CodeGeneratorARM::GenerateFrameEntry() {
-  bool skip_overflow_check = IsLeafMethod() && !IsLargeFrame(GetFrameSize(), InstructionSet::kArm);
+  bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
   if (!skip_overflow_check) {
     if (kExplicitStackOverflowCheck) {
       SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ab53b17636..35b811628b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -241,7 +241,7 @@ void CodeGeneratorX86::GenerateFrameEntry() {
   static const int kFakeReturnRegister = 8;
   core_spill_mask_ |= (1 << kFakeReturnRegister);

-  bool skip_overflow_check = IsLeafMethod() && !IsLargeFrame(GetFrameSize(), InstructionSet::kX86);
+  bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
   if (!skip_overflow_check && !kExplicitStackOverflowCheck) {
     __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
     RecordPcInfo(0);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e4259f51b4..c4571caf27 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -209,7 +209,7 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
   core_spill_mask_ |= (1 << kFakeReturnRegister);

   bool skip_overflow_check = IsLeafMethod()
-      && !IsLargeFrame(GetFrameSize(), InstructionSet::kX86_64);
+      && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
   if (!skip_overflow_check && !kExplicitStackOverflowCheck) {
     __ testq(CpuRegister(RAX), Address(
diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h
index 63adbc2328..ce01077808 100644
--- a/compiler/utils/stack_checks.h
+++ b/compiler/utils/stack_checks.h
@@ -33,10 +33,9 @@ static constexpr size_t kSmallFrameSize = 1 * KB;
 // Determine whether a frame is small or large, used in the decision on whether to elide a
 // stack overflow check on method entry.
 //
-// A frame is considered large when it's either above kLargeFrameSize, or a quarter of the
-// overflow-usable stack space.
-static inline bool IsLargeFrame(size_t size, InstructionSet isa) {
-  return size >= kLargeFrameSize || size >= GetStackOverflowReservedBytes(isa) / 4;
+// A frame is considered large when it's above kLargeFrameSize.
+static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa) {
+  return size >= kLargeFrameSize;
 }

 }  // namespace art
```
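
For reference, the net effect of the stack_checks.h change on the elision decision can be condensed as follows. This is a sketch: the `kLargeFrameSize` value shown and the `SkipOverflowCheck` helper are illustrative, while the threshold-only test matches the new `FrameNeedsStackCheck` above:

```cpp
#include <cstddef>

// The quarter-of-reserved-space criterion is gone; only the absolute
// kLargeFrameSize threshold now decides whether a frame needs a check.
constexpr size_t KB = 1024;
constexpr size_t kLargeFrameSize = 2 * KB;  // illustrative value

inline bool FrameNeedsStackCheck(size_t frame_size) {
  return frame_size >= kLargeFrameSize;
}

// How every backend in this change composes the decision: only a leaf
// method with a small frame may skip its stack overflow check on entry.
inline bool SkipOverflowCheck(bool is_leaf, size_t frame_size) {
  return is_leaf && !FrameNeedsStackCheck(frame_size);
}
```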