author    Ben Murdoch <benm@google.com>  2012-04-12 10:51:47 +0100
committer Ben Murdoch <benm@google.com>  2012-04-16 16:41:38 +0100
commit    3ef787dbeca8a5fb1086949cda830dccee07bfbd (patch)
tree      0a22edd97aa148abffdd405c585b22213fccbc82 /src/runtime-profiler.cc
parent    85b71799222b55eb5dd74ea26efe0c64ab655c8c (diff)
Merge V8 at 3.9.24.13
Bug: 5688872
Change-Id: Id0aa8d23375030494d3189c31774059c0f5398fc
Diffstat (limited to 'src/runtime-profiler.cc')
 -rw-r--r--  src/runtime-profiler.cc | 226
 1 file changed, 174 insertions(+), 52 deletions(-)
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 26d88461..6ed4ff48 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,6 +35,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
+#include "isolate-inl.h"
#include "mark-compact.h"
#include "platform.h"
#include "scopeinfo.h"
@@ -45,6 +46,8 @@ namespace internal {
// Optimization sampler constants.
static const int kSamplerFrameCount = 2;
+
+// Constants for statistical profiler.
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
static const int kSamplerTicksBetweenThresholdAdjustment = 32;
@@ -57,14 +60,24 @@ static const int kSamplerThresholdSizeFactorInit = 3;
static const int kSizeLimit = 1500;
+// Constants for counter based profiler.
+
+// Number of times a function has to be seen on the stack before it is
+// optimized.
+static const int kProfilerTicksBeforeOptimization = 2;
+
+// Maximum size in bytes of generated code for a function to be optimized
+// the very first time it is seen on the stack.
+static const int kMaxSizeEarlyOpt = 500;
+
Atomic32 RuntimeProfiler::state_ = 0;
-// TODO(isolates): Create the semaphore lazily and clean it up when no
-// longer required.
-Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
+
+// TODO(isolates): Clean up the semaphore when it is no longer required.
+static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
#ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_setup_ = false;
+bool RuntimeProfiler::has_been_globally_set_up_ = false;
#endif
bool RuntimeProfiler::enabled_ = false;
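The hunk above swaps an eagerly constructed semaphore for a lazily
initialized one, so no static constructor has to run at library load time.
A minimal sketch of that pattern, using std::call_once as a stand-in for
V8's real LazyInstance machinery (the Semaphore stub here is hypothetical):

    #include <mutex>

    struct Semaphore {
      explicit Semaphore(int initial_count) { /* platform-specific */ }
      void Wait() {}
      void Signal() {}
    };

    template <int kInitialCount>
    struct LazySemaphore {
      // The first caller constructs the Semaphore; later calls reuse it.
      Semaphore* Pointer() {
        std::call_once(once_, [this] { instance_ = new Semaphore(kInitialCount); });
        return instance_;
      }
      std::once_flag once_;
      Semaphore* instance_ = nullptr;
    };

    static LazySemaphore<0> semaphore;

Callers then go through semaphore.Pointer()->Signal() and ->Wait(), which
matches the call sites later in this diff.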
@@ -81,21 +94,45 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
void RuntimeProfiler::GlobalSetup() {
- ASSERT(!has_been_globally_setup_);
+ ASSERT(!has_been_globally_set_up_);
enabled_ = V8::UseCrankshaft() && FLAG_opt;
#ifdef DEBUG
- has_been_globally_setup_ = true;
+ has_been_globally_set_up_ = true;
#endif
}
-void RuntimeProfiler::Optimize(JSFunction* function) {
+static void GetICCounts(JSFunction* function,
+ int* ic_with_typeinfo_count,
+ int* ic_total_count,
+ int* percentage) {
+ *ic_total_count = 0;
+ *ic_with_typeinfo_count = 0;
+ Object* raw_info =
+ function->shared()->code()->type_feedback_info();
+ if (raw_info->IsTypeFeedbackInfo()) {
+ TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
+ *ic_with_typeinfo_count = info->ic_with_typeinfo_count();
+ *ic_total_count = info->ic_total_count();
+ }
+ *percentage = *ic_total_count > 0
+ ? 100 * *ic_with_typeinfo_count / *ic_total_count
+ : 100;
+}
+
+
+void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
ASSERT(function->IsOptimizable());
if (FLAG_trace_opt) {
PrintF("[marking ");
function->PrintName();
PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
- PrintF(" for recompilation");
+ PrintF(" for recompilation, reason: %s", reason);
+ if (FLAG_type_info_threshold > 0) {
+ int typeinfo, total, percentage;
+ GetICCounts(function, &typeinfo, &total, &percentage);
+ PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
+ }
PrintF("]\n");
}
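The percentage computed by GetICCounts above is what later gates the
"hot and stable" decision. A self-contained restatement of just that
arithmetic (TypeInfoPercentage is an invented name for illustration; note
the 100%-when-empty rule, which keeps functions with no inline caches at
all from being held back):

    #include <cstdio>

    static int TypeInfoPercentage(int ic_with_typeinfo_count,
                                  int ic_total_count) {
      return ic_total_count > 0
          ? 100 * ic_with_typeinfo_count / ic_total_count
          : 100;
    }

    int main() {
      std::printf("%d%%\n", TypeInfoPercentage(3, 4));  // 75%
      std::printf("%d%%\n", TypeInfoPercentage(0, 0));  // 100%, no ICs yet
      return 0;
    }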
@@ -134,15 +171,26 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// Get the stack check stub code object to match against. We aren't
// prepared to generate it, but we don't expect to have to.
- StackCheckStub check_stub;
- Object* check_code;
- MaybeObject* maybe_check_code = check_stub.TryGetCode();
- if (maybe_check_code->ToObject(&check_code)) {
+ bool found_code = false;
+ Code* stack_check_code = NULL;
+#if defined(V8_TARGET_ARCH_IA32) || \
+ defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_MIPS)
+ if (FLAG_count_based_interrupts) {
+ InterruptStub interrupt_stub;
+ found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
+ } else // NOLINT
+#endif
+ { // NOLINT
+ StackCheckStub check_stub;
+ found_code = check_stub.FindCodeInCache(&stack_check_code);
+ }
+ if (found_code) {
Code* replacement_code =
isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
Code* unoptimized_code = shared->code();
Deoptimizer::PatchStackCheckCode(unoptimized_code,
- Code::cast(check_code),
+ stack_check_code,
replacement_code);
}
}
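Two things change in the hunk above: the stub is now looked up with
FindCodeInCache, which never allocates (consistent with the "we aren't
prepared to generate it" comment), and on ia32, ARM and MIPS the code to
match is the InterruptStub when FLAG_count_based_interrupts is on. A toy
model of what count-based means, with invented names (in V8 the decrement
is emitted into generated code at loop back edges, and the runtime call
goes through the InterruptStub):

    static const int kInterruptBudget = 1000;  // hypothetical budget
    static int interrupt_budget = kInterruptBudget;

    static void ProfilerTick() { /* bump profiler_ticks(), run heuristics */ }

    static void OnBackEdge() {
      if (--interrupt_budget <= 0) {
        interrupt_budget = kInterruptBudget;
        ProfilerTick();  // deterministic, unlike a timer-driven sample
      }
    }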
@@ -186,28 +234,33 @@ void RuntimeProfiler::OptimizeNow() {
JSFunction* samples[kSamplerFrameCount];
int sample_count = 0;
int frame_count = 0;
+ int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
+ : kSamplerFrameCount;
for (JavaScriptFrameIterator it(isolate_);
- frame_count++ < kSamplerFrameCount && !it.done();
+ frame_count++ < frame_count_limit && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
JSFunction* function = JSFunction::cast(frame->function());
- // Adjust threshold each time we have processed
- // a certain number of ticks.
- if (sampler_ticks_until_threshold_adjustment_ > 0) {
- sampler_ticks_until_threshold_adjustment_--;
- if (sampler_ticks_until_threshold_adjustment_ <= 0) {
- // If the threshold is not already at the minimum
- // modify and reset the ticks until next adjustment.
- if (sampler_threshold_ > kSamplerThresholdMin) {
- sampler_threshold_ -= kSamplerThresholdDelta;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
+ if (!FLAG_watch_ic_patching) {
+ // Adjust threshold each time we have processed
+ // a certain number of ticks.
+ if (sampler_ticks_until_threshold_adjustment_ > 0) {
+ sampler_ticks_until_threshold_adjustment_--;
+ if (sampler_ticks_until_threshold_adjustment_ <= 0) {
+ // If the threshold is not already at the minimum
+ // modify and reset the ticks until next adjustment.
+ if (sampler_threshold_ > kSamplerThresholdMin) {
+ sampler_threshold_ -= kSamplerThresholdDelta;
+ sampler_ticks_until_threshold_adjustment_ =
+ kSamplerTicksBetweenThresholdAdjustment;
+ }
}
}
}
- if (function->IsMarkedForLazyRecompilation()) {
+ if (function->IsMarkedForLazyRecompilation() &&
+ function->shared()->code()->kind() == Code::FUNCTION) {
Code* unoptimized = function->shared()->code();
int nesting = unoptimized->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
@@ -217,37 +270,101 @@ void RuntimeProfiler::OptimizeNow() {
// Do not record non-optimizable functions.
if (!function->IsOptimizable()) continue;
- samples[sample_count++] = function;
+ if (function->shared()->optimization_disabled()) continue;
+
+ // Only record top-level code on top of the execution stack and
+ // avoid optimizing excessively large scripts since top-level code
+ // will be executed only once.
+ const int kMaxToplevelSourceSize = 10 * 1024;
+ if (function->shared()->is_toplevel()
+ && (frame_count > 1
+ || function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
+ continue;
+ }
+
+ if (FLAG_watch_ic_patching) {
+ int ticks = function->shared()->profiler_ticks();
+
+ if (ticks >= kProfilerTicksBeforeOptimization) {
+ int typeinfo, total, percentage;
+ GetICCounts(function, &typeinfo, &total, &percentage);
+ if (percentage >= FLAG_type_info_threshold) {
+ // If this particular function hasn't had any ICs patched for enough
+ // ticks, optimize it now.
+ Optimize(function, "hot and stable");
+ } else if (ticks >= 100) {
+ // If this function does not have enough type info, but has
+ // seen a huge number of ticks, optimize it as it is.
+ Optimize(function, "not much type info but very hot");
+ } else {
+ function->shared()->set_profiler_ticks(ticks + 1);
+ if (FLAG_trace_opt_verbose) {
+ PrintF("[not yet optimizing ");
+ function->PrintName();
+ PrintF(", not enough type info: %d/%d (%d%%)]\n",
+ typeinfo, total, percentage);
+ }
+ }
+ } else if (!any_ic_changed_ &&
+ function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) {
+ // If no IC was patched since the last tick and this function is very
+ // small, optimistically optimize it now.
+ Optimize(function, "small function");
+ } else if (!code_generated_ &&
+ !any_ic_changed_ &&
+ total_code_generated_ > 0 &&
+ total_code_generated_ < 2000) {
+ // If no code was generated and no IC was patched since the last tick,
+ // but a little code has already been generated since last Reset(),
+ // then type info might already be stable and we can optimize now.
+ Optimize(function, "stable on startup");
+ } else {
+ function->shared()->set_profiler_ticks(ticks + 1);
+ }
+ } else { // !FLAG_watch_ic_patching
+ samples[sample_count++] = function;
- int function_size = function->shared()->SourceSize();
- int threshold_size_factor = (function_size > kSizeLimit)
- ? sampler_threshold_size_factor_
- : 1;
+ int function_size = function->shared()->SourceSize();
+ int threshold_size_factor = (function_size > kSizeLimit)
+ ? sampler_threshold_size_factor_
+ : 1;
- int threshold = sampler_threshold_ * threshold_size_factor;
+ int threshold = sampler_threshold_ * threshold_size_factor;
- if (LookupSample(function) >= threshold) {
- Optimize(function);
+ if (LookupSample(function) >= threshold) {
+ Optimize(function, "sampler window lookup");
+ }
}
}
-
- // Add the collected functions as samples. It's important not to do
- // this as part of collecting them because this will interfere with
- // the sample lookup in case of recursive functions.
- for (int i = 0; i < sample_count; i++) {
- AddSample(samples[i], kSamplerFrameWeight[i]);
+ if (FLAG_watch_ic_patching) {
+ any_ic_changed_ = false;
+ code_generated_ = false;
+ } else { // !FLAG_watch_ic_patching
+ // Add the collected functions as samples. It's important not to do
+ // this as part of collecting them because this will interfere with
+ // the sample lookup in case of recursive functions.
+ for (int i = 0; i < sample_count; i++) {
+ AddSample(samples[i], kSamplerFrameWeight[i]);
+ }
}
}
void RuntimeProfiler::NotifyTick() {
+#if defined(V8_TARGET_ARCH_IA32) || \
+ defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_MIPS)
+ if (FLAG_count_based_interrupts) return;
+#endif
isolate_->stack_guard()->RequestRuntimeProfilerTick();
}
-void RuntimeProfiler::Setup() {
- ASSERT(has_been_globally_setup_);
- ClearSampleBuffer();
+void RuntimeProfiler::SetUp() {
+ ASSERT(has_been_globally_set_up_);
+ if (!FLAG_watch_ic_patching) {
+ ClearSampleBuffer();
+ }
// If the ticker hasn't already started, make sure to do so to get
// the ticks for the runtime profiler.
if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
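The counter-based policy that the OptimizeNow hunk above adds reduces to a
few comparisons. A stand-alone sketch under the constants from this file
(OptimizationDecision is an invented helper; the real code reads ticks
from SharedFunctionInfo and the threshold from FLAG_type_info_threshold,
and the "stable on startup" branch is omitted here):

    #include <cstddef>

    static const int kProfilerTicksBeforeOptimization = 2;
    static const int kMaxSizeEarlyOpt = 500;  // bytes of unoptimized code

    // Returns the reason string passed to Optimize(), or NULL to keep
    // ticking and wait for type feedback to stabilize.
    static const char* OptimizationDecision(int ticks,
                                            int type_info_percentage,
                                            int type_info_threshold,
                                            int code_size,
                                            bool any_ic_changed) {
      if (ticks >= kProfilerTicksBeforeOptimization) {
        if (type_info_percentage >= type_info_threshold)
          return "hot and stable";
        if (ticks >= 100) return "not much type info but very hot";
        return NULL;
      }
      if (!any_ic_changed && code_size < kMaxSizeEarlyOpt)
        return "small function";
      return NULL;
    }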
@@ -255,10 +372,14 @@ void RuntimeProfiler::Setup() {
void RuntimeProfiler::Reset() {
- sampler_threshold_ = kSamplerThresholdInit;
- sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
- sampler_ticks_until_threshold_adjustment_ =
- kSamplerTicksBetweenThresholdAdjustment;
+ if (FLAG_watch_ic_patching) {
+ total_code_generated_ = 0;
+ } else { // !FLAG_watch_ic_patching
+ sampler_threshold_ = kSamplerThresholdInit;
+ sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
+ sampler_ticks_until_threshold_adjustment_ =
+ kSamplerTicksBetweenThresholdAdjustment;
+ }
}
@@ -295,7 +416,7 @@ void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
// undid the decrement done by the profiler thread. Increment again
// to get the right count of active isolates.
NoBarrier_AtomicIncrement(&state_, 1);
- semaphore_->Signal();
+ semaphore.Pointer()->Signal();
}
@@ -308,7 +429,7 @@ bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
ASSERT(old_state >= -1);
if (old_state != 0) return false;
- semaphore_->Wait();
+ semaphore.Pointer()->Wait();
return true;
}
@@ -324,7 +445,7 @@ void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
if (new_state == 0) {
// The profiler thread is waiting. Wake it up. It must check for
// stop conditions before attempting to wait again.
- semaphore_->Signal();
+ semaphore.Pointer()->Signal();
}
thread->Join();
// The profiler thread is now stopped. Undo the increment in case it
@@ -338,7 +459,8 @@ void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
void RuntimeProfiler::RemoveDeadSamples() {
for (int i = 0; i < kSamplerWindowSize; i++) {
Object* function = sampler_window_[i];
- if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
+ if (function != NULL &&
+ !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
sampler_window_[i] = NULL;
}
}
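The RemoveDeadSamples change above follows V8's move of mark bits out of
object headers and into side bitmaps (Marking::MarkBitFrom). As a rough
mental model only, not V8's actual layout (the real heap packs one bit per
possible object start into a per-chunk bitmap rather than using a set):

    #include <cstdint>
    #include <unordered_set>

    // Side table of live object addresses, standing in for a mark bitmap.
    struct MarkBits {
      std::unordered_set<uintptr_t> marked;
      // Rough equivalent of Marking::MarkBitFrom(object).Get().
      bool Get(uintptr_t object_address) const {
        return marked.count(object_address) != 0;
      }
    };

The sampler window loop then nulls out any slot whose function has no mark
bit set, as in the hunk above.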