author     Ben Murdoch <benm@google.com>   2012-04-23 10:39:17 +0100
committer  Ben Murdoch <benm@google.com>   2012-04-23 10:39:17 +0100
commit     8f9999fcc44cfd4f5e1140c6678bbca4cf8ea1c7 (patch)
tree       6737d3b2a7df79987192a9d9c5a285d8a01e31cd
parent     3ef787dbeca8a5fb1086949cda830dccee07bfbd (diff)
Merge V8 at 3.9.24.15
Bug: 5688872
Change-Id: Idd477521ab57b89f9c3200852b82af8e08aa2191
-rw-r--r--  V8_MERGE_REVISION                             |  4
-rw-r--r--  src/arm/stub-cache-arm.cc                     |  4
-rw-r--r--  src/compiler.cc                               |  8
-rw-r--r--  src/factory.cc                                |  4
-rw-r--r--  src/full-codegen.cc                           |  1
-rw-r--r--  src/heap.cc                                   | 27
-rw-r--r--  src/heap.h                                    |  5
-rw-r--r--  src/ia32/stub-cache-ia32.cc                   |  4
-rw-r--r--  src/ic.cc                                     | 78
-rw-r--r--  src/incremental-marking.cc                    | 15
-rw-r--r--  src/incremental-marking.h                     | 12
-rw-r--r--  src/mark-compact.cc                           |  4
-rw-r--r--  src/mips/stub-cache-mips.cc                   |  4
-rw-r--r--  src/objects-inl.h                             | 17
-rw-r--r--  src/objects-printer.cc                        |  4
-rw-r--r--  src/objects.cc                                | 44
-rw-r--r--  src/objects.h                                 | 43
-rw-r--r--  src/runtime-profiler.cc                       | 37
-rw-r--r--  src/runtime.cc                                |  3
-rw-r--r--  src/spaces.cc                                 |  6
-rw-r--r--  src/version.cc                                |  2
-rw-r--r--  src/x64/stub-cache-x64.cc                     |  4
-rw-r--r--  test/cctest/test-heap.cc                      | 97
-rw-r--r--  test/mjsunit/regress/regress-crbug-122271.js  | 49
24 files changed, 352 insertions(+), 124 deletions(-)
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 3c9aae89..9bdc5bd3 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,2 +1,2 @@
-V8 3.9.24.13
-http://v8.googlecode.com/svn/branches/3.9@11337
+V8 3.9.24.15
+http://v8.googlecode.com/svn/branches/3.9@11368
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 06f8385a..852824f8 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -443,8 +443,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Label exit;
// Check that the map of the object hasn't changed.
+ CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
+ : REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
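
Note: the same two-line fix recurs below for ia32, mips, and x64. A minimal sketch of the rule all four ports now share (hypothetical helper; the patch inlines the ternary at each site):

    static CompareMapMode MapCheckModeFor(Handle<Map> transition) {
      // A store that installs a map transition must match the exact map the
      // transition was recorded for; accepting an elements-kind sibling map
      // here could attach the transition to the wrong map. See the regression
      // test test/mjsunit/regress/regress-crbug-122271.js at the end of this
      // patch.
      return transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
                                  : REQUIRE_EXACT_MAP;
    }
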
diff --git a/src/compiler.cc b/src/compiler.cc
index 22723377..c9c2480f 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -531,6 +531,10 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);
}
+ } else {
+ if (result->ic_age() != HEAP->global_ic_age()) {
+ result->ResetForNewContext(HEAP->global_ic_age());
+ }
}
if (result.is_null()) isolate->ReportPendingMessages();
@@ -586,6 +590,10 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
compilation_cache->PutEval(
source, context, is_global, result, scope_position);
}
+ } else {
+ if (result->ic_age() != HEAP->global_ic_age()) {
+ result->ResetForNewContext(HEAP->global_ic_age());
+ }
}
return result;
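
Note: the ic_age check added here reappears in factory.cc, incremental-marking.cc, and mark-compact.cc below. A sketch of the shared pattern (hypothetical free function; each site inlines it):

    static void ResetIfContextWasDisposed(SharedFunctionInfo* shared,
                                          Heap* heap) {
      // global_ic_age() is bumped by ContextDisposedNotification (see the
      // test-heap.cc tests below); a stale ic_age means this function
      // outlived a disposed context, so its ICs and counters get reset.
      if (shared->ic_age() != heap->global_ic_age()) {
        shared->ResetForNewContext(heap->global_ic_age());
      }
    }
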
diff --git a/src/factory.cc b/src/factory.cc
index 143099cf..e8a9f26a 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -537,6 +537,10 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
: isolate()->strict_mode_function_map(),
pretenure);
+ if (function_info->ic_age() != isolate()->heap()->global_ic_age()) {
+ function_info->ResetForNewContext(isolate()->heap()->global_ic_age());
+ }
+
result->set_context(*context);
if (!function_info->bound()) {
int number_of_literals = function_info->num_literals();
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index d963979a..531eed24 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -327,6 +327,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_compiled_optimizable(info->IsOptimizable());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
+ code->set_profiler_ticks(0);
code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
diff --git a/src/heap.cc b/src/heap.cc
index a1cccf6f..48e8b567 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -145,7 +145,6 @@ Heap::Heap()
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
- idle_notification_will_schedule_next_gc_(false),
mark_sweeps_since_idle_round_started_(0),
ms_count_at_last_idle_notification_(0),
gc_count_at_last_idle_gc_(0),
@@ -504,11 +503,17 @@ bool Heap::CollectGarbage(AllocationSpace space,
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+ // Make progress in incremental marking.
+ const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
+ incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (!incremental_marking()->IsComplete()) {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+ }
+ collector = SCAVENGER;
+ collector_reason = "incremental marking delaying mark-sweep";
}
- collector = SCAVENGER;
- collector_reason = "incremental marking delaying mark-sweep";
}
bool next_gc_likely_to_collect_more = false;
@@ -1953,7 +1958,7 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
if (!maybe_info->To(&info)) return maybe_info;
}
info->set_ic_total_count(0);
- info->set_ic_with_typeinfo_count(0);
+ info->set_ic_with_type_info_count(0);
info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
return info;
@@ -2897,9 +2902,9 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_deopt_counter(FLAG_deopt_every_n_times);
- share->set_profiler_ticks(0);
share->set_ast_node_count(0);
+ share->set_deopt_counter(FLAG_deopt_every_n_times);
+ share->set_ic_age(0);
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
@@ -4817,10 +4822,8 @@ void Heap::EnsureHeapIsIterable() {
void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
- // This flag prevents incremental marking from requesting GC via stack guard
- idle_notification_will_schedule_next_gc_ = true;
- incremental_marking()->Step(step_size);
- idle_notification_will_schedule_next_gc_ = false;
+ incremental_marking()->Step(step_size,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD);
if (incremental_marking()->IsComplete()) {
bool uncommit = false;
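
Note: condensed control flow of the reworked delay logic above (paraphrase under the conditions shown in the hunk):

    // A full GC was requested while incremental marking is active:
    incremental_marking()->Step(1 * MB, NO_GC_VIA_STACK_GUARD);  // progress
    if (!incremental_marking()->IsComplete()) {
      collector = SCAVENGER;  // work left: keep delaying mark-sweep
    }                         // else: marking done, mark-sweep runs now
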
diff --git a/src/heap.h b/src/heap.h
index 2bd037f1..0391e0e5 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1569,10 +1569,6 @@ class Heap {
// The roots that have an index less than this are always in old space.
static const int kOldSpaceRoots = 0x20;
- bool idle_notification_will_schedule_next_gc() {
- return idle_notification_will_schedule_next_gc_;
- }
-
uint32_t HashSeed() {
uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
ASSERT(FLAG_randomize_hashes || seed == 0);
@@ -2033,7 +2029,6 @@ class Heap {
unsigned int last_idle_notification_gc_count_;
bool last_idle_notification_gc_count_init_;
- bool idle_notification_will_schedule_next_gc_;
int mark_sweeps_since_idle_round_started_;
int ms_count_at_last_idle_notification_;
unsigned int gc_count_at_last_idle_gc_;
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index fd267798..5a5afdc7 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -746,8 +746,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the map of the object hasn't changed.
+ CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
+ : REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ miss_label, DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
diff --git a/src/ic.cc b/src/ic.cc
index c7621277..b8d4b40b 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -296,58 +296,44 @@ Failure* IC::ReferenceError(const char* type, Handle<String> name) {
}
+static int ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state) {
+ bool was_uninitialized =
+ old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
+ bool is_uninitialized =
+ new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
+ return (was_uninitialized && !is_uninitialized) ? 1 :
+ (!was_uninitialized && is_uninitialized) ? -1 : 0;
+}
+
+
void IC::PostPatching(Address address, Code* target, Code* old_target) {
- if (FLAG_type_info_threshold > 0) {
- if (old_target->is_inline_cache_stub() &&
- target->is_inline_cache_stub()) {
- State old_state = old_target->ic_state();
- State new_state = target->ic_state();
- bool was_uninitialized =
- old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
- bool is_uninitialized =
- new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
- int delta = 0;
- if (was_uninitialized && !is_uninitialized) {
- delta = 1;
- } else if (!was_uninitialized && is_uninitialized) {
- delta = -1;
- }
- if (delta != 0) {
- Code* host = target->GetHeap()->isolate()->
- inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
- // Not all Code objects have TypeFeedbackInfo.
- if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
- TypeFeedbackInfo* info =
- TypeFeedbackInfo::cast(host->type_feedback_info());
- info->set_ic_with_typeinfo_count(
- info->ic_with_typeinfo_count() + delta);
- }
- }
+ if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
+ return;
+ }
+ Code* host = target->GetHeap()->isolate()->
+ inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+ if (host->kind() != Code::FUNCTION) return;
+
+ if (FLAG_type_info_threshold > 0 &&
+ old_target->is_inline_cache_stub() &&
+ target->is_inline_cache_stub()) {
+ int delta = ComputeTypeInfoCountDelta(old_target->ic_state(),
+ target->ic_state());
+ // Not all Code objects have TypeFeedbackInfo.
+ if (delta != 0 && host->type_feedback_info()->IsTypeFeedbackInfo()) {
+ TypeFeedbackInfo* info =
+ TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->set_ic_with_type_info_count(
+ info->ic_with_type_info_count() + delta);
}
}
if (FLAG_watch_ic_patching) {
+ host->set_profiler_ticks(0);
Isolate::Current()->runtime_profiler()->NotifyICChanged();
- // We do not want to optimize until the ICs have settled down,
- // so when they are patched, we postpone optimization for the
- // current function and the functions above it on the stack that
- // might want to inline this one.
- StackFrameIterator it;
- if (it.done()) return;
- it.Advance();
- static const int kStackFramesToMark = Compiler::kMaxInliningLevels - 1;
- for (int i = 0; i < kStackFramesToMark; ++i) {
- if (it.done()) return;
- StackFrame* raw_frame = it.frame();
- if (raw_frame->is_java_script()) {
- JSFunction* function =
- JSFunction::cast(JavaScriptFrame::cast(raw_frame)->function());
- if (function->IsOptimized()) continue;
- SharedFunctionInfo* shared = function->shared();
- shared->set_profiler_ticks(0);
- }
- it.Advance();
- }
}
+ // TODO(2029): When an optimized function is patched, it would
+ // be nice to propagate the corresponding type information to its
+ // unoptimized version for the benefit of later inlining.
}
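
Note: the three possible outcomes of ComputeTypeInfoCountDelta, for reference (MONOMORPHIC and MEGAMORPHIC are assumed members of IC::State, alongside the UNINITIALIZED and PREMONOMORPHIC used above):

    ASSERT(ComputeTypeInfoCountDelta(UNINITIALIZED, MONOMORPHIC) == 1);    // gained type info
    ASSERT(ComputeTypeInfoCountDelta(MONOMORPHIC, PREMONOMORPHIC) == -1);  // lost it again
    ASSERT(ComputeTypeInfoCountDelta(MEGAMORPHIC, MONOMORPHIC) == 0);      // typed either way
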
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 8fe89b4a..7bbd5218 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -205,6 +205,12 @@ class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
MarkObject(target);
}
+ void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
+ if (shared->ic_age() != heap_->global_ic_age()) {
+ shared->ResetForNewContext(heap_->global_ic_age());
+ }
+ }
+
void VisitPointer(Object** p) {
Object* obj = *p;
if (obj->NonFailureIsHeapObject()) {
@@ -743,7 +749,7 @@ void IncrementalMarking::Finalize() {
}
-void IncrementalMarking::MarkingComplete() {
+void IncrementalMarking::MarkingComplete(CompletionAction action) {
state_ = COMPLETE;
// We will set the stack guard to request a GC now. This will mean the rest
// of the GC gets performed as soon as possible (we can't do a GC here in a
@@ -754,13 +760,14 @@ void IncrementalMarking::MarkingComplete() {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Complete (normal).\n");
}
- if (!heap_->idle_notification_will_schedule_next_gc()) {
+ if (action == GC_VIA_STACK_GUARD) {
heap_->isolate()->stack_guard()->RequestGC();
}
}
-void IncrementalMarking::Step(intptr_t allocated_bytes) {
+void IncrementalMarking::Step(intptr_t allocated_bytes,
+ CompletionAction action) {
if (heap_->gc_state() != Heap::NOT_IN_GC ||
!FLAG_incremental_marking ||
!FLAG_incremental_marking_steps ||
@@ -833,7 +840,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
Marking::MarkBlack(obj_mark_bit);
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
}
- if (marking_deque_.IsEmpty()) MarkingComplete();
+ if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
allocated_ = 0;
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index 4f8fa6b1..8cbe6c18 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -46,6 +46,11 @@ class IncrementalMarking {
COMPLETE
};
+ enum CompletionAction {
+ GC_VIA_STACK_GUARD,
+ NO_GC_VIA_STACK_GUARD
+ };
+
explicit IncrementalMarking(Heap* heap);
void TearDown();
@@ -82,7 +87,7 @@ class IncrementalMarking {
void Abort();
- void MarkingComplete();
+ void MarkingComplete(CompletionAction action);
// It's hard to know how much work the incremental marker should do to make
// progress in the face of the mutator creating new work for it. We start
@@ -102,10 +107,11 @@ class IncrementalMarking {
static const intptr_t kMaxAllocationMarkingFactor = 1000;
void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+ Step(allocated * kFastMarking / kInitialAllocationMarkingFactor,
+ GC_VIA_STACK_GUARD);
}
- void Step(intptr_t allocated);
+ void Step(intptr_t allocated, CompletionAction action);
inline void RestartIfNotMarking() {
if (state_ == COMPLETE) {
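
Note: CompletionAction threads the "may completion arm the stack guard?" decision from each Step() caller down to MarkingComplete(), replacing the Heap-global idle_notification_will_schedule_next_gc_ flag. The call sites in this patch:

    // Step() caller                                  action passed
    // NewSpace::SlowAllocateRaw (spaces.cc)          GC_VIA_STACK_GUARD
    // OldSpaceStep (incremental-marking.h)           GC_VIA_STACK_GUARD
    // Heap::AdvanceIdleIncrementalMarking (heap.cc)  NO_GC_VIA_STACK_GUARD
    // Heap::CollectGarbage delay path (heap.cc)      NO_GC_VIA_STACK_GUARD
    // test/cctest/test-heap.cc marking steps         NO_GC_VIA_STACK_GUARD
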
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index dde172d2..b4f488bd 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1406,6 +1406,10 @@ class StaticMarkingVisitor : public StaticVisitorBase {
if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
+ if (shared->ic_age() != heap->global_ic_age()) {
+ shared->ResetForNewContext(heap->global_ic_age());
+ }
+
if (!known_flush_code_candidate) {
known_flush_code_candidate = IsFlushable(heap, shared);
if (known_flush_code_candidate) {
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 294bc0a0..f7b94b92 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -429,8 +429,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// a0 : value.
Label exit;
// Check that the map of the object hasn't changed.
+ CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
+ : REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 78578cc8..6c2c73aa 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3088,6 +3088,19 @@ void Code::set_allow_osr_at_loop_nesting_level(int level) {
}
+int Code::profiler_ticks() {
+ ASSERT(kind() == FUNCTION);
+ return READ_BYTE_FIELD(this, kProfilerTicksOffset);
+}
+
+
+void Code::set_profiler_ticks(int ticks) {
+ ASSERT(kind() == FUNCTION);
+ ASSERT(ticks < 256);
+ WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+}
+
+
unsigned Code::stack_slots() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
return READ_UINT32_FIELD(this, kStackSlotsOffset);
@@ -3507,8 +3520,8 @@ ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
kThisPropertyAssignmentsOffset)
+SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
-SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
kHiddenPrototypeBit)
@@ -4814,7 +4827,7 @@ Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset)
-SMI_ACCESSORS(TypeFeedbackInfo, ic_with_typeinfo_count,
+SMI_ACCESSORS(TypeFeedbackInfo, ic_with_type_info_count,
kIcWithTypeinfoCountOffset)
ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
kTypeFeedbackCellsOffset)
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 38e61386..2353a952 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -559,8 +559,8 @@ void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
- PrintF(out, "\n - ic_total_count: %d, ic_with_typeinfo_count: %d",
- ic_total_count(), ic_with_typeinfo_count());
+ PrintF(out, "\n - ic_total_count: %d, ic_with_type_info_count: %d",
+ ic_total_count(), ic_with_type_info_count());
PrintF(out, "\n - type_feedback_cells: ");
type_feedback_cells()->FixedArrayPrint(out);
}
diff --git a/src/objects.cc b/src/objects.cc
index 64d85a06..208c75b8 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1390,9 +1390,11 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case EXTERNAL_FLOAT_ARRAY_TYPE:
case EXTERNAL_DOUBLE_ARRAY_TYPE:
break;
- case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
+ case SHARED_FUNCTION_INFO_TYPE: {
+ SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
+ shared->SharedFunctionInfoIterateBody(v);
break;
+ }
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE:
@@ -7869,6 +7871,22 @@ void SharedFunctionInfo::AttachInitialMap(Map* map) {
}
+void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
+ code()->ClearInlineCaches();
+ set_ic_age(new_ic_age);
+ if (code()->kind() == Code::FUNCTION) {
+ code()->set_profiler_ticks(0);
+ if (optimization_disabled() &&
+ opt_count() >= Compiler::kDefaultMaxOptCount) {
+ // Re-enable optimizations if they were disabled due to opt_count limit.
+ set_optimization_disabled(false);
+ code()->set_optimizable(true);
+ }
+ set_opt_count(0);
+ }
+}
+
+
static void GetMinInobjectSlack(Map* map, void* data) {
int slack = map->unused_property_fields();
if (*reinterpret_cast<int*>(data) > slack) {
@@ -7912,6 +7930,12 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
}
+void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
+ v->VisitSharedFunctionInfo(this);
+ SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
+}
+
+
#define DECLARE_TAG(ignore1, name, ignore2) name,
const char* const VisitorSynchronization::kTags[
VisitorSynchronization::kNumberOfSyncTags] = {
@@ -7969,7 +7993,6 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
}
-
void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
VisitPointer(rinfo->target_object_address());
@@ -8116,6 +8139,21 @@ Map* Code::FindFirstMap() {
}
+void Code::ClearInlineCaches() {
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID) |
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET_CONTEXT);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
+ if (target->is_inline_cache_stub()) {
+ IC::Clear(info->pc());
+ }
+ }
+}
+
+
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
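
Note: ResetForNewContext (added above) is the single reset point that compiler.cc, factory.cc, and both GC marking paths call into. Summarized from the function body:

    // ResetForNewContext(new_ic_age):
    //   - ClearInlineCaches() on the current code;
    //   - set_ic_age(new_ic_age) to stamp the shared info as current;
    //   - for Code::FUNCTION only: zero profiler_ticks and opt_count, and
    //     re-enable optimization if it was disabled merely for reaching
    //     Compiler::kDefaultMaxOptCount.
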
diff --git a/src/objects.h b/src/objects.h
index a9cb8e0d..ccd07ff1 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -4255,6 +4255,11 @@ class Code: public HeapObject {
inline void set_allow_osr_at_loop_nesting_level(int level);
inline int allow_osr_at_loop_nesting_level();
+ // [profiler_ticks]: For FUNCTION kind, tells for how many profiler ticks
+ // the code object was seen on the stack with no IC patching going on.
+ inline int profiler_ticks();
+ inline void set_profiler_ticks(int ticks);
+
// [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
// reserved in the code prologue.
inline unsigned stack_slots();
@@ -4423,6 +4428,7 @@ class Code: public HeapObject {
#ifdef DEBUG
void CodeVerify();
#endif
+ void ClearInlineCaches();
// Max loop nesting marker used to postpose OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
@@ -4473,6 +4479,7 @@ class Code: public HeapObject {
static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
+ static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
@@ -5323,16 +5330,18 @@ class SharedFunctionInfo: public HeapObject {
inline int compiler_hints();
inline void set_compiler_hints(int value);
+ inline int ast_node_count();
+ inline void set_ast_node_count(int count);
+
// A counter used to determine when to stress the deoptimizer with a
// deopt.
inline int deopt_counter();
inline void set_deopt_counter(int counter);
- inline int profiler_ticks();
- inline void set_profiler_ticks(int ticks);
-
- inline int ast_node_count();
- inline void set_ast_node_count(int count);
+ // Inline cache age is used to infer whether the function survived a context
+ // disposal or not. In the former case we reset the opt_count.
+ inline int ic_age();
+ inline void set_ic_age(int age);
// Add information on assignments of the form this.x = ...;
void SetThisPropertyAssignmentsInfo(
@@ -5478,6 +5487,8 @@ class SharedFunctionInfo: public HeapObject {
void SharedFunctionInfoVerify();
#endif
+ void ResetForNewContext(int new_ic_age);
+
// Helpers to compile the shared code. Returns true on success, false on
// failure (e.g., stack overflow during compilation).
static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
@@ -5485,6 +5496,8 @@ class SharedFunctionInfo: public HeapObject {
static bool CompileLazy(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag);
+ void SharedFunctionInfoIterateBody(ObjectVisitor* v);
+
// Casting.
static inline SharedFunctionInfo* cast(Object* obj);
@@ -5508,12 +5521,13 @@ class SharedFunctionInfo: public HeapObject {
kInferredNameOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInitialMapOffset + kPointerSize;
- static const int kProfilerTicksOffset =
- kThisPropertyAssignmentsOffset + kPointerSize;
+ // ic_age is a Smi field. It could be grouped with another Smi field into a
+ // PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
+ static const int kICAgeOffset = kThisPropertyAssignmentsOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
- kProfilerTicksOffset + kPointerSize;
+ kICAgeOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
@@ -5532,8 +5546,9 @@ class SharedFunctionInfo: public HeapObject {
static const int kOptCountOffset =
kThisPropertyAssignmentsCountOffset + kPointerSize;
static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
- static const int kDeoptCounterOffset =
- kAstNodeCountOffset + kPointerSize;
+ static const int kDeoptCounterOffset = kAstNodeCountOffset + kPointerSize;
+
+
// Total size.
static const int kSize = kDeoptCounterOffset + kPointerSize;
#else
@@ -5547,7 +5562,7 @@ class SharedFunctionInfo: public HeapObject {
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
- kProfilerTicksOffset + kPointerSize;
+ kICAgeOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
@@ -6562,8 +6577,8 @@ class TypeFeedbackInfo: public Struct {
inline int ic_total_count();
inline void set_ic_total_count(int count);
- inline int ic_with_typeinfo_count();
- inline void set_ic_with_typeinfo_count(int count);
+ inline int ic_with_type_info_count();
+ inline void set_ic_with_type_info_count(int count);
DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
@@ -8531,6 +8546,8 @@ class ObjectVisitor BASE_EMBEDDED {
// Visit pointer embedded into a code object.
virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
+ virtual void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {}
+
// Visits a contiguous arrays of external references (references to the C++
// heap) in the half-open range [start, end). Any or all of the values
// may be modified on return.
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 6ed4ff48..b06168a2 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -65,6 +65,12 @@ static const int kSizeLimit = 1500;
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
+// If a function does not have enough type info (according to
+// FLAG_type_info_threshold), but has seen a huge number of ticks,
+// optimize it as it is.
+static const int kTicksWhenNotEnoughTypeInfo = 100;
+// We only have one byte to store the number of ticks.
+STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
@@ -103,20 +109,20 @@ void RuntimeProfiler::GlobalSetup() {
static void GetICCounts(JSFunction* function,
- int* ic_with_typeinfo_count,
+ int* ic_with_type_info_count,
int* ic_total_count,
int* percentage) {
*ic_total_count = 0;
- *ic_with_typeinfo_count = 0;
+ *ic_with_type_info_count = 0;
Object* raw_info =
function->shared()->code()->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
- *ic_with_typeinfo_count = info->ic_with_typeinfo_count();
+ *ic_with_type_info_count = info->ic_with_type_info_count();
*ic_total_count = info->ic_total_count();
}
*percentage = *ic_total_count > 0
- ? 100 * *ic_with_typeinfo_count / *ic_total_count
+ ? 100 * *ic_with_type_info_count / *ic_total_count
: 100;
}
@@ -259,13 +265,14 @@ void RuntimeProfiler::OptimizeNow() {
}
}
- if (function->IsMarkedForLazyRecompilation() &&
- function->shared()->code()->kind() == Code::FUNCTION) {
- Code* unoptimized = function->shared()->code();
- int nesting = unoptimized->allow_osr_at_loop_nesting_level();
+ Code* shared_code = function->shared()->code();
+ if (shared_code->kind() != Code::FUNCTION) continue;
+
+ if (function->IsMarkedForLazyRecompilation()) {
+ int nesting = shared_code->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
- unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
+ shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
}
// Do not record non-optimizable functions.
@@ -283,7 +290,7 @@ void RuntimeProfiler::OptimizeNow() {
}
if (FLAG_watch_ic_patching) {
- int ticks = function->shared()->profiler_ticks();
+ int ticks = shared_code->profiler_ticks();
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, total, percentage;
@@ -292,12 +299,10 @@ void RuntimeProfiler::OptimizeNow() {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, "hot and stable");
- } else if (ticks >= 100) {
- // If this function does not have enough type info, but has
- // seen a huge number of ticks, optimize it as it is.
+ } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
Optimize(function, "not much type info but very hot");
} else {
- function->shared()->set_profiler_ticks(ticks + 1);
+ shared_code->set_profiler_ticks(ticks + 1);
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function->PrintName();
@@ -306,7 +311,7 @@ void RuntimeProfiler::OptimizeNow() {
}
}
} else if (!any_ic_changed_ &&
- function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) {
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
Optimize(function, "small function");
@@ -319,7 +324,7 @@ void RuntimeProfiler::OptimizeNow() {
// then type info might already be stable and we can optimize now.
Optimize(function, "stable on startup");
} else {
- function->shared()->set_profiler_ticks(ticks + 1);
+ shared_code->set_profiler_ticks(ticks + 1);
}
} else { // !FLAG_watch_ic_patching
samples[sample_count++] = function;
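
Note: a worked pass through the new decision once ticks reaches kProfilerTicksBeforeOptimization (the percentage comparison itself sits in context elided from this hunk; values illustrative):

    // GetICCounts yields ic_with_type_info_count = 3, ic_total_count = 4:
    //   percentage = 100 * 3 / 4 = 75
    // percentage above the type-info threshold -> Optimize("hot and stable")
    // otherwise profiler_ticks keeps incrementing on shared_code (a single
    // byte, hence STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256)) until
    // ticks >= 100 -> Optimize("not much type info but very hot")
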
diff --git a/src/runtime.cc b/src/runtime.cc
index 320ab59f..82ffdd05 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -8043,8 +8043,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
ASSERT(args.length() == 1);
Handle<JSFunction> function = args.at<JSFunction>(0);
- function->shared()->set_profiler_ticks(0);
-
// If the function is not compiled ignore the lazy
// recompilation. This can happen if the debugger is activated and
// the function is returned to the not compiled state.
@@ -8067,6 +8065,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
function->ReplaceCode(function->shared()->code());
return function->code();
}
+ function->shared()->code()->set_profiler_ticks(0);
if (JSFunction::CompileOptimized(function,
AstNode::kNoNumber,
CLEAR_EXCEPTION)) {
diff --git a/src/spaces.cc b/src/spaces.cc
index defe3526..57b223fa 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1198,13 +1198,15 @@ MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
allocation_info_.limit + inline_allocation_limit_step_,
high);
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
+ heap()->incremental_marking()->Step(
+ bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = new_top;
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
+ heap()->incremental_marking()->Step(
+ bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
return AllocateRaw(size_in_bytes);
} else {
diff --git a/src/version.cc b/src/version.cc
index c4fc0d77..b7fd7cb4 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 3
#define MINOR_VERSION 9
#define BUILD_NUMBER 24
-#define PATCH_LEVEL 13
+#define PATCH_LEVEL 15
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index f07f6b6b..a275f550 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -730,8 +730,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the map of the object hasn't changed.
+ CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
+ : REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ miss_label, DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 999e2c66..f97bf172 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1521,17 +1521,13 @@ TEST(InstanceOfStubWriteBarrier) {
while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
!marking->IsStopped()) {
- marking->Step(MB);
+ // Discard any pending GC requests otherwise we will get GC when we enter
+ // code below.
+ marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
}
CHECK(marking->IsMarking());
- // Discard any pending GC requests otherwise we will get GC when we enter
- // code below.
- if (ISOLATE->stack_guard()->IsGCRequest()) {
- ISOLATE->stack_guard()->Continue(GC_REQUEST);
- }
-
{
v8::HandleScope scope;
v8::Handle<v8::Object> global = v8::Context::GetCurrent()->Global();
@@ -1597,3 +1593,90 @@ TEST(PrototypeTransitionClearing) {
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
CHECK(map->GetPrototypeTransition(*prototype)->IsMap());
}
+
+
+TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
+ i::FLAG_allow_natives_syntax = true;
+#ifdef DEBUG
+ i::FLAG_verify_heap = true;
+#endif
+ InitializeVM();
+ if (!i::V8::UseCrankshaft()) return;
+ v8::HandleScope outer_scope;
+
+ {
+ v8::HandleScope scope;
+ CompileRun(
+ "function f () {"
+ " var s = 0;"
+ " for (var i = 0; i < 100; i++) s += i;"
+ " return s;"
+ "}"
+ "f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+ }
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->IsOptimized());
+
+ IncrementalMarking* marking = HEAP->incremental_marking();
+ marking->Abort();
+ marking->Start();
+
+ // The following two calls will increment HEAP->global_ic_age().
+ const int kLongIdlePauseInMs = 1000;
+ v8::V8::ContextDisposedNotification();
+ v8::V8::IdleNotification(kLongIdlePauseInMs);
+
+ while (!marking->IsStopped() && !marking->IsComplete()) {
+ marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ }
+
+ CHECK_EQ(HEAP->global_ic_age(), f->shared()->ic_age());
+ CHECK_EQ(0, f->shared()->opt_count());
+ CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+}
+
+
+TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
+ i::FLAG_allow_natives_syntax = true;
+#ifdef DEBUG
+ i::FLAG_verify_heap = true;
+#endif
+ InitializeVM();
+ if (!i::V8::UseCrankshaft()) return;
+ v8::HandleScope outer_scope;
+
+ {
+ v8::HandleScope scope;
+ CompileRun(
+ "function f () {"
+ " var s = 0;"
+ " for (var i = 0; i < 100; i++) s += i;"
+ " return s;"
+ "}"
+ "f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+ }
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->IsOptimized());
+
+ HEAP->incremental_marking()->Abort();
+
+ // The following two calls will increment HEAP->global_ic_age().
+ // Since incremental marking is off, IdleNotification will do full GC.
+ const int kLongIdlePauseInMs = 1000;
+ v8::V8::ContextDisposedNotification();
+ v8::V8::IdleNotification(kLongIdlePauseInMs);
+
+ CHECK_EQ(HEAP->global_ic_age(), f->shared()->ic_age());
+ CHECK_EQ(0, f->shared()->opt_count());
+ CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+}
diff --git a/test/mjsunit/regress/regress-crbug-122271.js b/test/mjsunit/regress/regress-crbug-122271.js
new file mode 100644
index 00000000..3a99a7fa
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-122271.js
@@ -0,0 +1,49 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Tests that ElementsKind transitions and regular transitions don't
+// interfere badly with each other.
+
+var a = [0, 0, 0, 1];
+var b = [0, 0, 0, "one"];
+var c = [0, 0, 0, 1];
+c.foo = "baz";
+
+function foo(array) {
+ array.foo = "bar";
+}
+
+assertTrue(%HasFastSmiOnlyElements(a));
+assertTrue(%HasFastElements(b));
+
+foo(a);
+foo(b);
+
+assertTrue(%HasFastSmiOnlyElements(a));
+assertTrue(%HasFastElements(b));