author    Ben Murdoch <benm@google.com>  2012-04-11 10:23:59 +0100
committer Ben Murdoch <benm@google.com>  2012-04-11 15:40:41 +0100
commit    5d4cdbf7a67d3662fa0bee4efdb7edd8daec9b0b (patch)
tree      7b717e53b80c4a64bf9b723aabcf7c909ae3c243 /src/ia32
parent    c7cc028aaeedbbfa11c11d0b7b243b3d9e837ed9 (diff)
Merge V8 3.9 at 3.9.24.9
http://v8.googlecode.com/svn/branches/3.9@11260

Bug: 5688872
Change-Id: Iddd944e82189d92df3fc427dc5f0d3f1b2f0c6c8
Diffstat (limited to 'src/ia32')
-rw-r--r--  src/ia32/assembler-ia32-inl.h              2
-rw-r--r--  src/ia32/assembler-ia32.h                 38
-rw-r--r--  src/ia32/builtins-ia32.cc                113
-rw-r--r--  src/ia32/code-stubs-ia32.cc              351
-rw-r--r--  src/ia32/code-stubs-ia32.h                 3
-rw-r--r--  src/ia32/codegen-ia32.cc                 129
-rw-r--r--  src/ia32/debug-ia32.cc                    52
-rw-r--r--  src/ia32/deoptimizer-ia32.cc             205
-rw-r--r--  src/ia32/full-codegen-ia32.cc            371
-rw-r--r--  src/ia32/ic-ia32.cc                       64
-rw-r--r--  src/ia32/lithium-codegen-ia32.cc         439
-rw-r--r--  src/ia32/lithium-codegen-ia32.h            8
-rw-r--r--  src/ia32/lithium-ia32.cc                 155
-rw-r--r--  src/ia32/lithium-ia32.h                  175
-rw-r--r--  src/ia32/macro-assembler-ia32.cc         122
-rw-r--r--  src/ia32/macro-assembler-ia32.h           29
-rw-r--r--  src/ia32/regexp-macro-assembler-ia32.cc    4
-rw-r--r--  src/ia32/stub-cache-ia32.cc              387
18 files changed, 1913 insertions(+), 734 deletions(-)
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index ef109229..3cf0d005 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -88,7 +88,7 @@ Address RelocInfo::target_address_address() {
int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
+ return Assembler::kSpecialTargetSize;
}
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 9ed46fc3..929b485e 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -97,16 +97,25 @@ struct Register {
int code_;
};
-
-const Register eax = { 0 };
-const Register ecx = { 1 };
-const Register edx = { 2 };
-const Register ebx = { 3 };
-const Register esp = { 4 };
-const Register ebp = { 5 };
-const Register esi = { 6 };
-const Register edi = { 7 };
-const Register no_reg = { -1 };
+const int kRegister_eax_Code = 0;
+const int kRegister_ecx_Code = 1;
+const int kRegister_edx_Code = 2;
+const int kRegister_ebx_Code = 3;
+const int kRegister_esp_Code = 4;
+const int kRegister_ebp_Code = 5;
+const int kRegister_esi_Code = 6;
+const int kRegister_edi_Code = 7;
+const int kRegister_no_reg_Code = -1;
+
+const Register eax = { kRegister_eax_Code };
+const Register ecx = { kRegister_ecx_Code };
+const Register edx = { kRegister_edx_Code };
+const Register ebx = { kRegister_ebx_Code };
+const Register esp = { kRegister_esp_Code };
+const Register ebp = { kRegister_ebp_Code };
+const Register esi = { kRegister_esi_Code };
+const Register edi = { kRegister_edi_Code };
+const Register no_reg = { kRegister_no_reg_Code };
inline const char* Register::AllocationIndexToString(int index) {
@@ -589,8 +598,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
- inline static void set_target_at(Address instruction_payload,
- Address target) {
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Address target) {
set_target_address_at(instruction_payload, target);
}
@@ -601,8 +610,7 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, target);
}
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
+ static const int kSpecialTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
// and the return address
@@ -621,8 +629,6 @@ class Assembler : public AssemblerBase {
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
- // One byte opcode for test eax,0xXXXXXXXX.
- static const byte kTestEaxByte = 0xA9;
// One byte opcode for test al, 0xXX.
static const byte kTestAlByte = 0xA8;
// One byte opcode for nop.
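
Note on the register refactoring above: initializing the Register aggregates from named kRegister_*_Code constants lets other translation units build Register values inside static tables at compile time, which the write-barrier stub list in code-stubs-ia32.cc below does via a token-pasting REG macro. A minimal self-contained sketch of the pattern (StubEntry and kTable are illustrative names, not V8's):

    // Register stays a plain aggregate, so a braced integer code is a
    // valid static initializer wherever the code constant is visible.
    struct Register {
      bool is(Register reg) const { return code_ == reg.code_; }
      int code_;
    };

    const int kRegister_eax_Code = 0;
    const int kRegister_no_reg_Code = -1;

    #define REG(Name) { kRegister_ ## Name ## _Code }

    struct StubEntry { Register object; };

    static const StubEntry kTable[] = {
      REG(eax),    // expands to { kRegister_eax_Code }, i.e. { 0 }
      REG(no_reg)  // sentinel with code -1 terminates the table
    };
    #undef REG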
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 28c97f0e..a5d42cfb 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -74,50 +74,14 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
// -----------------------------------
- Label slow, non_function_call;
- // Check that function is not a smi.
- __ JumpIfSmi(edi, &non_function_call);
- // Check that function is a JSFunction.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
-
- // Jump to the function-specific construct stub.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
-
- // edi: called object
- // eax: number of arguments
- // ecx: object map
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing eax).
- __ Set(ebx, Immediate(0));
- Handle<Code> arguments_adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
@@ -360,6 +324,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
NullCallWrapper(), CALL_AS_METHOD);
}
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function && !count_constructions) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
+
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -454,8 +423,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
- __ call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ CallStub(&stub);
} else {
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION,
@@ -929,9 +898,8 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- // Load the initial map from the array function.
- __ mov(scratch1, FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
+
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -1034,10 +1002,7 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
- // Load the initial map from the array function.
- __ mov(elements_array,
- FieldOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadInitialArrayMap(array_function, scratch, elements_array);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
@@ -1128,7 +1093,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
- empty_array, not_empty_array;
+ empty_array, not_empty_array, finish, cant_transition_map, not_double;
// Push the constructor and argc. No need to tag argc as a smi, as there will
// be no garbage collection with this on the stack.
@@ -1287,6 +1252,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
// esp[8]: constructor (only if construct_call)
// esp[12]: return address
// esp[16]: last argument
+ __ bind(&finish);
__ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
__ pop(eax);
__ pop(ebx);
@@ -1295,9 +1261,43 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(ecx);
__ bind(&has_non_smi_element);
+ // Double values are handled by the runtime.
+ __ CheckMap(eax,
+ masm->isolate()->factory()->heap_number_map(),
+ &not_double,
+ DONT_DO_SMI_CHECK);
+ __ bind(&cant_transition_map);
// Throw away the array that's only been partially constructed.
__ pop(eax);
__ UndoAllocationInNewSpace(eax);
+ __ jmp(&prepare_generic_code_call);
+
+ __ bind(&not_double);
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ __ mov(ebx, Operand(esp, 0));
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ edi,
+ eax,
+ &cant_transition_map);
+ __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
+ __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Prepare to re-enter the loop
+ __ lea(edi, Operand(esp, last_arg_offset));
+
+ // Finish the array initialization loop.
+ Label loop2;
+ __ bind(&loop2);
+ __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+ __ mov(Operand(edx, 0), eax);
+ __ add(edx, Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(greater_equal, &loop2);
+ __ jmp(&finish);
// Restore argc and constructor before running the generic code.
__ bind(&prepare_generic_code_call);
@@ -1321,7 +1321,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray function shoud be a map.
+ // Initial map for the builtin InternalArray function should be a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
@@ -1334,8 +1334,8 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// function.
ArrayNativeCode(masm, false, &generic_array_code);
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
+ // Jump to the generic internal array code in case the specialized code cannot
+ // handle the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->InternalArrayCodeGeneric();
@@ -1355,7 +1355,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
if (FLAG_debug_code) {
- // Initial map for the builtin Array function shoud be a map.
+ // Initial map for the builtin Array function should be a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
@@ -1644,7 +1644,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ call(edx);
+ // Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ ret(0);
@@ -1699,8 +1701,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
- // If we decide not to perform on-stack replacement we perform a
- // stack guard check to enable interrupts.
+ // Insert a stack guard check so that if we decide not to perform
+ // on-stack replacement right away, the function calling this stub can
+ // still be interrupted.
__ bind(&stack_check);
Label ok;
ExternalReference stack_limit =
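
The transition path added to ArrayNativeCode above re-enters the initialization loop to finish copying arguments after switching the map to FAST_ELEMENTS. A hedged C equivalent of the &loop2 sequence (names are illustrative; edi points just past the last argument, ecx counts the remaining index down, edx is the write cursor):

    #include <cstddef>
    #include <cstdint>

    // Mirrors: mov eax,[edi+ecx*4] / mov [edx],eax / add edx,4 /
    // dec ecx / jge loop2 -- copy until the index goes negative.
    void FillRemainingElements(intptr_t* args, intptr_t* dst,
                               ptrdiff_t index) {
      for (; index >= 0; index--) {
        *dst++ = args[index];
      }
    }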
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index eded335e..4faa6a4b 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -2510,7 +2510,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
- GenerateOperation(masm);
+ GenerateOperation(masm, type_);
__ mov(Operand(ecx, 0), ebx);
__ mov(Operand(ecx, kIntSize), edx);
__ mov(Operand(ecx, 2 * kIntSize), eax);
@@ -2526,7 +2526,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
- GenerateOperation(masm);
+ GenerateOperation(masm, type_);
__ fstp_d(Operand(esp, 0));
__ movdbl(xmm1, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
@@ -2578,14 +2578,15 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+void TranscendentalCacheStub::GenerateOperation(
+ MacroAssembler* masm, TranscendentalCache::Type type) {
// Only free register is edi.
// Input value is on FP stack, and also in ebx/edx.
// Input value is possibly in xmm1.
// Address of result (a newly allocated HeapNumber) may be in eax.
- if (type_ == TranscendentalCache::SIN ||
- type_ == TranscendentalCache::COS ||
- type_ == TranscendentalCache::TAN) {
+ if (type == TranscendentalCache::SIN ||
+ type == TranscendentalCache::COS ||
+ type == TranscendentalCache::TAN) {
// Both fsin and fcos require arguments in the range +/-2^63 and
// return NaN for infinities and NaN. They can share all code except
// the actual fsin/fcos operation.
@@ -2649,7 +2650,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// FPU Stack: input % 2*pi
__ bind(&in_range);
- switch (type_) {
+ switch (type) {
case TranscendentalCache::SIN:
__ fsin();
break;
@@ -2667,7 +2668,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
}
__ bind(&done);
} else {
- ASSERT(type_ == TranscendentalCache::LOG);
+ ASSERT(type == TranscendentalCache::LOG);
__ fldln2();
__ fxch();
__ fyl2x();
@@ -3922,7 +3923,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Throw(eax);
__ bind(&throw_termination_exception);
- __ ThrowUncatchable(TERMINATION, eax);
+ __ ThrowUncatchable(eax);
__ bind(&failure);
// For failure to match, return null.
@@ -4573,30 +4574,51 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
-void CallFunctionStub::FinishCode(Handle<Code> code) {
- code->set_has_function_cache(RecordCallTarget());
+void InterruptStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
}
-void CallFunctionStub::Clear(Heap* heap, Address address) {
- ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
- // 1 ~ size of the test eax opcode.
- Object* cell = Memory::Object_at(address + kPointerSize + 1);
- // Low-level because clearing happens during GC.
- reinterpret_cast<JSGlobalPropertyCell*>(cell)->set_value(
- RawUninitializedSentinel(heap));
-}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // ebx : cache cell for call target
+ // edi : the function to call
+ Isolate* isolate = masm->isolate();
+ Label initialize, done;
+
+ // Load the cache state into ecx.
+ __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(ecx, edi);
+ __ j(equal, &done, Label::kNear);
+ __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ __ j(equal, &done, Label::kNear);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
+ __ j(equal, &initialize, Label::kNear);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kNear);
+ // An uninitialized cache is patched with the function.
+ __ bind(&initialize);
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
+ // No need for a write barrier here - cells are rescanned.
-Object* CallFunctionStub::GetCachedValue(Address address) {
- ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
- // 1 ~ size of the test eax opcode.
- Object* cell = Memory::Object_at(address + kPointerSize + 1);
- return JSGlobalPropertyCell::cast(cell)->value();
+ __ bind(&done);
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function;
@@ -4613,9 +4635,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ cmp(eax, isolate->factory()->the_hole_value());
__ j(not_equal, &receiver_ok, Label::kNear);
// Patch the receiver on the stack with the global receiver object.
- __ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
__ bind(&receiver_ok);
}
@@ -4626,38 +4648,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- // Cache the called function in a global property cell in the
- // instruction stream after the call. Cache states are uninitialized,
- // monomorphic (indicated by a JSFunction), and megamorphic.
- Label initialize, call;
- // Load the cache cell address into ebx and the cache state into ecx.
- __ mov(ebx, Operand(esp, 0)); // Return address.
- __ mov(ebx, Operand(ebx, 1)); // 1 ~ sizeof 'test eax' opcode in bytes.
- __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(ecx, edi);
- __ j(equal, &call, Label::kNear);
- __ cmp(ecx, Immediate(MegamorphicSentinel(isolate)));
- __ j(equal, &call, Label::kNear);
-
- // A monomorphic miss (i.e, here the cache is not uninitialized) goes
- // megamorphic.
- __ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
- __ j(equal, &initialize, Label::kNear);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(MegamorphicSentinel(isolate)));
- __ jmp(&call, Label::kNear);
-
- // An uninitialized cache is patched with the function.
- __ bind(&initialize);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&call);
+ GenerateRecordCallTarget(masm);
}
// Fast-case: Just invoke the function.
@@ -4684,13 +4675,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
- // non-function case.
- __ mov(ebx, Operand(esp, 0));
- __ mov(ebx, Operand(ebx, 1));
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write barrier is needed.
+ // non-function case. MegamorphicSentinel is an immortal immovable
+ // object (undefined) so no write barrier is needed.
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(MegamorphicSentinel(isolate)));
+ Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
}
// Check for function proxy.
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
@@ -4720,6 +4708,50 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // eax : number of arguments
+ // ebx : cache cell for call target
+ // edi : constructor function
+ Label slow, non_function_call;
+
+ // Check that function is not a smi.
+ __ JumpIfSmi(edi, &non_function_call);
+ // Check that function is a JSFunction.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
+
+ // Jump to the function-specific construct stub.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+ __ jmp(ebx);
+
+ // edi: called object
+ // eax: number of arguments
+ // ecx: object map
+ Label do_call;
+ __ bind(&slow);
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function_call);
+ __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
+ __ bind(&non_function_call);
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
+ // Set expected number of arguments to zero (not changing eax).
+ __ Set(ebx, Immediate(0));
+ Handle<Code> arguments_adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ SetCallKind(ecx, CALL_AS_METHOD);
+ __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
+}
+
+
bool CEntryStub::NeedsImmovableCode() {
return false;
}
@@ -4754,11 +4786,6 @@ void CEntryStub::GenerateAheadOfTime() {
}
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- __ Throw(eax);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -4877,12 +4904,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, eax);
-}
-
-
void CEntryStub::Generate(MacroAssembler* masm) {
// eax: number of arguments including receiver
// ebx: pointer to C function (C callee-saved)
@@ -4936,13 +4957,24 @@ void CEntryStub::Generate(MacroAssembler* masm) {
true);
__ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate);
+ __ mov(Operand::StaticVariable(external_caught), Immediate(false));
+
+ // Set pending exception and eax to out of memory exception.
+ ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+ isolate);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ // Fall through to the next label.
__ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
+ __ ThrowUncatchable(eax);
__ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
+ __ Throw(eax);
}
@@ -4996,7 +5028,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Invoke: Link this frame into the handler chain. There's only one
// handler block in this code object, so its index is 0.
__ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
// Clear any pending exceptions.
__ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
@@ -6123,7 +6155,6 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// ebx: instance type
// Calculate length of sub string using the smi values.
- Label result_longer_than_two;
__ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
__ JumpIfNotSmi(ecx, &runtime);
__ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
@@ -6136,43 +6167,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&not_original_string);
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked for in the symbol cache.
- __ cmp(ecx, Immediate(Smi::FromInt(2)));
- __ j(greater, &result_longer_than_two);
- __ j(less, &runtime);
-
- // Sub string of length 2 requested.
- // eax: string
- // ebx: instance type
- // ecx: sub string length (smi, value is 2)
- // edx: from index (smi)
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
- // Get the two characters forming the sub string.
- __ SmiUntag(edx); // From index is no longer smi.
- __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx,
- FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label combine_two_char, save_two_char;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi, &combine_two_char, &save_two_char);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&combine_two_char);
- __ shl(ecx, kBitsPerByte);
- __ or_(ebx, ecx);
- __ bind(&save_two_char);
- __ AllocateAsciiString(eax, 2, ecx, edx, &runtime);
- __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&result_longer_than_two);
// eax: string
// ebx: instance type
// ecx: sub string length (smi)
@@ -6239,11 +6234,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
__ bind(&set_slice_header);
- __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
__ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
- __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
__ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
+ __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
+ __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
@@ -6467,7 +6462,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
__ mov_b(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
__ j(not_equal, chars_not_equal, chars_not_equal_near);
- __ add(index, Immediate(1));
+ __ inc(index);
__ j(not_zero, &loop);
}
@@ -6541,16 +6536,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
- Label unordered;
+ Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &generic_stub, Label::kNear);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or SSE2 or CMOV is unsupported.
@@ -6576,14 +6571,28 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
-
- __ bind(&unordered);
}
+ __ bind(&unordered);
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+ __ j(not_equal, &miss);
+ __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ jmp(&unordered);
+ }
+
+ __ bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
+ __ j(equal, &unordered);
+ }
+
__ bind(&miss);
GenerateMiss(masm);
}
@@ -6636,9 +6645,10 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::STRINGS);
- ASSERT(GetCondition() == equal);
Label miss;
+ bool equality = Token::IsEqualityOp(op_);
+
// Registers containing left and right operands respectively.
Register left = edx;
Register right = eax;
@@ -6677,25 +6687,33 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&not_same);
// Check that both strings are symbols. If they are, we're done
- // because we already know they are not identical.
- Label do_compare;
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsSymbolMask));
- __ j(zero, &do_compare, Label::kNear);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(eax));
- __ ret(0);
+ // because we already know they are not identical. But in the case of
+ // non-equality compare, we still need to determine the order.
+ if (equality) {
+ Label do_compare;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsSymbolMask));
+ __ j(zero, &do_compare, Label::kNear);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ ret(0);
+ __ bind(&do_compare);
+ }
// Check that both strings are sequential ASCII.
Label runtime;
- __ bind(&do_compare);
__ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
// Compare flat ASCII strings. Returns when done.
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2);
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, left, right, tmp1, tmp2, tmp3);
+ }
// Handle more complex cases in runtime.
__ bind(&runtime);
@@ -6703,7 +6721,11 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ push(left);
__ push(right);
__ push(tmp1);
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -6792,7 +6814,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
- // (their names are the null value).
+ // (their names are the hole value).
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
Register index = r0;
@@ -6818,11 +6840,17 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ cmp(entity_name, Handle<String>(name));
__ j(equal, miss);
+ Label the_hole;
+ // Check for the hole and skip.
+ __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
+ __ j(equal, &the_hole, Label::kNear);
+
// Check if the entry name is not a symbol.
__ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
kIsSymbolMask);
__ j(zero, miss);
+ __ bind(&the_hole);
}
StringDictionaryLookupStub stub(properties,
@@ -6996,42 +7024,47 @@ struct AheadOfTimeWriteBarrierStubList {
};
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { ebx, eax, edi, EMIT_REMEMBERED_SET },
+ { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
- { ebx, ecx, edx, EMIT_REMEMBERED_SET },
- { ebx, edi, edx, OMIT_REMEMBERED_SET },
+ { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
+ { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal and CallFunctionStub.
- { ebx, ecx, edx, OMIT_REMEMBERED_SET },
+ { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField and
// KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { edx, ecx, ebx, EMIT_REMEMBERED_SET },
+ { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
// GenerateStoreField calls the stub with two different permutations of
// registers. This is the second.
- { ebx, ecx, edx, EMIT_REMEMBERED_SET },
+ { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
// StoreIC::GenerateNormal via GenerateDictionaryStore
- { ebx, edi, edx, EMIT_REMEMBERED_SET },
+ { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
// KeyedStoreIC::GenerateGeneric.
- { ebx, edx, ecx, EMIT_REMEMBERED_SET},
+ { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { edi, edx, ecx, EMIT_REMEMBERED_SET},
+ { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
+ { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
- { edx, ebx, edi, EMIT_REMEMBERED_SET},
+ { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
+ { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
- { eax, edx, esi, EMIT_REMEMBERED_SET},
- { edx, eax, edi, EMIT_REMEMBERED_SET},
+ { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
+ { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
- { ebx, eax, ecx, EMIT_REMEMBERED_SET},
+ { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
// Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+ { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
+#undef REG
bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
@@ -7059,7 +7092,7 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,
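
The heart of the new call-target feedback is the three-state cell that GenerateRecordCallTarget maintains: uninitialized, monomorphic (a concrete JSFunction), and megamorphic, where a monomorphic hit leaves the state untouched and a monomorphic miss collapses to megamorphic. A minimal C++ sketch of the transition logic the assembly encodes (Cell and the sentinel parameters are stand-ins for JSGlobalPropertyCell and the TypeFeedbackCells sentinels):

    struct Cell { const void* value; };

    void RecordCallTarget(Cell* cell, const void* function,
                          const void* uninitialized_sentinel,
                          const void* megamorphic_sentinel) {
      if (cell->value == function) return;              // monomorphic hit
      if (cell->value == megamorphic_sentinel) return;  // stays megamorphic
      if (cell->value == uninitialized_sentinel) {
        cell->value = function;        // first observed target: monomorphic
      } else {
        cell->value = megamorphic_sentinel;  // different target: megamorphic
      }
    }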
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 4d23c3a1..803a711d 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -49,6 +49,8 @@ class TranscendentalCacheStub: public CodeStub {
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm);
+ static void GenerateOperation(MacroAssembler* masm,
+ TranscendentalCache::Type type);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;
@@ -56,7 +58,6 @@ class TranscendentalCacheStub: public CodeStub {
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm);
};
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index e5ca02c4..ea619103 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen.h"
+#include "heap.h"
#include "macro-assembler.h"
namespace v8 {
@@ -55,6 +56,85 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
+
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
+ if (buffer == NULL) {
+ // Fallback to library function if function cannot be created.
+ switch (type) {
+ case TranscendentalCache::SIN: return &sin;
+ case TranscendentalCache::COS: return &cos;
+ case TranscendentalCache::TAN: return &tan;
+ case TranscendentalCache::LOG: return &log;
+ default: UNIMPLEMENTED();
+ }
+ }
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // esp[1 * kPointerSize]: raw double input
+ // esp[0 * kPointerSize]: return address
+ // Move double input into registers.
+
+ __ push(ebx);
+ __ push(edx);
+ __ push(edi);
+ __ fld_d(Operand(esp, 4 * kPointerSize));
+ __ mov(ebx, Operand(esp, 4 * kPointerSize));
+ __ mov(edx, Operand(esp, 5 * kPointerSize));
+ TranscendentalCacheStub::GenerateOperation(&masm, type);
+ // The return value is expected to be on ST(0) of the FPU stack.
+ __ pop(edi);
+ __ pop(edx);
+ __ pop(ebx);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(desc.reloc_size == 0);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+ &actual_size,
+ true));
+ // If SSE2 is not available, use libc's implementation to ensure consistency,
+ // since fullcodegen-generated code calls into the runtime in that case.
+ if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // esp[1 * kPointerSize]: raw double input
+ // esp[0 * kPointerSize]: return address
+ // Move double input into registers.
+ {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+ __ sqrtsd(xmm0, xmm0);
+ __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+ // Load result into floating point register as return value.
+ __ fld_d(Operand(esp, 1 * kPointerSize));
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(desc.reloc_size == 0);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
@@ -301,11 +381,17 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label loop, entry, convert_hole, gc_required;
+ Label loop, entry, convert_hole, gc_required, only_change_map;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(equal, &only_change_map);
+
__ push(eax);
__ push(ebx);
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
@@ -399,6 +485,11 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ pop(ebx);
__ pop(eax);
+
+ // Restore esi.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&only_change_map);
// eax: value
// ebx: target map
// Set transitioned map.
@@ -408,10 +499,8 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
ebx,
edi,
kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
+ OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- // Restore esi.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@@ -424,12 +513,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label loop, entry, convert_hole, gc_required;
+ Label loop, entry, convert_hole, gc_required, only_change_map, success;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(equal, &only_change_map);
+
__ push(eax);
__ push(edx);
__ push(ebx);
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
// Allocate new FixedArray.
@@ -446,6 +541,20 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ jmp(&entry);
+ // ebx: target map
+ // edx: receiver
+ // Set transitioned map.
+ __ bind(&only_change_map);
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&success);
+
// Call into runtime if GC is required.
__ bind(&gc_required);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -507,7 +616,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
ebx,
edi,
kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
+ OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
@@ -522,6 +631,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Restore registers.
__ pop(eax);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&success);
}
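
CreateSqrtFunction and CreateTranscendentalFunction return a plain function pointer with the same ABI as the libc fallback they may substitute, so callers can cache whichever comes back. A hedged usage sketch (FastSqrt is an illustrative wrapper, not V8 code):

    typedef double (*UnaryMathFunction)(double x);

    static UnaryMathFunction fast_sqrt = NULL;

    double FastSqrt(double x) {
      // CreateSqrtFunction() itself returns &sqrt when SSE2 is unavailable
      // or the executable buffer cannot be allocated, so no separate
      // fallback path is needed here.
      if (fast_sqrt == NULL) fast_sqrt = CreateSqrtFunction();
      return fast_sqrt(x);
    }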
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index b37b54b8..d13fa759 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -222,19 +222,6 @@ void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-ia32.cc).
- // eax is the actual number of arguments not encoded as a smi see comment
- // above IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
-}
-
-
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-ia32.cc).
// ----------- S t a t e -------------
@@ -245,7 +232,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
+ // Register state for CallFunctionStub (from code-stubs-ia32.cc).
// ----------- S t a t e -------------
// -- edi: function
// -----------------------------------
@@ -253,6 +240,43 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-ia32.cc).
+ // ----------- S t a t e -------------
+ // -- ebx: cache cell for call target
+ // -- edi: function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallConstructStub (from code-stubs-ia32.cc).
+ // eax is the actual number of arguments, not encoded as a smi; see the
+ // comment above the IC call.
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments (not smi)
+ // -- edi: constructor function
+ // -----------------------------------
+ // The number of arguments in eax is not smi encoded.
+ Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallConstructStub (from code-stubs-ia32.cc).
+ // eax is the actual number of arguments, not encoded as a smi; see the
+ // comment above the IC call.
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments (not smi)
+ // -- ebx: cache cell for call target
+ // -- edi: constructor function
+ // -----------------------------------
+ // The number of arguments in eax is not smi encoded.
+ Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
+}
+
+
void Debug::GenerateSlot(MacroAssembler* masm) {
// Generate enough nop's to make space for a call instruction.
Label check_codesize;
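
In the helpers above, the first argument to Generate_DebugBreakCallHelper is a register list: each live register contributes the bit 1 << code, so ebx.bit() | edi.bit() names exactly the object-holding registers the debug break must preserve. A small sketch of the convention (type and variable names illustrative):

    #include <stdint.h>

    typedef uint32_t RegList;

    struct Reg {
      int code_;
      RegList bit() const { return 1u << code_; }
    };

    const Reg ebx_reg = { 3 };
    const Reg edi_reg = { 7 };

    // Bits 3 and 7 set (0x88): ebx and edi are live across the break.
    const RegList object_regs = ebx_reg.bit() | edi_reg.bit();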
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 14f26757..92d7cc1c 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -205,13 +205,22 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x13;
+static const byte kJaeInstruction = 0x73;
+static const byte kJaeOffset = 0x07;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
- ASSERT(check_code->entry() ==
- Assembler::target_address_at(call_target_address));
+ ASSERT_EQ(check_code->entry(),
+ Assembler::target_address_at(call_target_address));
// The stack check code matches the pattern:
//
// cmp esp, <limit>
@@ -228,11 +237,17 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
- ASSERT(*(call_target_address - 3) == 0x73 && // jae
- *(call_target_address - 2) == 0x07 && // offset
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x66; // 2 byte nop part 1
- *(call_target_address - 2) = 0x90; // 2 byte nop part 2
+
+ if (FLAG_count_based_interrupts) {
+ ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
+ ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
+ } else {
+ ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
+ ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
+ }
+ ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
+ *(call_target_address - 3) = kNopByteOne;
+ *(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -246,15 +261,21 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
+ ASSERT_EQ(replacement_code->entry(),
+ Assembler::target_address_at(call_target_address));
+
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
- *(call_target_address - 2) == 0x90 && // 2 byte nop part 2
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x73; // jae
- *(call_target_address - 2) = 0x07; // offset
+ ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
+ ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
+ ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
+ if (FLAG_count_based_interrupts) {
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
+ } else {
+ *(call_target_address - 3) = kJaeInstruction;
+ *(call_target_address - 2) = kJaeOffset;
+ }
Assembler::set_target_address_at(call_target_address,
check_code->entry());
@@ -406,14 +427,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
- // All OSR stack frames are dynamically aligned to an 8-byte boundary.
- int frame_pointer = input_->GetRegister(ebp.code());
- if ((frame_pointer & 0x4) == 0) {
- // Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
- frame_pointer -= kPointerSize;
- has_alignment_padding_ = 1;
- }
- output_[0]->SetRegister(ebp.code(), frame_pointer);
+ output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
@@ -446,7 +460,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -468,16 +481,13 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
- input_offset -= (parameter_count * kPointerSize);
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
@@ -487,7 +497,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
@@ -499,7 +508,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// A marker value is used in place of the context.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
@@ -510,7 +518,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@@ -520,7 +527,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Number of incoming arguments.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@@ -540,6 +546,110 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating construct stub => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = 6 * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::CONSTRUCT);
+
+ // Construct stub can not be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ uint32_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be gotten from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ // The newly allocated object was passed as receiver in the artificial
+ // constructor stub environment created by HEnvironment::CopyForInlining().
+ output_offset -= kPointerSize;
+ value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ construct_stub->instruction_start() +
+ isolate_->heap()->construct_stub_deopt_pc_offset()->value());
+ output_frame->SetPc(pc);
+}
+
+
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
int node_id = iterator->Next();
@@ -575,11 +685,9 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
- // If the optimized frame had alignment padding, adjust the frame pointer
- // to point to the new position of the old frame pointer after padding
- // is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
+ // 2 = context and function in the frame.
+ top_address =
+ input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@@ -630,9 +738,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost ||
- input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
- == fp_value);
+ ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
@@ -651,6 +757,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = reinterpret_cast<uint32_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
+ output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(esi.code(), value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
@@ -821,17 +928,6 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
- // If frame was dynamically aligned, pop padding.
- Label sentinel, sentinel_done;
- __ pop(ecx);
- __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
- __ j(equal, &sentinel);
- __ push(ecx);
- __ jmp(&sentinel_done);
- __ bind(&sentinel);
- __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(1));
- __ bind(&sentinel_done);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, ebx);
@@ -843,17 +939,6 @@ void Deoptimizer::EntryGenerator::Generate() {
}
__ pop(eax);
- if (type() == OSR) {
- // If alignment padding is added, push the sentinel.
- Label no_osr_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_osr_padding, Label::kNear);
- __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
- __ bind(&no_osr_padding);
- }
-
-
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index ede810c7..62a2c2af 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,6 +34,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
+#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -100,6 +101,13 @@ class JumpPatchSite BASE_EMBEDDED {
};
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
+int FullCodeGenerator::self_optimization_header_size() {
+ UNREACHABLE();
+ return 13;
+}
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -113,12 +121,12 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
- ASSERT(info_ == NULL);
- info_ = info;
- scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -261,11 +269,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- int ignored = 0;
VariableProxy* proxy = scope()->function();
ASSERT(proxy->var()->mode() == CONST ||
proxy->var()->mode() == CONST_HARMONY);
- EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
+ ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL);
}
VisitDeclarations(scope()->declarations());
}
@@ -303,15 +311,62 @@ void FullCodeGenerator::ClearAccumulator() {
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ mov(ebx, Immediate(profiling_counter_));
+ __ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(delta)));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+ // Self-optimization is a one-off thing: if it fails, don't try again.
+ reset_value = Smi::kMaxValue;
+ }
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = 10;
+ }
+ __ mov(ebx, Immediate(profiling_counter_));
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(reset_value)));
+}
+
+
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 100;
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ // Count-based interrupts fire often enough while they are enabled
+ // that the additional stack checks are unnecessary (they would
+ // only check for interrupts).
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ }
+
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
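The back-edge weight above scales the budget decrement with the amount of code in the loop body. A minimal sketch of the same computation (constants mirror kMaxBackEdgeWeight and kBackEdgeDistanceDivisor; illustrative only, not part of the patch):

  // Clamps distance / 100 into [1, 127]; with FLAG_interrupt_budget B, a
  // tight loop reaches the InterruptStub after roughly B iterations, a
  // 5000-byte loop body after about B / 50.
  static int BackEdgeWeight(int distance_in_bytes) {
    const int kMaxBackEdgeWeight = 127;
    const int kBackEdgeDistanceDivisor = 100;
    int weight = distance_in_bytes / kBackEdgeDistanceDivisor;
    if (weight < 1) weight = 1;
    if (weight > kMaxBackEdgeWeight) weight = kMaxBackEdgeWeight;
    return weight;  // BackEdgeWeight(5000) == 50
  }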
@@ -324,6 +379,10 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
ASSERT(loop_depth() > 0);
__ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
+
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -344,6 +403,31 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(eax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else if (FLAG_weighted_back_edges) {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ __ push(eax);
+ if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+ } else {
+ InterruptStub stub;
+ __ CallStub(&stub);
+ }
+ __ pop(eax);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+ }
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
@@ -681,8 +765,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
VariableMode mode,
- FunctionLiteral* function,
- int* global_count) {
+ FunctionLiteral* function) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
@@ -691,7 +774,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
(mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
- ++(*global_count);
+ ++global_count_;
break;
case Variable::PARAMETER:
@@ -771,9 +854,6 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
}
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
@@ -835,7 +915,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ test(eax, eax);
__ j(not_equal, &next_test);
@@ -884,6 +964,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, &exit);
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(eax, &convert, Label::kNear);
@@ -896,7 +978,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax);
// Check for proxies.
- Label call_runtime;
+ Label call_runtime, use_cache, fixed_array;
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
__ j(below_equal, &call_runtime);
@@ -905,61 +987,19 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next;
- __ mov(ecx, eax);
- __ bind(&next);
-
- // Check that there are no elements. Register ecx contains the
- // current JS object we've reached through the prototype chain.
- __ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
- isolate()->factory()->empty_fixed_array());
- __ j(not_equal, &call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in ebx for the subsequent
- // prototype load.
- __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
- __ JumpIfSmi(edx, &call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (edx). This is the case if the next enumeration
- // index field does not contain a smi.
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- __ JumpIfSmi(edx, &call_runtime);
-
- // For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- __ cmp(ecx, eax);
- __ j(equal, &check_prototype, Label::kNear);
- __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(edx, isolate()->factory()->empty_fixed_array());
- __ j(not_equal, &call_runtime);
-
- // Load the prototype from the map and loop if non-null.
- __ bind(&check_prototype);
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ cmp(ecx, isolate()->factory()->null_value());
- __ j(not_equal, &next);
+ __ CheckEnumCache(&call_runtime);
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Label use_cache;
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(eax); // Duplicate the enumerable object on the stack.
+ __ push(eax);
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- Label fixed_array;
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
- __ j(not_equal, &fixed_array, Label::kNear);
+ __ j(not_equal, &fixed_array);
+
// We got a map in register eax. Get the enumeration cache from it.
__ bind(&use_cache);
@@ -978,6 +1018,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register eax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
+
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(
+ Handle<Object>(
+ Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ __ LoadHeapObject(ebx, cell);
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -992,6 +1042,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
__ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
__ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
@@ -1034,7 +1085,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(result_register(), ebx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->AssignmentId());
+ EmitAssignment(stmt->each());
}
// Generate code for the body of the loop.
@@ -1045,7 +1096,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.continue_label());
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
@@ -1053,6 +1104,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(esp, Immediate(5 * kPointerSize));
// Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1148,7 +1200,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- __ call(ic, mode);
+ CallIC(ic, mode);
}
@@ -1229,7 +1281,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(eax);
break;
}
@@ -1372,6 +1424,15 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ push(Immediate(isolate()->factory()->null_value()));
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1406,6 +1467,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1417,6 +1479,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
@@ -1429,7 +1493,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ call(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1448,20 +1512,28 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(3);
}
break;
- case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- __ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0)));
- VisitForStackValue(value);
- __ CallRuntime(Runtime::kDefineAccessor, 4);
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
break;
- default: UNREACHABLE();
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
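The table-driven loop above means a literal such as { get x() {...}, set x(v) {...} } costs one DefineOrRedefineAccessorProperty call instead of two separate runtime calls. A stand-in for the AccessorTable lookup (types hypothetical; illustrative only, not part of the patch):

  #include <map>
  #include <string>

  struct AccessorPair {
    const void* getter = nullptr;  // stays null if the literal has no getter
    const void* setter = nullptr;  // stays null if the literal has no setter
  };

  // Inserts an empty pair on first sight of a key, so the second accessor
  // for the same key fills the other half of the same entry.
  static AccessorPair* Lookup(std::map<std::string, AccessorPair>* table,
                              const std::string& key) {
    return &(*table)[key];
  }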
if (expr->has_function()) {
ASSERT(result_saved);
__ push(Operand(esp, 0));
@@ -1693,14 +1765,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1721,7 +1793,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpStub stub(op, mode);
- __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1806,13 +1878,13 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(edx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(eax);
}
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@@ -1847,7 +1919,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ call(ic);
+ CallIC(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1860,11 +1932,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ call(ic);
+ CallIC(ic);
break;
}
}
- PrepareForBailoutForId(bailout_ast_id, TOS_REG);
context()->Plug(eax);
}
@@ -1878,7 +1949,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
@@ -1987,7 +2058,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2027,7 +2098,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2061,6 +2132,16 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
}
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
+ ic_total_count_++;
+ __ call(code, rmode, ast_id);
+}
+
+
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
@@ -2077,7 +2158,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
SetSourcePosition(expr->position());
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2109,7 +2190,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2130,28 +2211,20 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
SetSourcePosition(expr->position());
// Record call targets in unoptimized code, but not in the snapshot.
- bool record_call_target = !Serializer::enabled();
- if (record_call_target) {
+ if (!Serializer::enabled()) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- }
- CallFunctionStub stub(arg_count, flags);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->id());
- if (record_call_target) {
- // There is a one element cache in the instruction stream.
-#ifdef DEBUG
- int return_site_offset = masm()->pc_offset();
-#endif
Handle<Object> uninitialized =
- CallFunctionStub::UninitializedSentinel(isolate());
+ TypeFeedbackCells::UninitializedSentinel(isolate());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- __ test(eax, Immediate(cell));
- // Patching code in the stub assumes the opcode is 1 byte and there is
- // word for a pointer in the operand.
- ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize);
+ RecordTypeFeedbackCell(expr->id(), cell);
+ __ mov(ebx, cell);
}
+ CallFunctionStub stub(arg_count, flags);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ CallStub(&stub, expr->id());
+
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2325,9 +2398,23 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ SafeSet(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
- Handle<Code> construct_builtin =
- isolate()->builtins()->JSConstructCall();
- __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+ // Record call targets in unoptimized code, but not in the snapshot.
+ CallFunctionFlags flags;
+ if (!Serializer::enabled()) {
+ flags = RECORD_CALL_TARGET;
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->id(), cell);
+ __ mov(ebx, cell);
+ } else {
+ flags = NO_CALL_FUNCTION_FLAGS;
+ }
+
+ CallConstructStub stub(flags);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(eax);
}
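Outside the snapshot, each construct site now owns a JSGlobalPropertyCell, seeded with the uninitialized sentinel and passed to the stub in ebx. A plausible model of how such a feedback cell evolves (illustrative only, not the stub's literal logic):

  enum FeedbackState { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };

  struct CallSiteCell {
    FeedbackState state = UNINITIALIZED;
    const void* target = nullptr;
  };

  static void RecordCallTarget(CallSiteCell* cell, const void* callee) {
    if (cell->state == UNINITIALIZED) {
      cell->state = MONOMORPHIC;   // remember the single target seen so far
      cell->target = callee;
    } else if (cell->state == MONOMORPHIC && cell->target != callee) {
      cell->state = MEGAMORPHIC;   // two distinct targets: stop specializing
      cell->target = nullptr;
    }
  }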
@@ -2875,6 +2962,48 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done;
+ Register object = eax;
+ Register result = eax;
+ Register scratch = ecx;
+
+#ifdef DEBUG
+ __ AbortIfSmi(object);
+ __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+ __ Assert(equal, "Trying to get date field from non-date.");
+#endif
+
+ if (index->value() == 0) {
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch, Operand::StaticVariable(stamp));
+ __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ j(not_equal, &runtime, Label::kNear);
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ mov(Operand(esp, 0), object);
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+ context()->Plug(result);
+}
+
+
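The stamp comparison above keeps cached date components valid only until the isolate's date cache is reset. A minimal model of the lookup (field names hypothetical; illustrative only, not part of the patch):

  struct DateSketch { int cache_stamp; double fields[9]; };

  double RecomputeDateField(const DateSketch& d, int index);  // the C call

  static double GetDateField(const DateSketch& d, int index,
                             int global_stamp, int first_uncached) {
    if (index == 0) return d.fields[0];   // the time value itself, always valid
    if (index < first_uncached && d.cache_stamp == global_stamp)
      return d.fields[index];             // cached year/month/day/...
    return RecomputeDateField(d, index);  // stale stamp or uncached field
  }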
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3691,7 +3820,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
@@ -3849,7 +3978,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(eax);
}
@@ -3969,7 +4098,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4003,7 +4132,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4020,7 +4149,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4048,7 +4177,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ call(ic);
+ CallIC(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4228,7 +4357,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 3a937900..33f247a3 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -765,7 +765,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -----------------------------------
Label slow, fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
- Label check_if_double_array, array, extra;
+ Label check_if_double_array, array, extra, transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
// Check that the object isn't a smi.
__ JumpIfSmi(edx, &slow);
@@ -862,11 +863,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ret(0);
__ bind(&non_smi_value);
+ // Escape to the elements-kind transition case.
+ // Escape to elements kind transition case.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &slow, Label::kNear);
+ __ CheckFastObjectElements(edi, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
__ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
// Update write barrier for the elements array address.
__ mov(edx, eax); // Preserve the value which is returned.
@@ -882,8 +884,54 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&fast_double_without_map_check);
// If the value is a number, store it as a double in the FastDoubleElements
// array.
- __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
+ __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0,
+ &transition_double_elements, false);
__ ret(0);
+
+ __ bind(&transition_smi_elements);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+ // Transition the array appropriately depending on the value type.
+ __ CheckMap(eax,
+ masm->isolate()->factory()->heap_number_map(),
+ &non_double_value,
+ DONT_DO_SMI_CHECK);
+
+ // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ ebx,
+ edi,
+ &slow);
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double: FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ ebx,
+ edi,
+ &slow);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+ // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ ebx,
+ edi,
+ &slow);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
}
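The three new labels implement a one-way elements-kind lattice. The decision, restated in plain C++ (enum names analogous to the real kinds; illustrative only, not part of the patch):

  enum Kind { FAST_SMI_ONLY, FAST_DOUBLE, FAST_OBJECT };

  static Kind KindAfterStore(Kind current, bool is_smi, bool is_heap_number) {
    if (is_smi) return current;              // smis fit in every fast kind
    if (current == FAST_SMI_ONLY)            // transition_smi_elements
      return is_heap_number ? FAST_DOUBLE    // unboxed double storage
                            : FAST_OBJECT;   // arbitrary tagged values
    if (current == FAST_DOUBLE && !is_heap_number)
      return FAST_OBJECT;                    // transition_double_elements
    return current;                          // already general enough
  }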
@@ -1591,6 +1639,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
__ pop(ebx);
__ push(edx);
__ push(ebx); // return address
+ // Leave the code managed by the register allocator and return to the
+ // convention of using esi as the context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
}
@@ -1614,6 +1665,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
__ pop(ebx);
__ push(edx);
__ push(ebx); // return address
+ // Leave the code managed by the register allocator and return to the
+ // convention of using esi as the context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
}
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 46a35b6e..8fb4c791 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -67,7 +67,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Code generation", chunk());
+ HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope(SSE2);
@@ -79,9 +79,6 @@ bool LCodeGen::GenerateCode() {
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
- info()->osr_ast_id() != AstNode::kNoNumber;
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -156,29 +153,6 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
- if (dynamic_frame_alignment_) {
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0),
- Immediate(isolate()->factory()->frame_alignment_marker()));
-
- __ bind(&do_not_pad);
- }
-
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@@ -394,10 +368,18 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
- if (environment->is_arguments_adaptor()) {
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- } else {
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ default:
+ UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@@ -550,7 +532,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
- if (!e->is_arguments_adaptor()) {
+ if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
@@ -571,7 +553,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -622,7 +603,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
- ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
@@ -1266,6 +1246,7 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->TempAt(0));
ASSERT(input.is(result));
+
Label done;
// If the object is a smi return the object.
__ JumpIfSmi(input, &done, Label::kNear);
@@ -1279,6 +1260,43 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
}
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->TempAt(0));
+ Smi* index = instr->index();
+ Label runtime, done;
+ ASSERT(object.is(result));
+ ASSERT(object.is(eax));
+
+#ifdef DEBUG
+ __ AbortIfSmi(object);
+ __ CmpObjectType(object, JS_DATE_TYPE, scratch);
+ __ Assert(equal, "Trying to get date field from non-date.");
+#endif
+
+ if (index->value() == 0) {
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch, Operand::StaticVariable(stamp));
+ __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ j(not_equal, &runtime, Label::kNear);
+ __ mov(result, FieldOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ mov(Operand(esp, 0), object);
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->Equals(instr->result()));
@@ -2080,17 +2098,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ mov(esp, ebp);
__ pop(ebp);
- if (dynamic_frame_alignment_) {
- Label aligned;
- // Frame alignment marker (padding) is below arguments,
- // and receiver, so its return-address-relative offset is
- // (num_arguments + 2) words.
- __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
- Immediate(factory()->frame_alignment_marker()));
- __ j(not_equal, &aligned);
- __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
- __ bind(&aligned);
- }
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
@@ -2580,15 +2587,10 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
}
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
Register scratch = ToRegister(instr->TempAt(0));
- ASSERT(receiver.is(eax)); // Used for parameter count.
- ASSERT(function.is(edi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(eax));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2630,6 +2632,17 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ mov(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ ASSERT(receiver.is(eax)); // Used for parameter count.
+ ASSERT(function.is(edi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(eax));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@@ -2690,6 +2703,15 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
}
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(esi));
+ __ push(esi); // The context is the first argument.
+ __ push(Immediate(instr->hydrogen()->pairs()));
+ __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3034,16 +3056,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
+ class DeferredDoRandom: public LDeferredCode {
+ public:
+ DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LRandom* instr_;
+ };
+
+ DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+ // Assert that the register size is indeed the size of each seed.
+ static const int kSeedSize = sizeof(uint32_t);
+ STATIC_ASSERT(kPointerSize == kSeedSize);
- __ PrepareCallCFunction(1, ebx);
__ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+ static const int kRandomSeedOffset =
+ FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
+ __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
+ // ebx: FixedArray of the global context's random seeds
+
+ // Load state[0].
+ __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
+ // If state[0] == 0, call runtime to initialize seeds.
+ __ test(ecx, ecx);
+ __ j(zero, deferred->entry());
+ // Load state[1].
+ __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
+ // ecx: state[0]
+ // eax: state[1]
+
+ // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
+ __ movzx_w(edx, ecx);
+ __ imul(edx, edx, 18273);
+ __ shr(ecx, 16);
+ __ add(ecx, edx);
+ // Save state[0].
+ __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
+
+ // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
+ __ movzx_w(edx, eax);
+ __ imul(edx, edx, 36969);
+ __ shr(eax, 16);
+ __ add(eax, edx);
+ // Save state[1].
+ __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
+
+ // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
+ __ shl(ecx, 14);
+ __ and_(eax, Immediate(0x3FFFF));
+ __ add(eax, ecx);
+ __ bind(deferred->exit());
// Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
@@ -3056,6 +3126,14 @@ void LCodeGen::DoRandom(LRandom* instr) {
}
+void LCodeGen::DoDeferredRandom(LRandom* instr) {
+ __ PrepareCallCFunction(1, ebx);
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+ // Return value is in eax.
+}
+
+
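The inline fast path above is a pair of 16-bit multiply-with-carry generators. The same step in plain C++ (seed layout taken from the comments; illustrative only, not part of the patch):

  #include <cstdint>

  static uint32_t Random32(uint32_t state[2]) {
    state[0] = 18273u * (state[0] & 0xFFFF) + (state[0] >> 16);
    state[1] = 36969u * (state[1] & 0xFFFF) + (state[1] >> 16);
    return (state[0] << 14) + (state[1] & 0x3FFFF);
  }
  // The 32 result bits are then spliced into a double's mantissa and 1.0
  // is subtracted, yielding a uniform value in [0, 1).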
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3218,9 +3296,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
- Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(eax, Immediate(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -4186,6 +4264,94 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+ class DeferredAllocateObject: public LDeferredCode {
+ public:
+ DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocateObject* instr_;
+ };
+
+ DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->TempAt(0));
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->pre_allocated_property_fields() +
+ initial_map->unused_property_fields() -
+ initial_map->inobject_properties() == 0);
+
+ // Allocate memory for the object. The initial map might change when
+ // the constructor's prototype changes, but instance size and property
+ // counts remain unchanged (once slack tracking has finished).
+ ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
+ __ AllocateInNewSpace(instance_size,
+ result,
+ no_reg,
+ scratch,
+ deferred->entry(),
+ TAG_OBJECT);
+
+ // Load the initial map.
+ Register map = scratch;
+ __ LoadHeapObject(scratch, constructor);
+ __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
+
+ if (FLAG_debug_code) {
+ __ AbortIfSmi(map);
+ __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
+ instance_size >> kPointerSizeLog2);
+ __ Assert(equal, "Unexpected instance size");
+ __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
+ initial_map->pre_allocated_property_fields());
+ __ Assert(equal, "Unexpected pre-allocated property fields count");
+ __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
+ initial_map->unused_property_fields());
+ __ Assert(equal, "Unexpected unused property fields count");
+ __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
+ initial_map->inobject_properties());
+ __ Assert(equal, "Unexpected in-object property fields count");
+ }
+
+ // Initialize map and fields of the newly allocated object.
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ __ mov(FieldOperand(result, JSObject::kMapOffset), map);
+ __ mov(scratch, factory()->empty_fixed_array());
+ __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ if (initial_map->inobject_properties() != 0) {
+ __ mov(scratch, factory()->undefined_value());
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ __ mov(FieldOperand(result, property_offset), scratch);
+ }
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+ Register result = ToRegister(instr->result());
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, Immediate(0));
+
+ PushSafepointRegistersScope scope(this);
+ __ PushHeapObject(constructor);
+ CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
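Inlining the allocation is only sound when the instance size is final. The ASSERTs above amount to this predicate (a sketch; illustrative only, not part of the patch):

  static bool CanAllocateInline(int pre_allocated_property_fields,
                                int unused_property_fields,
                                int inobject_properties,
                                bool slack_tracking_in_progress) {
    // Every in-object slot is accounted for up front, so the object can be
    // carved out of new space at a fixed size and filled with undefined.
    return !slack_tracking_in_progress &&
           pre_allocated_property_fields + unused_property_fields ==
               inobject_properties;
  }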
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Heap* heap = isolate()->heap();
@@ -4251,26 +4417,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
__ Assert(equal, "Unexpected object literal boilerplate");
}
+ // Only elements backing stores for non-COW arrays need to be copied.
+ Handle<FixedArrayBase> elements(object->elements());
+ bool has_elements = elements->length() > 0 &&
+ elements->map() != isolate()->heap()->fixed_cow_array_map();
+
// Increase the offset so that subsequent objects end up right after
- // this one.
- int current_offset = *offset;
- int size = object->map()->instance_size();
- *offset += size;
+ // this object and its backing store.
+ int object_offset = *offset;
+ int object_size = object->map()->instance_size();
+ int elements_offset = *offset + object_size;
+ int elements_size = has_elements ? elements->Size() : 0;
+ *offset += object_size + elements_size;
// Copy object header.
ASSERT(object->properties()->length() == 0);
- ASSERT(object->elements()->length() == 0 ||
- object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
int inobject_properties = object->map()->inobject_properties();
- int header_size = size - inobject_properties * kPointerSize;
+ int header_size = object_size - inobject_properties * kPointerSize;
for (int i = 0; i < header_size; i += kPointerSize) {
- __ mov(ecx, FieldOperand(source, i));
- __ mov(FieldOperand(result, current_offset + i), ecx);
+ if (has_elements && i == JSObject::kElementsOffset) {
+ __ lea(ecx, Operand(result, elements_offset));
+ } else {
+ __ mov(ecx, FieldOperand(source, i));
+ }
+ __ mov(FieldOperand(result, object_offset + i), ecx);
}
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
- int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+ int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
@@ -4285,10 +4460,54 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
__ mov(FieldOperand(result, total_offset), Immediate(value));
}
}
+
+ if (has_elements) {
+ // Copy elements backing store header.
+ __ LoadHeapObject(source, elements);
+ for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+ __ mov(ecx, FieldOperand(source, i));
+ __ mov(FieldOperand(result, elements_offset + i), ecx);
+ }
+
+ // Copy elements backing store content.
+ int elements_length = elements->length();
+ if (elements->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+ int32_t value_low = value & 0xFFFFFFFF;
+ int32_t value_high = value >> 32;
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ mov(FieldOperand(result, total_offset), Immediate(value_low));
+ __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
+ }
+ } else if (elements->IsFixedArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ lea(ecx, Operand(result, *offset));
+ __ mov(FieldOperand(result, total_offset), ecx);
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+ __ mov(FieldOperand(result, total_offset), ecx);
+ } else {
+ __ mov(FieldOperand(result, total_offset), Immediate(value));
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ }
}
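Double elements of the boilerplate are emitted as two 32-bit immediate stores, matching this split (illustrative only, not part of the patch):

  #include <cstdint>

  static void SplitDoubleBits(int64_t bits, int32_t* low, int32_t* high) {
    *low = static_cast<int32_t>(bits & 0xFFFFFFFF);  // stored at total_offset
    *high = static_cast<int32_t>(bits >> 32);        // stored at total_offset + 4
  }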
-void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
int size = instr->hydrogen()->total_size();
@@ -4310,14 +4529,14 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
}
-void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
// Set up the parameters to the stub/runtime call.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+ __ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(constant_properties));
int flags = instr->hydrogen()->fast_elements()
@@ -4414,7 +4633,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
__ push(Immediate(shared_info));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
- __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(esi);
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
? factory()->true_value()
@@ -4682,6 +4901,84 @@ void LCodeGen::DoIn(LIn* instr) {
}
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ DeoptimizeIf(equal, instr->environment());
+
+ __ cmp(eax, isolate()->factory()->null_value());
+ DeoptimizeIf(equal, instr->environment());
+
+ __ test(eax, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ DeoptimizeIf(below_equal, instr->environment());
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(&call_runtime);
+
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ jmp(&use_cache, Label::kNear);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(eax);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->meta_map());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ bind(&use_cache);
+}
+
+
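The checks above mirror full-codegen's for-in preconditions (a sketch with booleans standing in for the emitted type tests; illustrative only, not part of the patch):

  static bool MustDeoptimize(bool is_undefined, bool is_null,
                             bool is_smi, bool is_proxy) {
    // Any of these sends control back to the unoptimized for-in. An invalid
    // enum cache instead falls through to Runtime::kGetPropertyNamesFast,
    // whose result must be a map or the instruction deoptimizes as well.
    return is_undefined || is_null || is_smi || is_proxy;
  }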
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ __ LoadInstanceDescriptors(map, result);
+ __ mov(result,
+ FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ __ mov(result,
+ FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ test(result, result);
+ DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ __ cmp(ToRegister(instr->map()),
+ FieldOperand(object, HeapObject::kMapOffset));
+ DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+
+ Label out_of_object, done;
+ __ cmp(index, Immediate(0));
+ __ j(less, &out_of_object);
+ __ mov(object, FieldOperand(object,
+ index,
+ times_half_pointer_size,
+ JSObject::kHeaderSize));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&out_of_object);
+ __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ neg(index);
+  // Index is now equal to the out-of-object property index plus 1.
+ __ mov(object, FieldOperand(object,
+ index,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ bind(&done);
+}
+
+
#undef __
} } // namespace v8::internal
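A note on the field-index convention that the new DoLoadFieldByIndex handler above depends on: a non-negative (smi) index selects an in-object field, while a negative index selects a slot of the out-of-object properties array, the negated value being that slot's index plus one (hence the kPointerSize subtraction in the generated code). A minimal standalone sketch of the convention, using illustrative stand-in types rather than real V8 objects:

#include <cassert>
#include <vector>

struct FakeObject {
  std::vector<int> in_object;   // fields stored inside the object
  std::vector<int> properties;  // out-of-object backing store
};

// Mirrors the branch structure of DoLoadFieldByIndex: non-negative
// indexes read in-object fields; negative indexes are negated, after
// which they equal the out-of-object slot index plus one.
int LoadFieldByIndex(const FakeObject& obj, int index) {
  if (index >= 0) return obj.in_object[index];
  return obj.properties[-index - 1];
}

int main() {
  FakeObject o = {{10, 11}, {20, 21}};
  assert(LoadFieldByIndex(o, 1) == 11);   // in-object field 1
  assert(LoadFieldByIndex(o, -2) == 21);  // properties slot 1
  return 0;
}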
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index d86d48cd..52befc69 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -58,7 +58,6 @@ class LCodeGen BASE_EMBEDDED {
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
@@ -105,8 +104,10 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
@@ -143,10 +144,6 @@ class LCodeGen BASE_EMBEDDED {
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
- void set_dynamic_frame_alignment(bool value) {
- dynamic_frame_alignment_ = value;
- }
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -331,7 +328,6 @@ class LCodeGen BASE_EMBEDDED {
int inlined_function_count_;
Scope* const scope_;
Status status_;
- bool dynamic_frame_alignment_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index c81aca8a..2bfbb67b 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -368,11 +368,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LChunk::GetNextSpillIndex(bool is_double) {
  // Skip a slot if needed for a double-width slot.
- if (is_double) {
- spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
- spill_slot_count_++;
- num_double_slots_++;
- }
+ if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
@@ -388,7 +384,7 @@ LOperand* LChunk::GetNextSpillSlot(bool is_double) {
void LChunk::MarkEmptyBlocks() {
- HPhase phase("Mark empty blocks", this);
+ HPhase phase("L_Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
int first = block->first_instruction_index();
@@ -551,7 +547,7 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LChunk(info(), graph());
- HPhase phase("Building chunk", chunk_);
+ HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@@ -580,11 +576,6 @@ void LChunkBuilder::Abort(const char* format, ...) {
}
-LRegister* LChunkBuilder::ToOperand(Register reg) {
- return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -675,7 +666,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
HInstruction* instr = HInstruction::cast(value);
VisitInstruction(instr);
}
- allocator_->RecordUse(value, operand);
+ operand->set_virtual_register(value->id());
return operand;
}
@@ -683,19 +674,13 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
+ result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
return instr;
}
template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
LTemplateInstruction<1, I, T>* instr) {
return Define(instr,
@@ -807,21 +792,24 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand =
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- allocator_->RecordTemporary(operand);
+ operand->set_virtual_register(allocator_->GetVirtualRegister());
+ if (!allocator_->AllocationOk()) {
+ Abort("Not enough virtual registers (temps).");
+ }
return operand;
}
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
@@ -1013,11 +1001,12 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
+ ASSERT(ast_id != AstNode::kNoNumber ||
+ hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result =
new(zone()) LEnvironment(hydrogen_env->closure(),
- hydrogen_env->is_arguments_adaptor(),
+ hydrogen_env->frame_type(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
@@ -1039,7 +1028,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
result->AddValue(op, value->representation());
}
- if (!hydrogen_env->is_arguments_adaptor()) {
+ if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@@ -1118,17 +1107,25 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegister(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LOperand* temp = TempRegister();
+ LWrapReceiver* result =
+ new(zone()) LWrapReceiver(receiver, function, temp);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), edi);
LOperand* receiver = UseFixed(instr->receiver(), eax);
LOperand* length = UseFixed(instr->length(), ebx);
LOperand* elements = UseFixed(instr->elements(), ecx);
- LOperand* temp = FixedTemp(edx);
LApplyArguments* result = new(zone()) LApplyArguments(function,
receiver,
length,
- elements,
- temp);
+ elements);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1158,6 +1155,12 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
}
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LGlobalObject(context));
@@ -1196,7 +1199,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
return DefineSameAsFirst(result);
- } else if (op == kMathSin || op == kMathCos) {
+ } else if (op == kMathSin || op == kMathCos || op == kMathTan) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* input = UseFixedDouble(instr->value(), xmm1);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
@@ -1648,6 +1651,14 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
}
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* date = UseFixed(instr->value(), eax);
+ LDateField* result =
+ new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
@@ -1817,34 +1828,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
-LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
-
- LInstruction* result;
- if (input_rep.IsDouble()) {
- LOperand* reg = UseRegister(value);
- LOperand* temp_reg =
- CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
- result = DefineAsRegister(new(zone()) LDoubleToI(reg, temp_reg));
- } else if (input_rep.IsInteger32()) {
- // Canonicalization should already have removed the hydrogen instruction in
- // this case, since it is a noop.
- UNREACHABLE();
- return NULL;
- } else {
- ASSERT(input_rep.IsTagged());
- LOperand* reg = UseRegister(value);
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LOperand* xmm_temp =
- CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
- result = DefineSameAsFirst(new(zone()) LTaggedToI(reg, xmm_temp));
- }
- return AssignEnvironment(result);
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new(zone()) LReturn(UseFixed(instr->value(), eax));
}
@@ -1937,13 +1920,14 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
HLoadNamedFieldPolymorphic* instr) {
ASSERT(instr->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
if (instr->need_generic()) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* obj = UseFixed(instr->object(), eax);
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(context, obj);
return MarkAsCall(DefineFixed(result, eax), instr);
} else {
+ LOperand* context = UseAny(instr->context()); // Not actually used.
LOperand* obj = UseRegisterAtStart(instr->object());
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(context, obj);
@@ -2214,25 +2198,32 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* temp = TempRegister();
+ LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(
- DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
+ DefineFixed(new(zone()) LFastLiteral(context), eax), instr);
}
-LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(
- DefineFixed(new(zone()) LObjectLiteralFast(context), eax), instr);
+ DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
}
-LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
- HObjectLiteralGeneric* instr) {
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(
- DefineFixed(new(zone()) LObjectLiteralGeneric(context), eax), instr);
+ DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
}
@@ -2387,7 +2378,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind());
+ instr->call_kind(),
+ instr->is_construct());
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2411,6 +2403,35 @@ LInstruction* LChunkBuilder::DoIn(HIn* instr) {
}
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseFixed(instr->enumerable(), eax);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+ return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseTempRegister(instr->index());
+ return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
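The RecordUse/RecordDefinition/RecordTemporary calls removed above are replaced by stamping each operand with a virtual register id directly; TempRegister now asks the allocator for a fresh id and aborts the build when AllocationOk() reports exhaustion. A small sketch of that bookkeeping (the register cap below is illustrative, not the real V8 value):

#include <cstdio>

class FakeAllocator {
 public:
  FakeAllocator() : next_virtual_register_(0), allocation_ok_(true) {}

  // Hands out fresh virtual register ids; flips allocation_ok_ instead
  // of failing hard, so the chunk builder can Abort() gracefully.
  int GetVirtualRegister() {
    if (next_virtual_register_ >= kMaxVirtualRegisters) {
      allocation_ok_ = false;
      return 0;
    }
    return next_virtual_register_++;
  }

  bool AllocationOk() const { return allocation_ok_; }

 private:
  static const int kMaxVirtualRegisters = 1 << 16;  // illustrative cap
  int next_virtual_register_;
  bool allocation_ok_;
};

int main() {
  FakeAllocator allocator;
  int vreg = allocator.GetVirtualRegister();  // stamped onto an operand
  std::printf("vreg=%d ok=%d\n", vreg, allocator.AllocationOk() ? 1 : 0);
  return 0;
}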
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 67bf9376..4ecce96d 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -43,6 +43,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
+ V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -81,11 +82,13 @@ class LCodeGen;
V(ConstantI) \
V(ConstantT) \
V(Context) \
+ V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(ElementsKind) \
+ V(FastLiteral) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@@ -129,8 +132,7 @@ class LCodeGen;
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
- V(ObjectLiteralFast) \
- V(ObjectLiteralGeneric) \
+ V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -166,7 +168,13 @@ class LCodeGen;
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
- V(ValueOf)
+ V(ValueOf) \
+ V(ForInPrepareMap) \
+ V(ForInCacheArray) \
+ V(CheckMapValue) \
+ V(LoadFieldByIndex) \
+ V(DateField) \
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -449,18 +457,33 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
-class LApplyArguments: public LTemplateInstruction<1, 4, 1> {
+class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LWrapReceiver(LOperand* receiver,
+ LOperand* function,
+ LOperand* temp) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
- LOperand* elements,
- LOperand* temp) {
+ LOperand* elements) {
inputs_[0] = function;
inputs_[1] = receiver;
inputs_[2] = length;
inputs_[3] = elements;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
@@ -996,6 +1019,24 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
+class LDateField: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDateField(LOperand* date, LOperand* temp, Smi* index)
+ : index_(index) {
+ inputs_[0] = date;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ Smi* index() const { return index_; }
+
+ private:
+ Smi* index_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 2, 0> {
public:
LThrow(LOperand* context, LOperand* value) {
@@ -1385,6 +1426,17 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
+class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LDeclareGlobals(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
@@ -1979,42 +2031,56 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
+class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LArrayLiteral(LOperand* context) {
+ LAllocateObject(LOperand* context, LOperand* temp) {
inputs_[0] = context;
+ temps_[0] = temp;
}
+ DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
+
LOperand* context() { return inputs_[0]; }
+};
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+
+class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LFastLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
};
-class LObjectLiteralFast: public LTemplateInstruction<1, 1, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LObjectLiteralFast(LOperand* context) {
+ explicit LArrayLiteral(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+ DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};
-class LObjectLiteralGeneric: public LTemplateInstruction<1, 1, 0> {
+class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LObjectLiteralGeneric(LOperand* context) {
+ explicit LObjectLiteral(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
};
@@ -2156,6 +2222,64 @@ class LIn: public LTemplateInstruction<1, 3, 0> {
};
+class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LForInPrepareMap(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
@@ -2165,7 +2289,6 @@ class LChunk: public ZoneObject {
graph_(graph),
instructions_(32),
pointer_maps_(8),
- num_double_slots_(0),
inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@@ -2179,8 +2302,6 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
- int num_double_slots() const { return num_double_slots_; }
-
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2222,7 +2343,6 @@ class LChunk: public ZoneObject {
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
- int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
@@ -2233,7 +2353,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
- isolate_(graph->isolate()),
+ zone_(graph->isolate()->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@@ -2263,7 +2383,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
- Zone* zone() { return isolate_->zone(); }
+ Zone* zone() { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2273,7 +2393,6 @@ class LChunkBuilder BASE_EMBEDDED {
void Abort(const char* format, ...);
// Methods for getting operands for Use / Define / Temp.
- LRegister* ToOperand(Register reg);
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
@@ -2324,8 +2443,6 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result);
template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
@@ -2375,7 +2492,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
- Isolate* isolate_;
+ Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
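For reference, the three LTemplateInstruction parameters used throughout this header count results, inputs and temps, which is why dropping the edx temp turns LApplyArguments from <1, 4, 1> into <1, 4, 0> and the new LWrapReceiver is declared as <1, 2, 1>. A simplified stand-in for that shape (the real class stores operands in embedded containers, not raw arrays):

// R results, I inputs, T temps; the "? 1 :" guards stand in for V8's
// container type, which tolerates zero-sized parameter counts.
template <int R, int I, int T>
struct FakeTemplateInstruction {
  void* results_[R == 0 ? 1 : R];
  void* inputs_[I == 0 ? 1 : I];
  void* temps_[T == 0 ? 1 : T];
};

// After this patch: one result, four inputs, no temps.
struct FakeApplyArguments : FakeTemplateInstruction<1, 4, 0> {};
// One result, two inputs (receiver, function), one temp.
struct FakeWrapReceiver : FakeTemplateInstruction<1, 2, 1> {};

int main() {
  FakeApplyArguments apply;
  FakeWrapReceiver wrap;
  (void)apply; (void)wrap;
  return 0;
}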
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index d0d9e19c..60e38a6c 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -764,8 +764,7 @@ void MacroAssembler::LeaveApiExitFrame() {
}
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type,
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
@@ -776,25 +775,21 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// We will build up the handler from the bottom by pushing on the stack.
- // First compute the state and push the frame pointer and context.
- unsigned state = StackHandler::OffsetField::encode(handler_index);
- if (try_location == IN_JAVASCRIPT) {
- push(ebp);
- push(esi);
- state |= (type == TRY_CATCH_HANDLER)
- ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
- : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
- } else {
- ASSERT(try_location == IN_JS_ENTRY);
+ // First push the frame pointer and context.
+ if (kind == StackHandler::JS_ENTRY) {
// The frame pointer does not point to a JS frame so we save NULL for
// ebp. We expect the code throwing an exception to check ebp before
// dereferencing it to restore the context.
push(Immediate(0)); // NULL frame pointer.
push(Immediate(Smi::FromInt(0))); // No context.
- state |= StackHandler::KindField::encode(StackHandler::ENTRY);
+ } else {
+ push(ebp);
+ push(esi);
}
-
// Push the state and the code object.
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
push(Immediate(state));
Push(CodeObject());
@@ -867,8 +862,7 @@ void MacroAssembler::Throw(Register value) {
}
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
@@ -878,21 +872,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in eax.
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate());
- mov(Operand::StaticVariable(external_caught), Immediate(false));
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate());
- mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- mov(Operand::StaticVariable(pending_exception), eax);
- } else if (!value.is(eax)) {
+ if (!value.is(eax)) {
mov(eax, value);
}
-
// Drop the stack pointer to the top of the top stack handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
mov(esp, Operand::StaticVariable(handler_address));
@@ -904,7 +886,7 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
- STATIC_ASSERT(StackHandler::ENTRY == 0);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
test(Operand(esp, StackHandlerConstants::kStateOffset),
Immediate(StackHandler::KindField::kMask));
j(not_zero, &fetch_next);
@@ -2168,6 +2150,46 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
+ j(not_equal, no_map_match);
+
+ // Use the transitioned cached map.
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(
+ Register function_in, Register scratch, Register map_out) {
+ ASSERT(!function_in.is(map_out));
+ Label done;
+ mov(map_out, FieldOperand(function_in,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ if (!FLAG_smi_only_arrays) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ map_out,
+ scratch,
+ &done);
+ }
+ bind(&done);
+}
+
+
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -2754,6 +2776,46 @@ void MacroAssembler::EnsureNotWhite(
bind(&done);
}
+
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Label next;
+ mov(ecx, eax);
+ bind(&next);
+
+ // Check that there are no elements. Register ecx contains the
+ // current JS object we've reached through the prototype chain.
+ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ j(not_equal, call_runtime);
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in ebx for the subsequent
+ // prototype load.
+ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
+ JumpIfSmi(edx, call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (edx). This is the case if the next enumeration
+ // index field does not contain a smi.
+ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
+ JumpIfSmi(edx, call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ cmp(ecx, eax);
+ j(equal, &check_prototype, Label::kNear);
+ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ cmp(edx, isolate()->factory()->empty_fixed_array());
+ j(not_equal, call_runtime);
+
+ // Load the prototype from the map and loop if non-null.
+ bind(&check_prototype);
+ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ cmp(ecx, isolate()->factory()->null_value());
+ j(not_equal, &next);
+}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
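The new CheckEnumCache walks the prototype chain: every object on the chain must have no elements and non-empty instance descriptors carrying an enum cache, and every object except the receiver must have an empty cache; otherwise the generated code jumps to the call_runtime label. A standalone sketch of that walk over illustrative stand-in types:

#include <cstddef>

struct FakeObject;

struct FakeMap {
  bool has_enum_cache;          // stands in for the non-smi descriptor checks
  bool enum_cache_is_empty;     // stands in for the empty_fixed_array compare
  const FakeObject* prototype;  // factory()->null_value() modeled as NULL
};

struct FakeObject {
  bool has_elements;  // elements != empty_fixed_array
  FakeMap map;
};

// Returns true when the fast for-in path may be used; false corresponds
// to jumping to call_runtime in the generated code.
bool CheckEnumCache(const FakeObject* receiver) {
  for (const FakeObject* current = receiver; current != NULL;
       current = current->map.prototype) {
    if (current->has_elements) return false;
    if (!current->map.has_enum_cache) return false;
    // For all objects but the receiver the cache must also be empty.
    if (current != receiver && !current->map.enum_cache_is_empty) {
      return false;
    }
  }
  return true;
}

int main() {
  FakeObject proto = { false, { true, true, NULL } };
  FakeObject receiver = { false, { true, false, &proto } };
  return CheckEnumCache(&receiver) ? 0 : 1;
}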
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 0fcb94fd..66d1ce7d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -221,6 +221,22 @@ class MacroAssembler: public Assembler {
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+  // Conditionally load the cached Array transitioned map of kind
+  // transitioned_kind from the global context if the map in register
+  // map_in_out is the global context's cached Array map for
+  // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ // Load the initial map for new Arrays from a JSFunction.
+ void LoadInitialArrayMap(Register function_in,
+ Register scratch,
+ Register map_out);
+
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -475,17 +491,16 @@ class MacroAssembler: public Assembler {
// Exception handling
// Push a new try handler and link it into try handler chain.
- void PushTryHandler(CodeLocation try_location,
- HandlerType type,
- int handler_index);
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
- // Activate the top handler in the try hander chain.
+  // Throw to the top handler in the try handler chain.
void Throw(Register value);
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ // Throw past all JS frames to the top JS entry frame.
+ void ThrowUncatchable(Register value);
// ---------------------------------------------------------------------------
// Inline caching support
@@ -814,6 +829,10 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+ // Expects object in eax and returns map with validated enum cache
+ // in eax. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Label* call_runtime);
+
private:
bool generating_stub_;
bool allow_stub_calls_;
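The reworked PushTryHandler packs the handler kind and handler index into a single state word via StackHandler's KindField and IndexField, and ThrowUncatchable's unwind loop relies on JS_ENTRY encoding to zero in the kind bits. A sketch of the packing with illustrative field widths (the real widths live in StackHandler's BitField definitions):

#include <cassert>

enum FakeKind { JS_ENTRY = 0, CATCH = 1, FINALLY = 2 };

const unsigned kKindBits = 2;                      // illustrative width
const unsigned kKindMask = (1u << kKindBits) - 1;  // KindField::kMask

unsigned EncodeState(FakeKind kind, unsigned handler_index) {
  return static_cast<unsigned>(kind) | (handler_index << kKindBits);
}

int main() {
  unsigned state = EncodeState(JS_ENTRY, 3);
  // ThrowUncatchable keeps following kNextOffset links while the kind
  // bits are non-zero, i.e. until a JS entry handler is reached.
  assert((state & kKindMask) == JS_ENTRY);
  assert((EncodeState(CATCH, 3) & kKindMask) != JS_ENTRY);
  return 0;
}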
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index e613a06c..04d6b62c 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -523,7 +523,7 @@ void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- ASSERT(minus < String::kMaxUC16CharCode);
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
__ lea(eax, Operand(current_character(), -minus));
__ and_(eax, mask);
__ cmp(eax, c);
@@ -1085,7 +1085,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt();
+ MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index f6f42417..fd267798 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -44,19 +44,30 @@ static void ProbeTable(Isolate* isolate,
Code::Flags flags,
StubCache::Table table,
Register name,
+ Register receiver,
+                       // Number of the cache entry, scaled by the pointer size.
Register offset,
Register extra) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
Label miss;
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ lea(offset, Operand(offset, offset, times_2, 0));
+
if (extra.is_valid()) {
// Get the code entry from the cache.
- __ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
+ __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
// Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Check that the flags match what we're looking for.
@@ -65,6 +76,14 @@ static void ProbeTable(Isolate* isolate,
__ cmp(offset, flags);
__ j(not_equal, &miss);
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
// Jump to the first instruction in the code stub.
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(extra);
@@ -75,11 +94,19 @@ static void ProbeTable(Isolate* isolate,
__ push(offset);
// Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
+ __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+ __ j(not_equal, &miss);
+
+ // Check the map matches.
+ __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+ __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
+ // Restore offset register.
+ __ mov(offset, Operand(esp, 0));
+
// Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
@@ -87,9 +114,17 @@ static void ProbeTable(Isolate* isolate,
__ cmp(offset, flags);
__ j(not_equal, &miss);
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
+
// Restore offset and re-load code entry from cache.
__ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
+ __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Jump to the first instruction in the code stub.
__ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -159,12 +194,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register name,
Register scratch,
Register extra,
- Register extra2) {
+ Register extra2,
+ Register extra3) {
Label miss;
- // Assert that code is valid. The shifting code relies on the entry size
- // being 8.
- ASSERT(sizeof(Entry) == 8);
+ // Assert that code is valid. The multiplying code relies on the entry size
+ // being 12.
+ ASSERT(sizeof(Entry) == 12);
// Assert the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -176,37 +212,51 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!extra.is(name));
ASSERT(!extra.is(scratch));
- // Assert scratch and extra registers are valid, and extra2 is unused.
+ // Assert scratch and extra registers are valid, and extra2/3 are unused.
ASSERT(!scratch.is(no_reg));
ASSERT(extra2.is(no_reg));
+ ASSERT(extra3.is(no_reg));
+
+ Register offset = scratch;
+ scratch = no_reg;
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
- __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
- __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, flags);
- __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+ __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+  // We mask out the last two bits because they are not part of the hash and
+  // they are always 01 for maps; the same mask is used in the two 'and'
+  // instructions below.
+ __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+ // ProbeTable expects the offset to be pointer scaled, which it is, because
+ // the heap object tag size is 2 and the pointer size log 2 is also 2.
+ ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
// Probe the primary table.
- ProbeTable(isolate(), masm, flags, kPrimary, name, scratch, extra);
+ ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
- __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
- __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, flags);
- __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
- __ sub(scratch, name);
- __ add(scratch, Immediate(flags));
- __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+ __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
+ __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(offset, flags);
+ __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+ __ sub(offset, name);
+ __ add(offset, Immediate(flags));
+ __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
- ProbeTable(isolate(), masm, flags, kSecondary, name, scratch, extra);
+ ProbeTable(
+ isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}
@@ -1195,14 +1245,9 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(edx, miss);
- }
// Check that the maps haven't changed.
+ __ JumpIfSmi(edx, miss);
CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
}
@@ -1345,25 +1390,25 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
} else {
Label call_builtin;
- // Get the elements array of the object.
- __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
-
if (argc == 1) { // Otherwise fall through to call builtin.
Label attempt_to_grow_elements, with_write_barrier;
+ // Get the elements array of the object.
+ __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ Immediate(factory()->fixed_array_map()));
+ __ j(not_equal, &call_builtin);
+
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(eax, Immediate(Smi::FromInt(argc)));
- // Get the element's length into ecx.
- __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ // Get the elements' length into ecx.
+ __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(eax, ecx);
@@ -1376,29 +1421,52 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- // Push the element.
- __ lea(edx, FieldOperand(ebx,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ mov(Operand(edx, 0), ecx);
+ // Store the value.
+ __ mov(FieldOperand(edi,
+ eax,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize),
+ ecx);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &call_builtin);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+ if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+ Label fast_object, not_fast_object;
+ __ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
+ __ jmp(&fast_object);
+      // In case of fast smi-only elements, convert to fast object; otherwise
+      // bail out.
+ __ bind(&not_fast_object);
+ __ CheckFastSmiOnlyElements(ebx, &call_builtin);
+ // edi: elements array
+ // edx: receiver
+ // ebx: map
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ ebx,
+ edi,
+ &call_builtin);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ // Restore edi.
+ __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
+ __ bind(&fast_object);
+ } else {
+ __ CheckFastObjectElements(ebx, &call_builtin);
+ }
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- // Push the element.
- __ lea(edx, FieldOperand(ebx,
+ // Store the value.
+ __ lea(edx, FieldOperand(edi,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
- __ RecordWrite(ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ ret((argc + 1) * kPointerSize);
@@ -1408,11 +1476,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&call_builtin);
}
- __ mov(edi, Operand(esp, argc * kPointerSize));
+ __ mov(ebx, Operand(esp, argc * kPointerSize));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
- __ JumpIfSmi(edi, &no_fast_elements_check);
+ __ JumpIfSmi(ebx, &no_fast_elements_check);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
__ bind(&no_fast_elements_check);
@@ -1431,7 +1499,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
// Check if it's the end of elements.
- __ lea(edx, FieldOperand(ebx,
+ __ lea(edx, FieldOperand(edi,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmp(edx, ecx);
@@ -1444,7 +1512,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
// Push the argument...
- __ mov(Operand(edx, 0), edi);
+ __ mov(Operand(edx, 0), ebx);
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
@@ -1456,13 +1524,13 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// tell the incremental marker to rescan the object that we just grew. We
// don't need to worry about the holes because they are in old space and
// already marked black.
- __ RecordWrite(ebx, edx, edi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+ __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
// Restore receiver to edx as finish sequence assumes it's here.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
- __ add(FieldOperand(ebx, FixedArray::kLengthOffset),
+ __ add(FieldOperand(edi, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(kAllocationDelta)));
  // NOTE: This only happens in new-space, where we don't
@@ -2568,7 +2636,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_jsarray, elements_kind).GetCode();
+ KeyedStoreElementStub(is_jsarray, elements_kind, grow_mode_).GetCode();
__ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
@@ -2756,14 +2824,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// -----------------------------------
Label miss;
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual loads. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(eax, &miss);
- }
-
// Check that the maps haven't changed.
+ __ JumpIfSmi(eax, &miss);
CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
// Get the value from the cell.
@@ -3695,14 +3757,16 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, grow, slow, transition_elements_kind;
+ Label check_capacity, prepare_slow, finish_store, commit_backing_store;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3710,24 +3774,32 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi.
__ JumpIfNotSmi(ecx, &miss_force_generic);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(eax, &transition_elements_kind);
+ }
+
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ j(not_equal, &miss_force_generic);
-
if (is_js_array) {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ j(above_equal, &grow);
+ } else {
+ __ j(above_equal, &miss_force_generic);
+ }
} else {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
__ j(above_equal, &miss_force_generic);
}
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_array_map()));
+ __ j(not_equal, &miss_force_generic);
+
+ __ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(eax, &transition_elements_kind);
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
__ mov(FieldOperand(edi,
@@ -3745,8 +3817,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
FixedArray::kHeaderSize));
__ mov(Operand(ecx, 0), eax);
// Make sure to preserve the value in register eax.
- __ mov(edx, eax);
- __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs);
+ __ mov(ebx, eax);
+ __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
}
// Done.
@@ -3762,19 +3834,94 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Handle transition requiring the array to grow.
+ __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime. Flags are already set by the previous
+    // compare.
+ __ j(not_equal, &miss_force_generic);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(not_equal, &check_capacity);
+
+ int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+    // The key is not restored: the array was empty, so the element is simply
+    // stored at index zero below.
+
+    // eax: value
+    // ecx: clobbered by the allocation above (the key, zero here, is unused)
+ // edx: receiver
+ // edi: elements
+    // Initialize the new backing store's map and length, then fill it with
+    // holes.
+ __ mov(FieldOperand(edi, JSObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset),
+ Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+ __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
+ }
+
+ // Store the element at index zero.
+ __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
+
+ // Install the new backing store in the JSArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
+ __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ ret(0);
+
+ __ bind(&check_capacity);
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
+ __ j(equal, &miss_force_generic);
+
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+ __ j(above_equal, &slow);
+
+ // Grow the array and finish the store.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ jmp(&finish_store);
+
+ __ bind(&prepare_slow);
+ // Restore the key, which is known to be the array length.
+ __ mov(ecx, Immediate(0));
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
- bool is_js_array) {
+ bool is_js_array,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label check_capacity, prepare_slow, finish_store, commit_backing_store;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3789,19 +3936,20 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
if (is_js_array) {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ j(above_equal, &grow);
+ } else {
+ __ j(above_equal, &miss_force_generic);
+ }
} else {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
+ __ j(above_equal, &miss_force_generic);
}
- __ j(above_equal, &miss_force_generic);
- __ StoreNumberToDoubleElements(eax,
- edi,
- ecx,
- edx,
- xmm0,
- &transition_elements_kind,
- true);
+ __ bind(&finish_store);
+ __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
+ &transition_elements_kind, true);
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -3814,6 +3962,79 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Handle transition requiring the array to grow.
+ __ bind(&grow);
+
+    // Make sure the array is only growing by a single element; anything else
+    // must be handled by the runtime. Flags are already set by the previous
+    // compare.
+ __ j(not_equal, &miss_force_generic);
+
+ // Transition on values that can't be stored in a FixedDoubleArray.
+ Label value_is_smi;
+ __ JumpIfSmi(eax, &value_is_smi);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
+ __ j(not_equal, &transition_elements_kind);
+ __ bind(&value_is_smi);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(not_equal, &check_capacity);
+
+ int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+ // Restore the key, which is known to be the array length.
+ __ mov(ecx, Immediate(0));
+
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements
+    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+    // efficiency; they are guaranteed to be initialized before use.
+ __ mov(FieldOperand(edi, JSObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_double_array_map()));
+ __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
+ Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+
+ // Install the new backing store in the JSArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
+ __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&check_capacity);
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
+ __ j(above_equal, &slow);
+
+ // Grow the array and finish the store.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ jmp(&finish_store);
+
+ __ bind(&prepare_slow);
+ // Restore the key, which is known to be the array length.
+ __ mov(ecx, Immediate(0));
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
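The stub cache probe rewritten above now keeps three pointer-size fields per entry (name, code, map), which is why ProbeTable first triples the incoming offset ('lea offset, [offset + offset*2]') and then addresses the tables with times_1. The hash structure itself is unchanged; a standalone sketch with illustrative table sizes standing in for the real constants:

#include <cstdint>
#include <cstdio>

const uint32_t kPrimaryTableSize = 2048;  // illustrative sizes
const uint32_t kSecondaryTableSize = 512;
const uint32_t kHeapObjectTagSize = 2;    // pointer scaling, as asserted
                                          // in GenerateProbe above

// Mirrors the generated code: add the map word to the name's hash, xor
// in the code flags, then mask to a pointer-scaled primary-table index.
uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_word,
                       uint32_t flags) {
  uint32_t offset = name_hash + map_word;
  offset ^= flags;
  return offset & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
}

// On a primary miss: subtract the name word, add the flags, and mask to
// the secondary table.
uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_word,
                         uint32_t flags) {
  uint32_t offset = primary_offset - name_word + flags;
  return offset & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}

// Three fields per entry, hence the multiply-by-3 in ProbeTable.
uint32_t EntryByteOffset(uint32_t scaled_offset) {
  return scaled_offset + scaled_offset * 2;
}

int main() {
  uint32_t p = PrimaryOffset(0x1234u, 0xBEEF0001u, 0x42u);
  uint32_t s = SecondaryOffset(p, 0xBEEF0040u, 0x42u);
  std::printf("primary=%u secondary=%u entry=%u\n",
              (unsigned)p, (unsigned)s, (unsigned)EntryByteOffset(p));
  return 0;
}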