author     Ben Murdoch <benm@google.com>    2012-04-11 10:23:59 +0100
committer  Ben Murdoch <benm@google.com>    2012-04-11 15:40:41 +0100
commit     5d4cdbf7a67d3662fa0bee4efdb7edd8daec9b0b (patch)
tree       7b717e53b80c4a64bf9b723aabcf7c909ae3c243 /src/arm
parent     c7cc028aaeedbbfa11c11d0b7b243b3d9e837ed9 (diff)
Merge V8 3.9 at 3.9.24.9

http://v8.googlecode.com/svn/branches/3.9@11260

Bug: 5688872
Change-Id: Iddd944e82189d92df3fc427dc5f0d3f1b2f0c6c8
Diffstat (limited to 'src/arm')
-rw-r--r--  src/arm/assembler-arm-inl.h            |  12
-rw-r--r--  src/arm/assembler-arm.cc               |  31
-rw-r--r--  src/arm/assembler-arm.h                |  70
-rw-r--r--  src/arm/builtins-arm.cc                | 131
-rw-r--r--  src/arm/code-stubs-arm.cc              | 389
-rw-r--r--  src/arm/codegen-arm.cc                 |  93
-rw-r--r--  src/arm/debug-arm.cc                   |  40
-rw-r--r--  src/arm/deoptimizer-arm.cc             | 156
-rw-r--r--  src/arm/full-codegen-arm.cc            | 338
-rw-r--r--  src/arm/ic-arm.cc                      |  70
-rw-r--r--  src/arm/lithium-arm.cc                 | 509
-rw-r--r--  src/arm/lithium-arm.h                  | 162
-rw-r--r--  src/arm/lithium-codegen-arm.cc         | 447
-rw-r--r--  src/arm/lithium-codegen-arm.h          |   2
-rw-r--r--  src/arm/macro-assembler-arm.cc         | 152
-rw-r--r--  src/arm/macro-assembler-arm.h          |  39
-rw-r--r--  src/arm/regexp-macro-assembler-arm.cc  |  10
-rw-r--r--  src/arm/simulator-arm.cc               |   4
-rw-r--r--  src/arm/stub-cache-arm.cc              | 398
19 files changed, 2191 insertions(+), 862 deletions(-)
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index dd8ffcd7..d5db686c 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -80,7 +80,7 @@ Address RelocInfo::target_address_address() {
int RelocInfo::target_address_size() {
- return Assembler::kExternalTargetSize;
+ return kPointerSize;
}
@@ -364,8 +364,14 @@ Address Assembler::target_address_at(Address pc) {
}
-void Assembler::set_target_at(Address constant_pool_entry,
- Address target) {
+void Assembler::deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_external_target_at(Address constant_pool_entry,
+ Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 25922361..ec28da40 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -66,11 +66,13 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
- // generation even when generating snapshots. This won't work for cross
- // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
-#if defined(__VFP_FP__) && !defined(__SOFTFP__)
+ // generation even when generating snapshots. ARMv7 and hardware floating
+ // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
+#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
+ && !defined(__SOFTFP__)
answer |= 1u << VFP3 | 1u << ARMv7;
-#endif // defined(__VFP_FP__) && !defined(__SOFTFP__)
+#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
+ // && !defined(__SOFTFP__)
#endif // def __arm__
return answer;
@@ -137,7 +139,6 @@ bool RelocInfo::IsCodedSpecially() {
}
-
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -236,25 +237,27 @@ MemOperand::MemOperand(Register rn, Register rm,
// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
- al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+ al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
+ kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | sp.code() * B16;
+ al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | sp.code() * B16;
+ al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// mov lr, pc
-const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
+const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
+const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
+const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
@@ -271,13 +274,13 @@ const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
- al | B26 | L | Offset | fp.code() * B16;
+ al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
- al | B26 | Offset | fp.code() * B16;
+ al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
- al | B26 | L | NegOffset | fp.code() * B16;
+ al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
- al | B26 | NegOffset | fp.code() * B16;
+ al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 11e39df6..e2d5f598 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -124,24 +124,47 @@ struct Register {
int code_;
};
-const Register no_reg = { -1 };
-
-const Register r0 = { 0 };
-const Register r1 = { 1 };
-const Register r2 = { 2 };
-const Register r3 = { 3 };
-const Register r4 = { 4 };
-const Register r5 = { 5 };
-const Register r6 = { 6 };
-const Register r7 = { 7 };
-const Register r8 = { 8 }; // Used as context register.
-const Register r9 = { 9 }; // Used as lithium codegen scratch register.
-const Register r10 = { 10 }; // Used as roots register.
-const Register fp = { 11 };
-const Register ip = { 12 };
-const Register sp = { 13 };
-const Register lr = { 14 };
-const Register pc = { 15 };
+// These constants are used in several locations, including static initializers
+const int kRegister_no_reg_Code = -1;
+const int kRegister_r0_Code = 0;
+const int kRegister_r1_Code = 1;
+const int kRegister_r2_Code = 2;
+const int kRegister_r3_Code = 3;
+const int kRegister_r4_Code = 4;
+const int kRegister_r5_Code = 5;
+const int kRegister_r6_Code = 6;
+const int kRegister_r7_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_fp_Code = 11;
+const int kRegister_ip_Code = 12;
+const int kRegister_sp_Code = 13;
+const int kRegister_lr_Code = 14;
+const int kRegister_pc_Code = 15;
+
+const Register no_reg = { kRegister_no_reg_Code };
+
+const Register r0 = { kRegister_r0_Code };
+const Register r1 = { kRegister_r1_Code };
+const Register r2 = { kRegister_r2_Code };
+const Register r3 = { kRegister_r3_Code };
+const Register r4 = { kRegister_r4_Code };
+const Register r5 = { kRegister_r5_Code };
+const Register r6 = { kRegister_r6_Code };
+const Register r7 = { kRegister_r7_Code };
+// Used as context register.
+const Register r8 = { kRegister_r8_Code };
+// Used as lithium codegen scratch register.
+const Register r9 = { kRegister_r9_Code };
+// Used as roots register.
+const Register r10 = { kRegister_r10_Code };
+const Register fp = { kRegister_fp_Code };
+const Register ip = { kRegister_ip_Code };
+const Register sp = { kRegister_sp_Code };
+const Register lr = { kRegister_lr_Code };
+const Register pc = { kRegister_pc_Code };
+
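One payoff of the plain-int codes, as a minimal sketch (the REG macro appears later in this same patch, in code-stubs-arm.cc; the table name here is hypothetical): Register values can now sit in brace-initialized static tables with no dynamic initializers running.

    #define REG(Name) { kRegister_ ## Name ## _Code }
    // Hypothetical table: each entry is an aggregate initializer built from
    // the int constants above, so it is set up at compile time rather than
    // by a static constructor.
    static const Register kExampleRegs[] = { REG(r0), REG(ip), REG(no_reg) };
    #undef REG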
// Single word VFP register.
struct SwVfpRegister {
@@ -581,6 +604,7 @@ extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
+extern const Instr kBlxIp;
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
@@ -662,20 +686,18 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
- inline static void set_target_at(Address constant_pool_entry, Address target);
+ inline static void deserialization_set_special_target_at(
+ Address constant_pool_entry, Address target);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches to runtime code.
inline static void set_external_target_at(Address constant_pool_entry,
- Address target) {
- set_target_at(constant_pool_entry, target);
- }
+ Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
- static const int kCallTargetSize = kPointerSize;
- static const int kExternalTargetSize = kPointerSize;
+ static const int kSpecialTargetSize = kPointerSize;
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index b461b45a..c99e778a 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -114,9 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- // Load the initial map from the array function.
- __ ldr(scratch1, FieldMemOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -210,9 +208,7 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
- __ ldr(elements_array_storage,
- FieldMemOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
@@ -317,7 +313,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element;
+ has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments or one.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -422,6 +418,8 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ bind(&entry);
__ cmp(r4, r5);
__ b(lt, &loop);
+
+ __ bind(&finish);
__ mov(sp, r7);
// Remove caller arguments and receiver from the stack, setup return value and
@@ -434,8 +432,39 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ Jump(lr);
__ bind(&has_non_smi_element);
+ // Double values are handled by the runtime.
+ __ CheckMap(
+ r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
+ __ bind(&cant_transition_map);
__ UndoAllocationInNewSpace(r3, r4);
__ b(call_generic_code);
+
+ __ bind(&not_double);
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // r3: JSArray
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ r2,
+ r9,
+ &cant_transition_map);
+ __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ RecordWriteField(r3,
+ HeapObject::kMapOffset,
+ r2,
+ r9,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ Label loop2;
+ __ sub(r7, r7, Operand(kPointerSize));
+ __ bind(&loop2);
+ __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
+ __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+ __ cmp(r4, r5);
+ __ b(lt, &loop2);
+ __ b(&finish);
}
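The new slow path above, restated as a short sketch (element-kind names are from the diff; the helper names are hypothetical):

    // Filling a FAST_SMI_ONLY_ELEMENTS array and a non-smi shows up:
    //  - heap numbers: undo the allocation and fall back to generic code
    //    (the backing store would need to become double-based);
    //  - anything else: transition the map to FAST_ELEMENTS (write barrier
    //    on the map field) and resume copying the arguments in loop2.
    if (IsHeapNumber(element)) goto cant_transition_map;  // runtime handles it
    TransitionMap(array, FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS);
    goto loop2;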
@@ -667,7 +696,9 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
@@ -675,42 +706,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label slow, non_function_call;
- // Check that the function is not a smi.
- __ JumpIfSmi(r1, &non_function_call);
- // Check that the function is a JSFunction.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Jump to the function-specific construct stub.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // r0: number of arguments
- // r1: called object
- // r2: object type
- Label do_call;
- __ bind(&slow);
- __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
@@ -933,23 +928,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r4: JSObject
__ bind(&allocated);
__ push(r4);
+ __ push(r4);
- // Push the function and the allocated receiver from the stack.
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, kPointerSize));
- __ push(r1); // Constructor function.
- __ push(r4); // Receiver.
-
- // Reload the number of arguments from the stack.
- // r1: constructor function
+ // Reload the number of arguments and the constructor from the stack.
// sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+ // sp[1]: receiver
+ // sp[2]: constructor function
+ // sp[3]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+ __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
// Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -959,14 +946,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Copy arguments and receiver to the expression stack.
// r0: number of arguments
- // r2: address of last argument (caller sp)
// r1: constructor function
+ // r2: address of last argument (caller sp)
// r3: number of arguments (smi-tagged)
// sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
+ // sp[1]: receiver
+ // sp[2]: constructor function
+ // sp[3]: number of arguments (smi-tagged)
Label loop, entry;
__ b(&entry);
__ bind(&loop);
@@ -992,12 +978,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
NullCallWrapper(), CALL_AS_METHOD);
}
- // Pop the function from the stack.
- // sp[0]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ pop();
+ // Store offset of return address for deoptimizer.
+ if (!is_api_function && !count_constructions) {
+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+ }
// Restore context from the frame.
// r0: result
@@ -1117,7 +1101,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ CallStub(&stub);
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
@@ -1297,7 +1282,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
{ Label done;
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
__ b(ne, &done);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ push(r2);
@@ -1760,7 +1745,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&invoke);
__ Call(r3);
+ // Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
// Exit frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ Jump(lr);
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index c33df5cf..f772db9b 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -122,7 +122,6 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
-
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
@@ -157,20 +156,18 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ ldr(r3, MemOperand(sp, 0));
// Set up the object header.
- __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+ __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Set up the fixed slots.
+ // Set up the fixed slots, copy the global object from the previous context.
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
- // Copy the global object from the previous context.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -229,14 +226,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
- // Set up the fixed slots.
+ // Set up the fixed slots, copy the global object from the previous context.
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
-
- // Copy the global object from the previous context.
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
+ __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
@@ -326,8 +321,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
Label double_elements, check_fast_elements;
__ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
__ b(ne, &check_fast_elements);
GenerateFastCloneShallowArrayCommon(masm, 0,
COPY_ON_WRITE_ELEMENTS, &slow_case);
@@ -336,8 +330,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&check_fast_elements);
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &double_elements);
GenerateFastCloneShallowArrayCommon(masm, length_,
CLONE_ELEMENTS, &slow_case);
@@ -487,7 +480,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- static const uint32_t exponent_word_for_1 =
+ const uint32_t exponent_word_for_1 =
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
__ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
// 1, 0 and -1 all have 0 for the second word.
@@ -590,7 +583,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Label is_smi, done;
- __ JumpIfSmi(object, &is_smi);
+ // Smi-check
+ __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
+ // Heap number check
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
@@ -612,7 +607,6 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi to double using VFP instructions.
- __ SmiUntag(scratch1, object);
__ vmov(dst.high(), scratch1);
__ vcvt_f64_s32(dst, dst.high());
if (destination == kCoreRegisters) {
@@ -647,11 +641,10 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
- Label is_smi;
Label done;
Label not_in_int32_range;
- __ JumpIfSmi(object, &is_smi);
+ __ UntagAndJumpIfSmi(dst, object, &done);
__ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
__ cmp(scratch1, heap_number_map);
__ b(ne, not_number);
@@ -671,10 +664,6 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
scratch1,
scratch2,
scratch3);
- __ jmp(&done);
-
- __ bind(&is_smi);
- __ SmiUntag(dst, object);
__ bind(&done);
}
@@ -847,10 +836,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Label done;
- // Untag the object into the destination register.
- __ SmiUntag(dst, object);
- // Just return if the object is a smi.
- __ JumpIfSmi(object, &done);
+ __ UntagAndJumpIfSmi(dst, object, &done);
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
@@ -2338,7 +2324,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
__ cmp(ip, Operand(scratch2));
__ b(ne, &not_smi_result);
// Go slow on zero result to handle -0.
- __ tst(scratch1, Operand(scratch1));
+ __ cmp(scratch1, Operand(0));
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ Ret(ne);
// We need -0 if we were multiplying a negative number with 0 to get 0.
@@ -3310,8 +3296,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Check if cache matches: Double value is stored in uint32_t[2] array.
__ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
__ cmp(r2, r4);
- __ b(ne, &calculate);
- __ cmp(r3, r5);
+ __ cmp(r3, r5, eq);
__ b(ne, &calculate);
// Cache hit. Load result, cleanup and return.
Counters* counters = masm->isolate()->counters();
@@ -3454,6 +3439,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
+void InterruptStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
void MathPowStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope vfp3_scope(VFP3);
const Register base = r1;
@@ -3468,7 +3458,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch = r9;
const Register scratch2 = r7;
- Label call_runtime, done, exponent_not_smi, int_exponent;
+ Label call_runtime, done, int_exponent;
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
@@ -3479,7 +3469,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(base, &base_is_smi);
+ __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
__ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
@@ -3488,16 +3478,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&unpack_exponent);
__ bind(&base_is_smi);
- __ SmiUntag(base);
- __ vmov(single_scratch, base);
+ __ vmov(single_scratch, scratch);
__ vcvt_f64_s32(double_base, single_scratch);
__ bind(&unpack_exponent);
- __ JumpIfNotSmi(exponent, &exponent_not_smi);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ bind(&exponent_not_smi);
__ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
@@ -3505,11 +3491,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
FieldMemOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
// Base is already in double_base.
- __ JumpIfNotSmi(exponent, &exponent_not_smi);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ bind(&exponent_not_smi);
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
@@ -3582,13 +3565,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&int_exponent_convert);
__ vcvt_u32_f64(single_scratch, double_exponent);
- __ vmov(exponent, single_scratch);
+ __ vmov(scratch, single_scratch);
}
// Calculate power with integer exponent.
__ bind(&int_exponent);
- __ mov(scratch, exponent); // Back up exponent.
+ // Get two copies of exponent in the registers scratch and exponent.
+ if (exponent_type_ == INTEGER) {
+ __ mov(scratch, exponent);
+ } else {
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ mov(exponent, scratch);
+ }
__ vmov(double_scratch, double_base); // Back up base.
__ vmov(double_result, 1.0);
@@ -3690,17 +3679,6 @@ void CEntryStub::GenerateAheadOfTime() {
}
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- __ Throw(r0);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, r0);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -3881,13 +3859,27 @@ void CEntryStub::Generate(MacroAssembler* masm) {
true);
__ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate);
+ __ mov(r0, Operand(false, RelocInfo::NONE));
+ __ mov(r2, Operand(external_caught));
+ __ str(r0, MemOperand(r2));
+
+ // Set pending exception and r0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ str(r0, MemOperand(r2));
+ // Fall through to the next label.
__ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
+ __ ThrowUncatchable(r0);
__ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
+ __ Throw(r0);
}
@@ -3980,7 +3972,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// handler block in this code object, so its index is 0.
__ bind(&invoke);
// Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
+ __ PushTryHandler(StackHandler::JS_ENTRY, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
@@ -4098,11 +4090,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck()) {
Label miss;
- __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
- __ cmp(function, ip);
+ __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ b(ne, &miss);
- __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
- __ cmp(map, ip);
+ __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
__ b(ne, &miss);
__ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -4247,7 +4237,7 @@ Register InstanceofStub::right() { return r1; }
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
- static const int kDisplacement =
+ const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
// Check that the key is a smi.
@@ -4632,10 +4622,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// sp[8]: subject string
// sp[12]: JSRegExp object
- static const int kLastMatchInfoOffset = 0 * kPointerSize;
- static const int kPreviousIndexOffset = 1 * kPointerSize;
- static const int kSubjectOffset = 2 * kPointerSize;
- static const int kJSRegExpOffset = 3 * kPointerSize;
+ const int kLastMatchInfoOffset = 0 * kPointerSize;
+ const int kPreviousIndexOffset = 1 * kPointerSize;
+ const int kSubjectOffset = 2 * kPointerSize;
+ const int kJSRegExpOffset = 3 * kPointerSize;
Label runtime, invoke_regexp;
@@ -4656,7 +4646,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
__ b(eq, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -4727,8 +4717,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(last_match_info_elements,
FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.
@@ -4835,8 +4824,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 8;
- static const int kParameterRegisters = 4;
+ const int kRegExpExecuteArguments = 8;
+ const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
@@ -4931,10 +4920,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label termination_exception;
__ b(eq, &termination_exception);
- __ Throw(r0); // Expects thrown value in r0.
+ __ Throw(r0);
__ bind(&termination_exception);
- __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
+ __ ThrowUncatchable(r0);
__ bind(&failure);
// For failure and exception return null.
@@ -5082,11 +5071,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set input, index and length fields from arguments.
__ ldr(r1, MemOperand(sp, kPointerSize * 0));
+ __ ldr(r2, MemOperand(sp, kPointerSize * 1));
+ __ ldr(r6, MemOperand(sp, kPointerSize * 2));
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
- __ ldr(r1, MemOperand(sp, kPointerSize * 1));
- __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
- __ ldr(r1, MemOperand(sp, kPointerSize * 2));
- __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
+ __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
+ __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
// Fill out the elements FixedArray.
// r0: JSArray, tagged.
@@ -5108,9 +5097,9 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// r3: Start of elements in FixedArray.
// r5: Number of elements to fill.
Label loop;
- __ tst(r5, Operand(r5));
+ __ cmp(r5, Operand(0));
__ bind(&loop);
- __ b(le, &done); // Jump if r1 is negative or zero.
+ __ b(le, &done); // Jump if r5 is negative or zero.
__ sub(r5, r5, Operand(1), SetCC);
__ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
__ jmp(&loop);
@@ -5124,24 +5113,48 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void CallFunctionStub::FinishCode(Handle<Code> code) {
- code->set_has_function_cache(false);
-}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // r1 : the function to call
+ // r2 : cache cell for call target
+ Label done;
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->the_hole_value());
-void CallFunctionStub::Clear(Heap* heap, Address address) {
- UNREACHABLE();
-}
+ // Load the cache state into r3.
+ __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(r3, r1);
+ __ b(eq, &done);
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &done);
+
+ // A monomorphic miss (i.e., the cache is not uninitialized) goes
+ // megamorphic.
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
+ __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
+ // An uninitialized cache is patched with the function.
+ __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
+ // No need for a write barrier here - cells are rescanned.
-Object* CallFunctionStub::GetCachedValue(Address address) {
- UNREACHABLE();
- return NULL;
+ __ bind(&done);
}
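The cache-cell protocol implemented above, as a plain C++ sketch (illustrative only; NextCellState is a hypothetical name, and the sentinels mirror the ASSERT_EQ lines):

    // The cell holds one of: the hole (uninitialized), a JSFunction
    // (monomorphic), or undefined (megamorphic).
    Object* NextCellState(Object* cell, Object* fn,
                          Object* the_hole, Object* undefined) {
      if (cell == fn || cell == undefined) return cell;  // hit, or already mega
      if (cell == the_hole) return fn;                   // first call: go mono
      return undefined;                                  // mono miss: go mega
    }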
void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
+ // r2 : cache cell for call target
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -5219,6 +5232,48 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // r0 : number of arguments
+ // r1 : the function to call
+ // r2 : cache cell for call target
+ Label slow, non_function_call;
+
+ // Check that the function is not a smi.
+ __ JumpIfSmi(r1, &non_function_call);
+ // Check that the function is a JSFunction.
+ __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ b(ne, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
+
+ // Jump to the function-specific construct stub.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // r0: number of arguments
+ // r1: called object
+ // r3: object type
+ Label do_call;
+ __ bind(&slow);
+ __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(ne, &non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
+ __ bind(&non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
+ // Set expected number of arguments to zero (not changing r0).
+ __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ SetCallKind(r5, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
void CompareStub::PrintName(StringStream* stream) {
@@ -5370,8 +5425,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
__ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result_, Operand(ip));
+ __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case_);
__ bind(&exit_);
}
@@ -5660,7 +5714,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// scratch: -
// Perform a number of probes in the symbol table.
- static const int kProbes = 4;
+ const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
@@ -5785,9 +5839,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// 0 <= from <= to <= string.length.
// If any of these assumptions fail, we call the runtime system.
- static const int kToOffset = 0 * kPointerSize;
- static const int kFromOffset = 1 * kPointerSize;
- static const int kStringOffset = 2 * kPointerSize;
+ const int kToOffset = 0 * kPointerSize;
+ const int kFromOffset = 1 * kPointerSize;
+ const int kStringOffset = 2 * kPointerSize;
__ Ldrd(r2, r3, MemOperand(sp, kToOffset));
STATIC_ASSERT(kFromOffset == kToOffset + 4);
@@ -5799,10 +5853,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
// If either to or from had the smi tag bit set, then carry is set now.
__ b(cs, &runtime); // Either "from" or "to" is not a smi.
- __ b(mi, &runtime); // From is negative.
-
+ // We want to bail out to the runtime here if "from" is negative. In that
+ // case, the next instruction is not executed and we fall through to bailing
+ // out to the runtime. pl is the opposite of mi.
// Both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC);
+ __ sub(r2, r2, Operand(r3), SetCC, pl);
__ b(mi, &runtime); // Fail if from > to.
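The SetCC/pl trick above leans on ARM conditional execution; an equivalent rendering in C-like pseudocode (a sketch, with "from" and "to" the untagged values in r3 and r2):

    if (from >= 0) {            // the sub executes only under pl
      flags = to - from;        // SetCC: mi now means from > to
    }                           // if from < 0, the earlier mi flags survive
    if (flags == mi) goto runtime;  // one branch catches both failure cases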
// Make sure first argument is a string.
@@ -5875,9 +5930,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
- __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ add(r3, r3, Operand(r5, ASR, 1));
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+ __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+ __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
// Update instance type.
__ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
@@ -5914,8 +5969,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
__ bind(&set_slice_header);
__ mov(r3, Operand(r3, LSL, 1));
- __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
__ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+ __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
__ jmp(&return_r0);
__ bind(&copy_routine);
@@ -6020,7 +6075,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ tst(length, Operand(length));
+ __ cmp(length, Operand(0));
__ b(ne, &compare_chars);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
@@ -6053,7 +6108,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
- __ tst(min_length, Operand(min_length));
+ __ cmp(min_length, Operand(0));
__ b(eq, &compare_lengths);
// Compare loop.
@@ -6505,15 +6560,15 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
- Label unordered;
+ Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &generic_stub);
__ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
+ __ b(ne, &maybe_undefined1);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
+ __ b(ne, &maybe_undefined2);
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP3 is unsupported.
@@ -6537,14 +6592,28 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ mov(r0, Operand(LESS), LeaveCC, lt);
__ mov(r0, Operand(GREATER), LeaveCC, gt);
__ Ret();
-
- __ bind(&unordered);
}
+ __ bind(&unordered);
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ bind(&maybe_undefined1);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &miss);
+ __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, &maybe_undefined2);
+ __ jmp(&unordered);
+ }
+
+ __ bind(&maybe_undefined2);
+ if (Token::IsOrderedRelationalCompareOp(op_)) {
+ __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &unordered);
+ }
+
__ bind(&miss);
GenerateMiss(masm);
}
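Background for the new maybe_undefined paths (a JS-semantics note, not from the patch): for ordered relational operators, undefined converts to NaN, so the comparison is unordered and must return false rather than falling into the miss handler.

    // 1 < undefined    -> false   (ToNumber(undefined) is NaN: unordered)
    // undefined <= 1   -> false   (same, with undefined on the left)
    // 1 == undefined   -> false   (equality is not an ordered op; the
    //                              stub can still miss to the generic path)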
@@ -6592,6 +6661,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::STRINGS);
Label miss;
+ bool equality = Token::IsEqualityOp(op_);
+
// Registers containing left and right operands respectively.
Register left = r1;
Register right = r0;
@@ -6625,28 +6696,39 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Check that both strings are symbols. If they are, we're done
// because we already know they are not identical.
- ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp3, tmp1, Operand(tmp2));
- __ tst(tmp3, Operand(kIsSymbolMask));
- // Make sure r0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(r0));
- __ Ret(ne);
+ if (equality) {
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(tmp3, tmp1, Operand(tmp2));
+ __ tst(tmp3, Operand(kIsSymbolMask));
+ // Make sure r0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(r0));
+ __ Ret(ne);
+ }
// Check that both strings are sequential ASCII.
Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
- &runtime);
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(
+ tmp1, tmp2, tmp3, tmp4, &runtime);
// Compare flat ASCII strings. Returns when done.
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
+ if (equality) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2, tmp3);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(
+ masm, left, right, tmp1, tmp2, tmp3, tmp4);
+ }
// Handle more complex cases in runtime.
__ bind(&runtime);
__ Push(left, right);
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ if (equality) {
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ } else {
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -6757,7 +6839,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
- // (their names are the null value).
+ // (their names are the hole value).
for (int i = 0; i < kInlinedProbes; i++) {
// scratch0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
@@ -6785,10 +6867,17 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ b(eq, done);
if (i != kInlinedProbes - 1) {
+ // Load the hole ready for use below:
+ __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+
// Stop if found the property.
__ cmp(entity_name, Operand(Handle<String>(name)));
__ b(eq, miss);
+ Label the_hole;
+ __ cmp(entity_name, tmp);
+ __ b(eq, &the_hole);
+
// Check if the entry name is not a symbol.
__ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ ldrb(entity_name,
@@ -6796,6 +6885,8 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ tst(entity_name, Operand(kIsSymbolMask));
__ b(eq, miss);
+ __ bind(&the_hole);
+
// Restore the properties.
__ ldr(properties,
FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -6811,7 +6902,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ mov(r1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
__ CallStub(&stub);
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
@@ -6888,7 +6979,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
__ mov(scratch2, Operand(r2));
__ ldm(ia_w, sp, spill_mask);
@@ -6994,41 +7085,45 @@ struct AheadOfTimeWriteBarrierStubList {
RememberedSetAction action;
};
+#define REG(Name) { kRegister_ ## Name ## _Code }
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { r6, r4, r7, EMIT_REMEMBERED_SET },
- { r6, r2, r7, EMIT_REMEMBERED_SET },
+ { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
+ { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
- { r3, r4, r5, EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
- { r4, r1, r2, OMIT_REMEMBERED_SET },
+ { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { r1, r2, r3, EMIT_REMEMBERED_SET },
- { r3, r2, r1, EMIT_REMEMBERED_SET },
+ { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
// Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { r2, r1, r3, EMIT_REMEMBERED_SET },
- { r3, r1, r2, EMIT_REMEMBERED_SET },
+ { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { r4, r2, r3, EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
+ { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
- { r2, r3, r9, EMIT_REMEMBERED_SET },
+ { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
+ { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateDoubleToObject
- { r6, r2, r0, EMIT_REMEMBERED_SET },
- { r2, r6, r9, EMIT_REMEMBERED_SET },
+ { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
+ { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
- { r5, r0, r6, EMIT_REMEMBERED_SET },
+ { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
// Null termination.
- { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+ { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
+#undef REG
bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
@@ -7055,7 +7150,7 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,
@@ -7311,7 +7406,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r10,
+ __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
&slow_elements);
__ Ret();
}
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 3371e8a6..befd8f2d 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,6 +37,22 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
+ switch (type) {
+ case TranscendentalCache::SIN: return &sin;
+ case TranscendentalCache::COS: return &cos;
+ case TranscendentalCache::TAN: return &tan;
+ case TranscendentalCache::LOG: return &log;
+ default: UNIMPLEMENTED();
+ }
+ return NULL;
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+ return &sqrt;
+}
+
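Assumed caller shape for the new helpers (a sketch; per its use here, UnaryMathFunction is taken to be a double(*)(double) typedef, and on ARM the factories simply hand back the libm routines):

    UnaryMathFunction f = CreateTranscendentalFunction(TranscendentalCache::SIN);
    double y = f(0.5);                       // same as sin(0.5)
    double r = CreateSqrtFunction()(2.0);    // same as sqrt(2.0)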
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -90,11 +106,16 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
- Label loop, entry, convert_hole, gc_required;
+ Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
- __ push(lr);
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ b(eq, &only_change_map);
+
+ __ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedArray
// r5: number of elements (smi-tagged)
@@ -104,10 +125,10 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ add(lr, lr, Operand(r5, LSL, 2));
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedDoubleArray, not tagged as heap object
+ // Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
- // Set destination FixedDoubleArray's length.
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Update receiver's map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
@@ -117,7 +138,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
r9,
kLRHasBeenSaved,
kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
+ OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ add(r3, r6, Operand(kHeapObjectTag));
@@ -146,6 +167,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ b(&entry);
+ __ bind(&only_change_map);
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ b(&done);
+
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(lr);
@@ -155,10 +188,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ bind(&loop);
__ ldr(r9, MemOperand(r3, 4, PostIndex));
// r9: current element
- __ JumpIfNotSmi(r9, &convert_hole);
+ __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store.
- __ SmiUntag(r9);
if (vfp3_supported) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r9);
@@ -181,6 +213,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
+ // Restore a "smi-untagged" heap object.
+ __ SmiTag(r9);
+ __ orr(r9, r9, Operand(1));
__ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
__ Assert(eq, "object found in smi-only array");
}
@@ -192,6 +227,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
if (!vfp3_supported) __ Pop(r1, r0);
__ pop(lr);
+ __ bind(&done);
}
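The empty-array shortcut added above, restated (a sketch; the helper names are hypothetical):

    // An empty backing store needs no conversion: only the map changes,
    // so skip allocating the FixedDoubleArray and the whole copy loop.
    if (array->elements() == heap->empty_fixed_array()) {
      array->set_map(target_map);   // with a write barrier on the map field
      return;
    }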
@@ -205,12 +241,16 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
- Label entry, loop, convert_hole, gc_required;
+ Label entry, loop, convert_hole, gc_required, only_change_map;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ b(eq, &only_change_map);
__ push(lr);
__ Push(r3, r2, r1, r0);
-
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedDoubleArray
// r5: number of elements (smi-tagged)
@@ -220,10 +260,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(r0, r0, Operand(r5, LSL, 1));
__ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
+ // Set destination FixedArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
- // Set destination FixedDoubleArray's length.
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Prepare for conversion loop.
__ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
@@ -279,16 +319,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ b(lt, &loop);
__ Pop(r3, r2, r1, r0);
- // Update receiver's map.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray.
__ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
__ RecordWriteField(r2,
@@ -300,6 +330,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(lr);
+
+ __ bind(&only_change_map);
+ // Update receiver's map.
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
}
@@ -325,8 +367,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ add(index, index, Operand(result, ASR, kSmiTagSize));
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ add(index, index, Operand(result, ASR, kSmiTagSize));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
@@ -336,8 +378,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result, ip);
+ __ CompareRoot(result, Heap::kEmptyStringRootIndex);
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
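
The two reorderings above are pure scheduling fixes; the logic of the indirect-string walk is unchanged. As a rough C++ restatement (toy types, not the real V8 string classes) of what StringCharLoadGenerator does before the actual character load:

    enum StringKind { kFlat, kSliced, kCons };

    struct ToyString {
      StringKind kind;
      ToyString* parent;   // sliced: backing string
      int offset;          // sliced: start index within the parent
      ToyString* first;    // cons: left half
      ToyString* second;   // cons: right half; empty => already flattened
      int length;
    };

    // Resolve slices and flattened cons strings; a cons with a non-empty
    // second half is left to the runtime, as in the generated code.
    bool ResolveIndirect(ToyString** s, int* index) {
      while ((*s)->kind != kFlat) {
        if ((*s)->kind == kSliced) {
          *index += (*s)->offset;   // bias the index into the parent
          *s = (*s)->parent;
        } else if ((*s)->second->length == 0) {
          *s = (*s)->first;
        } else {
          return false;             // the call_runtime path
        }
      }
      return true;
    }
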
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 83741030..96139a25 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -251,14 +251,6 @@ void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- // Calling convention for construct call (from builtins-arm.cc)
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
-}
-
-
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
@@ -268,6 +260,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
// -----------------------------------
@@ -275,6 +268,37 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-arm.cc).
+ // ----------- S t a t e -------------
+ // -- r1 : function
+ // -- r2 : cache cell for call target
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm.cc)
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments (not smi)
+ // -- r1 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm.cc)
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments (not smi)
+ // -- r1 : constructor function
+ // -- r2 : cache cell for call target
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
+}
+
+
void Debug::GenerateSlot(MacroAssembler* masm) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
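
Throughout this file the two integer arguments to Generate_DebugBreakCallHelper are register bitmasks. A small sketch of the convention, with semantics inferred from the call sites above rather than stated in this diff:

    #include <cstdint>

    typedef uint32_t RegList;
    inline RegList Bit(int code) { return 1u << code; }  // stand-in for rN.bit()

    // First mask: registers holding tagged objects, preserved verbatim
    // across the break. Second mask: registers holding raw word values
    // (e.g. an argument count), which must not be scanned as pointers.
    // GenerateCallConstructStubRecordDebugBreak above passes, in effect:
    //   RegList object_regs     = Bit(1) | Bit(2);  // r1 (function), r2 (cell)
    //   RegList non_object_regs = Bit(0);           // r0 (argc, not a smi)
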
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 76d89541..7b2a3c4f 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -108,6 +108,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
+static const int32_t kBranchBeforeStackCheck = 0x2a000001;
+static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+
+
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
@@ -118,10 +122,16 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// 2a 00 00 01 bcs ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
- ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
- (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
+ ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
+ if (FLAG_count_based_interrupts) {
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }
// We patch the code to the following form:
// e1 5d 00 0c cmp sp, <limit>
@@ -155,13 +165,21 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
- ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
- ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
+ ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_after - 2 * kInstrSize)));
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(+4, cs);
+ if (FLAG_count_based_interrupts) {
+ patcher.masm()->b(+16, pl);
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ patcher.masm()->b(+4, cs);
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
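
The two magic constants introduced at the top of this hunk are pre-encoded A32 branches; a quick decoder (plain C++, using the usual pc+8 pipeline bias) shows how they line up with the b(+4, cs) and b(+16, pl) calls:

    #include <cstdint>
    #include <cstdio>

    // A32 branch: cond[31:28] | 101 0 | imm24; target = pc + 8 + 4*imm24.
    void DecodeBranch(uint32_t insn) {
      uint32_t cond = insn >> 28;
      int32_t imm24 = (int32_t)(insn << 8) >> 8;  // sign-extend low 24 bits
      std::printf("cond=%x, branches to pc+%d\n", cond, 8 + 4 * imm24);
    }

    // DecodeBranch(0x2a000001) -> cond=2 (cs), pc+12: skips the two
    //   following instructions, i.e. the "bcs ok" before a stack check.
    // DecodeBranch(0x5a000004) -> cond=5 (pl), pc+24: the equivalent
    //   branch over the longer count-based interrupt sequence.
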
@@ -351,7 +369,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@@ -373,16 +390,13 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
- input_offset -= (parameter_count * kPointerSize);
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
@@ -392,7 +406,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
@@ -404,7 +417,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// A marker value is used in place of the context.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
@@ -415,7 +427,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@@ -425,7 +436,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Number of incoming arguments.
output_offset -= kPointerSize;
- input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@@ -445,6 +455,119 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
+void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating construct stub => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = 7 * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::CONSTRUCT);
+
+ // Construct stub cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ uint32_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be taken from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ // Constructor function being invoked by the stub.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The newly allocated object was passed as receiver in the artificial
+ // constructor stub environment created by HEnvironment::CopyForInlining().
+ output_offset -= kPointerSize;
+ value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ construct_stub->instruction_start() +
+ isolate_->heap()->construct_stub_deopt_pc_offset()->value());
+ output_frame->SetPc(pc);
+}
+
+
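
Slot by slot, the code above lays down a seven-word fixed frame below the translated parameters. A hypothetical mirror of that layout (field names mine, lowest address -- the frame top -- first):

    #include <cstdint>

    struct ConstructStubFixedFrame {  // illustration only
      intptr_t receiver;          // copy of the allocated object (top)
      intptr_t constructor;       // function being invoked by the stub
      intptr_t argc;              // Smi::FromInt(height - 1)
      intptr_t construct_marker;  // Smi::FromInt(StackFrame::CONSTRUCT)
      intptr_t context;           // taken from the previous frame
      intptr_t callers_fp;        // this frame's fp points here
      intptr_t callers_pc;        // 7 * kPointerSize fixed slots in total
    };
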
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
@@ -557,9 +680,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = reinterpret_cast<intptr_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
- if (is_topmost) {
- output_frame->SetRegister(cp.code(), value);
- }
+ output_frame->SetContext(value);
+ if (is_topmost) output_frame->SetRegister(cp.code(), value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 66542639..0cbd46ed 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,6 +34,7 @@
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
+#include "isolate-inl.h"
#include "parser.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -109,6 +110,13 @@ class JumpPatchSite BASE_EMBEDDED {
};
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
+int FullCodeGenerator::self_optimization_header_size() {
+ UNREACHABLE();
+ return 24;
+}
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@@ -123,12 +131,12 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
- ASSERT(info_ == NULL);
- info_ = info;
- scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -265,11 +273,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- int ignored = 0;
VariableProxy* proxy = scope()->function();
ASSERT(proxy->var()->mode() == CONST ||
proxy->var()->mode() == CONST_HARMONY);
- EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored);
+ ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL);
}
VisitDeclarations(scope()->declarations());
}
@@ -310,19 +318,68 @@ void FullCodeGenerator::ClearAccumulator() {
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ mov(r2, Operand(profiling_counter_));
+ __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
+ __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+ // Self-optimization is a one-off thing: if it fails, don't try again.
+ reset_value = Smi::kMaxValue;
+ }
+ if (isolate()->IsDebuggerActive()) {
+ // Detect debug break requests as soon as possible.
+ reset_value = 10;
+ }
+ __ mov(r2, Operand(profiling_counter_));
+ __ mov(r3, Operand(Smi::FromInt(reset_value)));
+ __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 142;
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
+
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ b(pl, &ok);
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ }
+
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
+
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
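
The counter mechanics above are easier to see outside assembly. A plain C++ sketch of the count-based back-edge scheme (constant names copied from the code, the rest assumed):

    #include <algorithm>

    const int kMaxBackEdgeWeight = 127;
    const int kBackEdgeDistanceDivisor = 142;

    // Each back edge is charged a weight proportional to the code distance
    // it jumps over; when the budget is exhausted, the interrupt fires and
    // the counter is reset (EmitProfilingCounterDecrement / ...Reset).
    bool OnBackEdge(int* counter, int code_distance, int reset_value) {
      int weight = std::min(kMaxBackEdgeWeight,
                            std::max(1, code_distance / kBackEdgeDistanceDivisor));
      *counter -= weight;
      if (*counter >= 0) return false;  // the "b(pl, &ok)" fast path
      *counter = reset_value;           // slow path: InterruptStub + reset
      return true;
    }
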
@@ -344,6 +401,32 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else if (FLAG_weighted_back_edges) {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceDivisor));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ b(pl, &ok);
+ __ push(r0);
+ if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+ __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(r2);
+ __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+ } else {
+ InterruptStub stub;
+ __ CallStub(&stub);
+ }
+ __ pop(r0);
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+ }
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
@@ -706,8 +789,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
VariableMode mode,
- FunctionLiteral* function,
- int* global_count) {
+ FunctionLiteral* function) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
@@ -716,7 +798,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
(mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
- ++(*global_count);
+ ++global_count_;
break;
case Variable::PARAMETER:
@@ -801,9 +883,6 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
}
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
@@ -865,7 +944,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ cmp(r0, Operand(0));
@@ -918,6 +997,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r0, null_value);
__ b(eq, &exit);
+ PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r0, &convert);
@@ -939,48 +1020,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- Label next;
- // Preload a couple of values used in the loop.
- Register empty_fixed_array_value = r6;
- __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r7;
- __ LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- __ mov(r1, r0);
- __ bind(&next);
-
- // Check that there are no elements. Register r1 contains the
- // current JS object we've reached through the prototype chain.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ cmp(r2, empty_fixed_array_value);
- __ b(ne, &call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in r2 for the subsequent
- // prototype load.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
- __ JumpIfSmi(r3, &call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (r3). This is the case if the next enumeration
- // index field does not contain a smi.
- __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
- __ JumpIfSmi(r3, &call_runtime);
-
- // For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- __ cmp(r1, r0);
- __ b(eq, &check_prototype);
- __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ cmp(r3, empty_fixed_array_value);
- __ b(ne, &call_runtime);
-
- // Load the prototype from the map and loop if non-null.
- __ bind(&check_prototype);
- __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
- __ cmp(r1, null_value);
- __ b(ne, &next);
+ __ CheckEnumCache(null_value, &call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -1020,6 +1060,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register r0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
+
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(
+ Handle<Object>(
+ Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ RecordTypeFeedbackCell(stmt->PrepareId(), cell);
+ __ LoadHeapObject(r1, cell);
+ __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
+ __ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
+
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -1033,6 +1083,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
// Load the current count to r0, load the length to r1.
__ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
@@ -1076,7 +1127,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(result_register(), r3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitAssignment(stmt->each(), stmt->AssignmentId());
+ EmitAssignment(stmt->each());
}
// Generate code for the body of the loop.
@@ -1089,7 +1140,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
@@ -1097,6 +1148,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Drop(5);
// Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
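
The big deletion above folds the open-coded prototype-chain walk into MacroAssembler::CheckEnumCache. As a toy C++ restatement of the conditions it checks (the boolean fields are simplifications of the root and descriptor comparisons in the removed lines):

    struct Map;
    struct JSObject { bool elements_empty; Map* map; };
    struct Map {
      bool has_enum_cache;    // non-smi descriptors with an enum cache
      bool enum_cache_empty;  // cache bridge equals the empty fixed array
      JSObject* prototype;    // null terminates the chain
    };

    // True when for-in may iterate via the receiver map's enum cache
    // instead of calling the runtime.
    bool CanUseEnumCache(JSObject* receiver) {
      for (JSObject* o = receiver; o != nullptr; o = o->map->prototype) {
        if (!o->elements_empty) return false;
        if (!o->map->has_enum_cache) return false;
        if (o != receiver && !o->map->enum_cache_empty) return false;
      }
      return true;
    }
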
@@ -1190,7 +1242,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, mode);
+ CallIC(ic, mode);
}
@@ -1274,7 +1326,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
break;
}
@@ -1414,6 +1466,16 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ __ push(r1);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1449,6 +1511,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1474,7 +1537,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, key->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1497,21 +1560,29 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
case ObjectLiteral::Property::SETTER:
- // Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- VisitForStackValue(key);
- __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0)));
- __ push(r1);
- VisitForStackValue(value);
- __ CallRuntime(Runtime::kDefineAccessor, 4);
+ accessor_table.lookup(key)->second->setter = value;
break;
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
+ __ push(r0);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ mov(r0, Operand(Smi::FromInt(NONE)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ ldr(r0, MemOperand(sp));
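
The accessor_table change batches getter/setter definitions: instead of one Runtime::kDefineAccessor call per accessor, pairs sharing a key are collected first and defined together. A minimal sketch of the pairing step, with std::map standing in for V8's zone-allocated AccessorTable:

    #include <map>
    #include <string>

    struct Expression;  // stands in for the AST node type

    struct AccessorPair {
      Expression* getter = nullptr;
      Expression* setter = nullptr;
    };

    void Record(std::map<std::string, AccessorPair>* table,
                const std::string& key, bool is_getter, Expression* value) {
      AccessorPair& pair = (*table)[key];  // lookup-or-insert
      (is_getter ? pair.getter : pair.setter) = value;
    }
    // Afterwards each table entry costs a single
    // Runtime::kDefineOrRedefineAccessorProperty call, pushing receiver,
    // key, getter (or null), setter (or null) and the attribute word.
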
@@ -1734,7 +1805,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1742,7 +1813,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
}
@@ -1769,7 +1840,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -1820,7 +1891,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ mov(ip, Operand(scratch1, ASR, 31));
__ cmp(ip, Operand(scratch2));
__ b(ne, &stub_call);
- __ tst(scratch1, Operand(scratch1));
+ __ cmp(scratch1, Operand(0));
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ b(ne, &done);
__ add(scratch2, right, Operand(left), SetCC);
@@ -1852,13 +1923,13 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@@ -1893,7 +1964,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic);
+ CallIC(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1906,11 +1977,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic);
+ CallIC(ic);
break;
}
}
- PrepareForBailoutForId(bailout_ast_id, TOS_REG);
context()->Plug(r0);
}
@@ -1924,7 +1994,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier.
@@ -2042,7 +2112,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2088,7 +2158,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2122,6 +2192,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
}
}
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id) {
+ ic_total_count_++;
+ __ Call(code, rmode, ast_id);
+}
+
void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
@@ -2139,7 +2217,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ Call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2172,7 +2250,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2379,9 +2457,23 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- Handle<Code> construct_builtin =
- isolate()->builtins()->JSConstructCall();
- __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+ // Record call targets in unoptimized code, but not in the snapshot.
+ CallFunctionFlags flags;
+ if (!Serializer::enabled()) {
+ flags = RECORD_CALL_TARGET;
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->id(), cell);
+ __ mov(r2, Operand(cell));
+ } else {
+ flags = NO_CALL_FUNCTION_FLAGS;
+ }
+
+ CallConstructStub stub(flags);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
}
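
For context on the cell seeded above: the uninitialized sentinel lets CallConstructStub tell "never called" apart from a recorded target. A toy version of the run-time update (names and the megamorphic fallback assumed from the surrounding design, not spelled out in this diff):

    struct Cell { const void* value; };

    static const char kUninitialized = 0;  // unique sentinel addresses
    static const char kMegamorphic = 0;

    void RecordCallTarget(Cell* cell, const void* constructor) {
      if (cell->value == &kUninitialized) {
        cell->value = constructor;          // first call: go monomorphic
      } else if (cell->value != constructor && cell->value != &kMegamorphic) {
        cell->value = &kMegamorphic;        // conflicting targets observed
      }
    }
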
@@ -2932,6 +3024,50 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 2);
+ ASSERT_NE(NULL, args->at(1)->AsLiteral());
+ Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label runtime, done;
+ Register object = r0;
+ Register result = r0;
+ Register scratch0 = r9;
+ Register scratch1 = r1;
+
+#ifdef DEBUG
+ __ AbortIfSmi(object);
+ __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
+ __ Assert(eq, "Trying to get date field from non-date.");
+#endif
+
+ if (index->value() == 0) {
+ __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch1, Operand(stamp));
+ __ ldr(scratch1, MemOperand(scratch1));
+ __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ cmp(scratch1, scratch0);
+ __ b(ne, &runtime);
+ __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch1);
+ __ mov(r1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+ context()->Plug(r0);
+}
+
+
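
EmitDateField's fast path hinges on a global stamp: cached JSDate fields stay valid only while the object's snapshot of the date-cache stamp matches the current one. A C++ sketch (layout and names assumed; the real code also skips the cache entirely for index >= JSDate::kFirstUncachedField):

    #include <cstdint>

    struct DateCache { intptr_t stamp; };
    struct JSDate {
      intptr_t value;        // field 0, always valid
      intptr_t cache_stamp;  // snapshot of DateCache::stamp at fill time
      intptr_t fields[8];    // cached year, month, day, ...
    };

    intptr_t GetDateField(JSDate* date, int index, DateCache* cache,
                          intptr_t (*runtime)(JSDate*, int)) {
      if (index == 0) return date->value;
      if (date->cache_stamp == cache->stamp) return date->fields[index - 1];
      return runtime(date, index);  // recompute and refill the cache
    }
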
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3711,7 +3847,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- __ Call(ic, mode, expr->id());
+ CallIC(ic, mode, expr->id());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3866,7 +4002,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(r0);
}
@@ -3977,7 +4113,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4009,7 +4145,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4026,7 +4162,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4052,7 +4188,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- __ Call(ic);
+ CallIC(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4235,7 +4371,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index dfd4d2e3..e8436578 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -399,7 +399,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5);
+ masm, flags, r1, r2, r3, r4, r5, r6);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -438,7 +438,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5);
+ masm, flags, r1, r2, r3, r4, r5, r6);
__ bind(&miss);
}
@@ -706,7 +706,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags =
Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r0, r2, r3, r4, r5);
+ masm, flags, r0, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1312,14 +1312,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label slow, array, extra, check_if_double_array;
Label fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
+ Label transition_smi_elements, finish_object_store, non_double_value;
+ Label transition_double_elements;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
- Register elements = r3; // Elements array of the receiver.
+ Register receiver_map = r3;
Register elements_map = r6;
- Register receiver_map = r7;
+ Register elements = r7; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1417,9 +1419,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ Ret();
__ bind(&non_smi_value);
- // Escape to slow case when writing non-smi into smi-only array.
- __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, scratch_value,
+ &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(address));
@@ -1445,12 +1449,56 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
key,
receiver,
elements,
+ r3,
r4,
r5,
r6,
- r7,
- &slow);
+ &transition_double_elements);
__ Ret();
+
+ __ bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
+ __ b(ne, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ r4,
+ &slow);
+ ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double: transition FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ r4,
+ &slow);
+ ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that's not
+ // a HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+ // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ r4,
+ &slow);
+ ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
}
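
The three new transition blocks implement a one-way widening lattice: FAST_SMI_ONLY_ELEMENTS -> FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS, and FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS. A compact restatement of the choice the store makes (enumerators copied from the diff, helper shape assumed):

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS
    };

    ElementsKind TargetKindForStore(ElementsKind current,
                                    bool value_is_smi,
                                    bool value_is_heap_number) {
      if (value_is_smi) return current;  // smis fit everywhere
      if (current == FAST_SMI_ONLY_ELEMENTS) {
        // the transition_smi_elements block above
        return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      }
      if (current == FAST_DOUBLE_ELEMENTS && !value_is_heap_number) {
        return FAST_ELEMENTS;            // transition_double_elements above
      }
      return current;                    // already general enough
    }
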
@@ -1468,7 +1516,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5);
+ masm, flags, r1, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 846680f4..cdc1947d 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -440,7 +440,7 @@ LOperand* LChunk::GetNextSpillSlot(bool is_double) {
void LChunk::MarkEmptyBlocks() {
- HPhase phase("Mark empty blocks", this);
+ HPhase phase("L_Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
int first = block->first_instruction_index();
@@ -476,7 +476,7 @@ void LChunk::MarkEmptyBlocks() {
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
- LInstructionGap* gap = new LInstructionGap(block);
+ LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
@@ -551,8 +551,8 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new LChunk(info(), graph());
- HPhase phase("Building chunk", chunk_);
+ chunk_ = new(zone()) LChunk(info(), graph());
+ HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@@ -581,20 +581,15 @@ void LChunkBuilder::Abort(const char* format, ...) {
}
-LRegister* LChunkBuilder::ToOperand(Register reg) {
- return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ DoubleRegister::ToAllocationIndex(reg));
}
@@ -609,30 +604,30 @@ LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
return Use(value,
- new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
}
LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
}
LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
}
LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
+ return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
}
@@ -667,7 +662,7 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new LUnallocated(LUnallocated::ANY));
+ : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
}
@@ -676,7 +671,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
HInstruction* instr = HInstruction::cast(value);
VisitInstruction(instr);
}
- allocator_->RecordUse(value, operand);
+ operand->set_virtual_register(value->id());
return operand;
}
@@ -684,36 +679,33 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
+ result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
return instr;
}
template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
LTemplateInstruction<1, I, T>* instr, int index) {
- return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+ return Define(instr,
+ new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
@@ -795,44 +787,46 @@ LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
+ instr->set_pointer_map(new(zone()) LPointerMap(position_));
return instr;
}
LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- allocator_->RecordTemporary(operand);
+ LUnallocated* operand =
+ new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ operand->set_virtual_register(allocator_->GetVirtualRegister());
+ if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
return operand;
}
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new LLabel(instr->block());
+ return new(zone()) LLabel(instr->block());
}
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
+ return AssignEnvironment(new(zone()) LDeoptimize);
}
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new LDeoptimize);
+ return AssignEnvironment(new(zone()) LDeoptimize);
}
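
The mechanical new -> new(zone()) rewrite running through the rest of this file switches lithium objects to arena allocation: everything built while compiling a chunk dies with the zone, with no per-object destructor or free. A toy placement-new shim showing the idiom (V8's real Zone bump-allocates from chunked buffers; this one just leaks via malloc for brevity):

    #include <cstddef>
    #include <cstdlib>

    class Zone {
     public:
      void* New(size_t size) { return std::malloc(size); }  // toy allocator
    };

    inline void* operator new(size_t size, Zone* zone) { return zone->New(size); }

    struct LDeoptimize { int dummy; };

    // Mirrors the diff: the object is owned by the zone and is never
    // deleted individually.
    LDeoptimize* Make(Zone* zone) { return new(zone) LDeoptimize; }
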
@@ -844,7 +838,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new LArithmeticT(op, left, right);
+ LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -878,7 +872,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
}
LInstruction* result =
- DefineAsRegister(new LShiftI(op, left, right, does_deopt));
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
return does_deopt ? AssignEnvironment(result) : result;
}
@@ -891,7 +885,7 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new LArithmeticD(op, left, right);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineAsRegister(result);
}
@@ -909,7 +903,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
- LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+ LArithmeticT* result =
+ new(zone()) LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1005,15 +1000,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
+ ASSERT(ast_id != AstNode::kNoNumber ||
+ hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
- LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
- hydrogen_env->is_arguments_adaptor(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer);
+ LEnvironment* result = new(zone()) LEnvironment(
+ hydrogen_env->closure(),
+ hydrogen_env->frame_type(),
+ ast_id,
+ hydrogen_env->parameter_count(),
+ argument_count_,
+ value_count,
+ outer);
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1023,14 +1020,14 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new(zone()) LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
- if (!hydrogen_env->is_arguments_adaptor()) {
+ if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@@ -1039,7 +1036,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new LGoto(instr->FirstSuccessor()->block_id());
+ return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
}
@@ -1049,10 +1046,10 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
- return new LGoto(successor->block_id());
+ return new(zone()) LGoto(successor->block_id());
}
- LBranch* result = new LBranch(UseRegister(value));
+ LBranch* result = new(zone()) LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment.
Representation rep = value->representation();
@@ -1069,23 +1066,24 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- return new LCmpMapAndBranch(value, temp);
+ return new(zone()) LCmpMapAndBranch(value, temp);
}
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+ LOperand* value = UseRegister(instr->value());
+ return DefineAsRegister(new(zone()) LArgumentsLength(value));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new LArgumentsElements);
+ return DefineAsRegister(new(zone()) LArgumentsElements);
}
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstanceOf* result =
- new LInstanceOf(UseFixed(instr->left(), r0),
+ new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
UseFixed(instr->right(), r1));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1094,17 +1092,26 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->left(), r0), FixedTemp(r4));
+ new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
+ FixedTemp(r4));
return MarkAsCall(DefineFixed(result, r0), instr);
}
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegisterAtStart(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), r1);
LOperand* receiver = UseFixed(instr->receiver(), r0);
LOperand* length = UseFixed(instr->length(), r2);
LOperand* elements = UseFixed(instr->elements(), r3);
- LApplyArguments* result = new LApplyArguments(function,
+ LApplyArguments* result = new(zone()) LApplyArguments(function,
receiver,
length,
elements);
@@ -1115,68 +1122,75 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
LOperand* argument = Use(instr->argument());
- return new LPushArgument(argument);
+ return new(zone()) LPushArgument(argument);
}
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+ return instr->HasNoUses()
+ ? NULL
+ : DefineAsRegister(new(zone()) LThisFunction);
}
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
}
LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LOuterContext(context));
+ return DefineAsRegister(new(zone()) LOuterContext(context));
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+ return MarkAsCall(new(zone()) LDeclareGlobals, instr);
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalObject(context));
+ return DefineAsRegister(new(zone()) LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGlobalReceiver(global_object));
+ return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
}
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallConstantFunction, r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new LInvokeFunction(function);
+ LInvokeFunction* result = new(zone()) LInvokeFunction(function);
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos) {
+ if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
LOperand* input = UseFixedDouble(instr->value(), d2);
- LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
} else if (op == kMathPowHalf) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LOperand* temp = FixedTemp(d3);
- LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
return DefineFixedDouble(result, d2);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
- LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+ LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
@@ -1198,32 +1212,32 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new LCallKeyed(key), r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallNamed, r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallGlobal, r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallKnownGlobal, r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* constructor = UseFixed(instr->constructor(), r1);
argument_count_ -= instr->argument_count();
- LCallNew* result = new LCallNew(constructor);
+ LCallNew* result = new(zone()) LCallNew(constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1231,13 +1245,14 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallFunction(function), r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
+ instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallRuntime, r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
}
@@ -1263,7 +1278,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineAsRegister(new LBitI(left, right));
+ return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->left()->representation().IsTagged());
@@ -1271,7 +1286,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new LArithmeticT(instr->op(), left, right);
+ LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
}
@@ -1280,7 +1295,8 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
- return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LBitNotI(value));
}
@@ -1296,7 +1312,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* dividend = UseFixed(instr->left(), r0);
LOperand* divisor = UseFixed(instr->right(), r1);
return AssignEnvironment(AssignPointerMap(
- DefineFixed(new LDivI(dividend, divisor), r0)));
+ DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1312,15 +1328,15 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
- mod = new LModI(value, UseOrConstant(instr->right()));
+ mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
} else {
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
- mod = new LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(d10),
- FixedTemp(d11));
+ mod = new(zone()) LModI(dividend,
+ divisor,
+ TempRegister(),
+ FixedTemp(d10),
+ FixedTemp(d11));
}
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
@@ -1338,7 +1354,7 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
// TODO(fschneider): Allow any registers as input registers.
LOperand* left = UseFixedDouble(instr->left(), d1);
LOperand* right = UseFixedDouble(instr->right(), d2);
- LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+ LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, d1), instr);
}
}
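
The HasPowerOf2Divisor path above avoids division entirely (useful since pre-ARMv7 cores have no integer divide instruction): a remainder by a constant 2^k reduces to a mask plus a sign fix-up. A sketch of the arithmetic this LModI variant encodes, ignoring the INT_MIN edge case for brevity:

    #include <cassert>
    #include <cstdint>

    // x % 2^k with JavaScript's truncated, sign-of-dividend semantics. The
    // kBailoutOnMinusZero flag handled in DoMod exists because e.g. -4 % 4
    // is -0 in JS, which no int32 result can represent.
    int32_t ModPowerOfTwo(int32_t x, int32_t divisor) {
      assert(divisor > 0 && (divisor & (divisor - 1)) == 0);  // power of two
      int32_t mask = divisor - 1;
      if (x >= 0) return x & mask;
      return -((-x) & mask);  // a negative dividend keeps its sign
    }
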
@@ -1359,7 +1375,7 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else {
left = UseRegisterAtStart(instr->LeastConstantOperand());
}
- LMulI* mul = new LMulI(left, right, temp);
+ LMulI* mul = new(zone()) LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
AssignEnvironment(mul);
@@ -1381,7 +1397,7 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new LSubI(left, right);
+ LSubI* sub = new(zone()) LSubI(left, right);
LInstruction* result = DefineAsRegister(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
@@ -1401,7 +1417,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new LAddI(left, right);
+ LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
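
DoSub and DoAdd only attach an environment when the hydrogen value carries kCanOverflow; the emitted adds/subs then conditionally branches to a deoptimization entry on the V flag. The runtime contract, sketched with the GCC/Clang overflow builtin standing in for the flags check:

    #include <cstdint>

    // Conceptual behaviour of an LAddI with kCanOverflow set: perform the
    // add, and if the signed result wrapped, abandon optimized code through
    // the attached environment (modelled here as a callback).
    int32_t CheckedAdd(int32_t left, int32_t right, void (*deopt)()) {
      int32_t result;
      if (__builtin_add_overflow(left, right, &result)) {
        deopt();  // corresponds to the 'b vs, <deopt entry>' the backend emits
      }
      return result;
    }
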
@@ -1426,7 +1442,7 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), d2) :
UseFixed(instr->right(), r2);
- LPower* result = new LPower(left, right);
+ LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d3),
instr,
CAN_DEOPTIMIZE_EAGERLY);
@@ -1437,7 +1453,7 @@ LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
LOperand* global_object = UseFixed(instr->global_object(), r0);
- LRandom* result = new LRandom(global_object);
+ LRandom* result = new(zone()) LRandom(global_object);
return MarkAsCall(DefineFixedDouble(result, d7), instr);
}
@@ -1447,7 +1463,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
- LCmpT* result = new LCmpT(left, right);
+ LCmpT* result = new(zone()) LCmpT(left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1460,14 +1476,14 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new LCmpIDAndBranch(left, right);
+ return new(zone()) LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return new LCmpIDAndBranch(left, right);
+ return new(zone()) LCmpIDAndBranch(left, right);
}
}
@@ -1476,47 +1492,50 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return new LCmpObjectEqAndBranch(left, right);
+ return new(zone()) LCmpObjectEqAndBranch(left, right);
}
LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
HCompareConstantEqAndBranch* instr) {
- return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpConstantEqAndBranch(value);
}
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+ return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
+ return new(zone()) LIsObjectAndBranch(value, temp);
}
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+ return new(zone()) LIsStringAndBranch(value, temp);
}
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(instr->value()));
+ return new(zone()) LIsSmiAndBranch(Use(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
- TempRegister());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
}
@@ -1526,7 +1545,8 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
- LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+ LStringCompareAndBranch* result =
+ new(zone()) LStringCompareAndBranch(left, right);
return MarkAsCall(result, instr);
}
@@ -1534,7 +1554,8 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LHasInstanceTypeAndBranch(value);
}
@@ -1543,14 +1564,14 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LGetCachedArrayIndex(value));
+ return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
+ return new(zone()) LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(instr->value()));
}
@@ -1558,40 +1579,48 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseRegister(instr->value()),
- TempRegister());
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LClassOfTestAndBranch(value, TempRegister());
}
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LJSArrayLength(array));
+ return DefineAsRegister(new(zone()) LJSArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
HFixedArrayBaseLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LFixedArrayBaseLength(array));
+ return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
}
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LElementsKind(object));
+ return DefineAsRegister(new(zone()) LElementsKind(object));
}
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
- LValueOf* result = new LValueOf(object, TempRegister());
+ LValueOf* result = new(zone()) LValueOf(object, TempRegister());
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+ LOperand* object = UseFixed(instr->value(), r0);
+ LDateField* result = new(zone()) LDateField(object, FixedTemp(r1), instr->index());
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
- UseRegister(instr->length())));
+ LOperand* value = UseRegisterAtStart(instr->index());
+ LOperand* length = UseRegister(instr->length());
+ return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
}
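
LBoundsCheck always takes an environment, since the only way out of a failed index check is deoptimization. The check itself is the usual single unsigned comparison; a sketch with a hypothetical helper name:

    #include <cstdint>

    // One unsigned compare covers both 'index < 0' and 'index >= length':
    // a negative int32 reinterpreted as uint32 exceeds any valid length.
    // Returning false here corresponds to the DeoptimizeIf in the codegen.
    bool IndexInBounds(int32_t index, int32_t length) {
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }
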
@@ -1604,7 +1633,7 @@ LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new LThrow(value), instr);
+ return MarkAsCall(new(zone()) LThrow(value), instr);
}
@@ -1627,22 +1656,25 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new LNumberUntagD(value);
+ LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegisterAtStart(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
if (!needs_check) {
- res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
} else {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
: NULL;
- res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
+ res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
+ temp1,
+ temp2,
+ temp3));
res = AssignEnvironment(res);
}
return res;
@@ -1656,32 +1688,31 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// Make sure that the temp and result_temp registers are
// different.
LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
+ LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LDoubleToI* res =
- new LDoubleToI(value,
- TempRegister(),
- instr->CanTruncateToInt32() ? TempRegister() : NULL);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
+ LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
HValue* val = instr->value();
- LOperand* value = UseRegister(val);
+ LOperand* value = UseRegisterAtStart(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new LSmiTag(value));
+ return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
- LNumberTagI* result = new LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LNumberTagI* result = new(zone()) LNumberTagI(value);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else {
ASSERT(to.IsDouble());
LOperand* value = Use(instr->value());
- return DefineAsRegister(new LInteger32ToDouble(value));
+ return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
}
}
UNREACHABLE();
@@ -1691,13 +1722,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckNonSmi(value));
+ return AssignEnvironment(new(zone()) LCheckNonSmi(value));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new LCheckInstanceType(value);
+ LInstruction* result = new(zone()) LCheckInstanceType(value);
return AssignEnvironment(result);
}
@@ -1705,26 +1736,26 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
+ LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value));
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckFunction(value));
+ return AssignEnvironment(new(zone()) LCheckFunction(value));
}
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new LCheckMap(value);
+ LInstruction* result = new(zone()) LCheckMap(value);
return AssignEnvironment(result);
}
@@ -1734,57 +1765,32 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d11)));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
} else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new LClampIToUint8(reg));
+ return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
ASSERT(input_rep.IsTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve d11 explicitly.
- LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d11));
+ LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
return AssignEnvironment(DefineAsRegister(result));
}
}
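
All three DoClampToUint8 lowerings implement the same saturation used for external pixel-array stores. A reference version of the double case; the ties-to-even rounding is an assumption matching the default VFP rounding this sketch presumes:

    #include <cmath>
    #include <cstdint>

    // Saturating double -> uint8: NaN and negatives clamp to 0, values of
    // 255 or above clamp to 255, everything in between rounds to nearest.
    uint8_t ClampDoubleToUint8(double value) {
      if (!(value > 0.0)) return 0;  // catches NaN and non-positives at once
      if (value >= 255.0) return 255;
      return static_cast<uint8_t>(std::nearbyint(value));
    }
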
-LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
- return AssignEnvironment(DefineAsRegister(res));
- } else if (input_rep.IsInteger32()) {
- // Canonicalization should already have removed the hydrogen instruction in
- // this case, since it is a noop.
- UNREACHABLE();
- return NULL;
- } else {
- ASSERT(input_rep.IsTagged());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* temp3 = FixedTemp(d11);
- LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
- return AssignEnvironment(DefineSameAsFirst(res));
- }
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new LReturn(UseFixed(instr->value(), r0));
+ return new(zone()) LReturn(UseFixed(instr->value(), r0));
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
- return DefineAsRegister(new LConstantI);
+ return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
- return DefineAsRegister(new LConstantD);
+ return DefineAsRegister(new(zone()) LConstantD);
} else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT);
+ return DefineAsRegister(new(zone()) LConstantT);
} else {
UNREACHABLE();
return NULL;
@@ -1793,7 +1799,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new LLoadGlobalCell;
+ LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
@@ -1802,7 +1808,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object = UseFixed(instr->global_object(), r0);
- LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+ LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1812,8 +1818,8 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
// Use a temp to check the value in the cell in the case where we perform
// a hole check.
return instr->RequiresHoleCheck()
- ? AssignEnvironment(new LStoreGlobalCell(value, TempRegister()))
- : new LStoreGlobalCell(value, NULL);
+ ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
+ : new(zone()) LStoreGlobalCell(value, NULL);
}
@@ -1821,14 +1827,15 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LOperand* global_object = UseFixed(instr->global_object(), r1);
LOperand* value = UseFixed(instr->value(), r0);
LStoreGlobalGeneric* result =
- new LStoreGlobalGeneric(global_object, value);
+ new(zone()) LStoreGlobalGeneric(global_object, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1843,14 +1850,14 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
- LInstruction* result = new LStoreContextSlot(context, value);
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value);
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
return DefineAsRegister(
- new LLoadNamedField(UseRegisterAtStart(instr->object())));
+ new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
}
@@ -1859,11 +1866,13 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
ASSERT(instr->representation().IsTagged());
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), r0);
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ LLoadNamedFieldPolymorphic* result =
+ new(zone()) LLoadNamedFieldPolymorphic(obj);
return MarkAsCall(DefineFixed(result, r0), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ LLoadNamedFieldPolymorphic* result =
+ new(zone()) LLoadNamedFieldPolymorphic(obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
@@ -1871,7 +1880,7 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), r0);
- LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), r0);
+ LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
return MarkAsCall(result, instr);
}
@@ -1879,20 +1888,20 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
- new LLoadFunctionPrototype(UseRegister(instr->function()))));
+ new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadElements(input));
+ return DefineAsRegister(new(zone()) LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadExternalArrayPointer(input));
+ return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
}
@@ -1902,7 +1911,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
- LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
}
@@ -1915,7 +1924,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
LOperand* elements = UseTempRegister(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
- new LLoadKeyedFastDoubleElement(elements, key);
+ new(zone()) LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
}
@@ -1934,7 +1943,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
- new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt; make sure
// it has an environment.
@@ -1948,7 +1957,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), r0);
LInstruction* result =
- DefineFixed(new LLoadKeyedGeneric(object, key), r0);
+ DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
return MarkAsCall(result, instr);
}
@@ -1967,7 +1976,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return new LStoreKeyedFastElement(obj, key, val);
+ return new(zone()) LStoreKeyedFastElement(obj, key, val);
}
@@ -1981,7 +1990,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
LOperand* val = UseTempRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new LStoreKeyedFastDoubleElement(elements, key, val);
+ return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
}
@@ -2007,9 +2016,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
: UseRegister(instr->value());
LOperand* key = UseRegisterOrConstant(instr->key());
- return new LStoreKeyedSpecializedArrayElement(external_pointer,
- key,
- val);
+ return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
@@ -2022,7 +2031,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
- return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+ return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
}
@@ -2033,14 +2042,16 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
- new LTransitionElementsKind(object, new_map_reg, NULL);
+ new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
return DefineSameAsFirst(result);
} else {
LOperand* object = UseFixed(instr->object(), r0);
LOperand* fixed_object_reg = FixedTemp(r2);
LOperand* new_map_reg = FixedTemp(r3);
LTransitionElementsKind* result =
- new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
+ new(zone()) LTransitionElementsKind(object,
+ new_map_reg,
+ fixed_object_reg);
return MarkAsCall(DefineFixed(result, r0), instr);
}
}
@@ -2057,7 +2068,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
? UseTempRegister(instr->value())
: UseRegister(instr->value());
- return new LStoreNamedField(obj, val);
+ return new(zone()) LStoreNamedField(obj, val);
}
@@ -2065,7 +2076,7 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), r1);
LOperand* val = UseFixed(instr->value(), r0);
- LInstruction* result = new LStoreNamedGeneric(obj, val);
+ LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
return MarkAsCall(result, instr);
}
@@ -2073,61 +2084,67 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
+ instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LStringLength(string));
+ return DefineAsRegister(new(zone()) LStringLength(string));
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
+LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ LAllocateObject* result = new(zone()) LAllocateObject(TempRegister(), TempRegister());
+ return AssignPointerMap(DefineAsRegister(result));
}
-LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteralFast, r0), instr);
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
+ return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+ return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
}
-LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
- HObjectLiteralGeneric* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, r0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+ return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, r0), instr);
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new LRegExpLiteral, r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new LFunctionLiteral, r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
}
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LOperand* object = UseFixed(instr->object(), r0);
LOperand* key = UseFixed(instr->key(), r1);
- LDeleteProperty* result = new LDeleteProperty(object, key);
+ LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -2135,13 +2152,13 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new LOsrEntry);
+ return AssignEnvironment(new(zone()) LOsrEntry);
}
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new LParameter, spill_index);
+ return DefineAsSpilled(new(zone()) LParameter, spill_index);
}
@@ -2151,13 +2168,13 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
- return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+ return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallStub, r0), instr);
+ return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
}
@@ -2174,32 +2191,33 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = UseRegister(instr->index());
- LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ LAccessArgumentsAt* result =
+ new(zone()) LAccessArgumentsAt(arguments, length, index);
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LOperand* object = UseFixed(instr->value(), r0);
- LToFastProperties* result = new LToFastProperties(object);
+ LToFastProperties* result = new(zone()) LToFastProperties(object);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new LTypeof(UseFixed(instr->value(), r0));
+ LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
+ return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
HIsConstructCallAndBranch* instr) {
- return new LIsConstructCallAndBranch(TempRegister());
+ return new(zone()) LIsConstructCallAndBranch(TempRegister());
}
@@ -2222,7 +2240,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new LLazyBailout;
+ LInstruction* result = new(zone()) LLazyBailout;
result = AssignEnvironment(result);
instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
@@ -2236,10 +2254,10 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
- return MarkAsCall(new LStackCheck, instr);
+ return MarkAsCall(new(zone()) LStackCheck, instr);
} else {
ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
}
}
@@ -2251,7 +2269,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind());
+ instr->call_kind(),
+ instr->is_construct());
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2269,9 +2288,37 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
LInstruction* LChunkBuilder::DoIn(HIn* instr) {
LOperand* key = UseRegisterAtStart(instr->key());
LOperand* object = UseRegisterAtStart(instr->object());
- LIn* result = new LIn(key, object);
+ LIn* result = new(zone()) LIn(key, object);
return MarkAsCall(DefineFixed(result, r0), instr);
}
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+ LOperand* object = UseFixed(instr->enumerable(), r0);
+ LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+ return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+ LOperand* map = UseRegister(instr->map());
+ return AssignEnvironment(DefineAsRegister(
+ new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* map = UseRegisterAtStart(instr->map());
+ return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* index = UseRegister(instr->index());
+ return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+}
+
+
} } // namespace v8::internal
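
The four builders added at the end of this file (ForInPrepareMap, ForInCacheArray, CheckMapValue, LoadFieldByIndex) are the pieces of the optimized for-in protocol. How they compose, sketched below; every type and helper is an illustrative stand-in, not the real V8 API:

    // Shape of an optimized 'for (key in obj)' loop; each call maps onto
    // one lithium instruction above. Declarations only: the bodies live in
    // generated code and the runtime.
    struct Object;
    struct Map;

    struct FixedArray {
      int length() const;
      Object* get(int index) const;
    };

    Map* ForInPrepareMap(Object* obj);          // deopts for slow receivers
    FixedArray* ForInCacheArray(Map* map);      // enum cache of own keys
    void CheckMapValue(Object* obj, Map* map);  // deopt if the map changed
    Object* LoadFieldByIndex(Object* obj, int index);
    void UseKeyAndValue(Object* key, Object* value);

    void ForInLoop(Object* obj) {
      Map* map = ForInPrepareMap(obj);
      FixedArray* cache = ForInCacheArray(map);
      for (int i = 0; i < cache->length(); ++i) {
        Object* key = cache->get(i);
        CheckMapValue(obj, map);  // receiver must still look the same
        UseKeyAndValue(key, LoadFieldByIndex(obj, i));
      }
    }

CheckMapValue is the piece that earns its environment: if the loop body adds or deletes a property, the receiver's map changes and the whole loop bails back to the generic path.
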
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index d3aff76e..62cde6e2 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -49,6 +49,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
+ V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -87,11 +88,13 @@ class LCodeGen;
V(ConstantI) \
V(ConstantT) \
V(Context) \
+ V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(ElementsKind) \
+ V(FastLiteral) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@@ -134,8 +137,7 @@ class LCodeGen;
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
- V(ObjectLiteralFast) \
- V(ObjectLiteralGeneric) \
+ V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -171,7 +173,13 @@ class LCodeGen;
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
- V(ValueOf)
+ V(ValueOf) \
+ V(ForInPrepareMap) \
+ V(ForInCacheArray) \
+ V(CheckMapValue) \
+ V(LoadFieldByIndex) \
+ V(DateField) \
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -461,6 +469,20 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
+class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
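
Every class in this header encodes its operand shape in the LTemplateInstruction<R, I, T> base: R results (0 or 1), I inputs, T temps, all fixed at compile time. A cut-down model of what that base provides; the real class also carries pointer maps, environments, and the hydrogen back-link:

    struct LOperand;

    // Zero-length arrays are not legal C++, so mirror the usual trick of a
    // container specialised for length 0 (operator[] is never instantiated).
    template <int N>
    struct OperandArray {
      LOperand*& operator[](int i) { return elems_[i]; }
      LOperand* elems_[N];
    };
    template <>
    struct OperandArray<0> {
      LOperand*& operator[](int i);  // declared only; unused at length 0
    };

    template <int R, int I, int T>
    class LTemplateInstructionSketch {
     public:
      LOperand* result() { return results_[0]; }  // meaningful when R == 1
     protected:
      OperandArray<R> results_;
      OperandArray<I> inputs_;
      OperandArray<T> temps_;
    };

    // Matches LWrapReceiver above: one result, two inputs, no temps.
    class LWrapReceiverSketch : public LTemplateInstructionSketch<1, 2, 0> {
     public:
      LWrapReceiverSketch(LOperand* receiver, LOperand* function) {
        inputs_[0] = receiver;
        inputs_[1] = function;
      }
      LOperand* receiver() { return inputs_[0]; }
      LOperand* function() { return inputs_[1]; }
    };
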
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
@@ -983,6 +1005,41 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
+class LDateField: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
+ inputs_[0] = date;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+ Smi* index() const { return index_; }
+
+ private:
+ Smi* index_;
+};
+
+
+class LSetDateField: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LSetDateField(LOperand* date, LOperand* value, LOperand* temp, int index)
+ : index_(index) {
+ inputs_[0] = date;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-set-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ int index() const { return index_; }
+
+ private:
+ int index_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -1346,6 +1403,13 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
+class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+ DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
@@ -1909,24 +1973,36 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+ LAllocateObject(LOperand* temp1, LOperand* temp2) {
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
+ DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
-class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+ DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
};
-class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
+ DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
};
@@ -2056,6 +2132,62 @@ class LIn: public LTemplateInstruction<1, 2, 0> {
};
+class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInPrepareMap(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LForInCacheArray(LOperand* map) {
+ inputs_[0] = map;
+ }
+
+ LOperand* map() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+ int idx() {
+ return HForInCacheArray::cast(this->hydrogen_value())->idx();
+ }
+};
+
+
+class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LCheckMapValue(LOperand* value, LOperand* map) {
+ inputs_[0] = value;
+ inputs_[1] = map;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* map() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadFieldByIndex(LOperand* object, LOperand* index) {
+ inputs_[0] = object;
+ inputs_[1] = index;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
@@ -2123,6 +2255,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
+ zone_(graph->isolate()->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@@ -2152,6 +2285,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
+ Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@@ -2161,7 +2295,6 @@ class LChunkBuilder BASE_EMBEDDED {
void Abort(const char* format, ...);
// Methods for getting operands for Use / Define / Temp.
- LRegister* ToOperand(Register reg);
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -2212,8 +2345,6 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result);
template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
@@ -2259,6 +2390,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
+ Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 76c8443e..82b80a2b 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -62,7 +62,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
- HPhase phase("Code generation", chunk());
+ HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope1(VFP3);
@@ -479,10 +479,18 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
- if (environment->is_arguments_adaptor()) {
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- } else {
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ switch (environment->frame_type()) {
+ case JS_FUNCTION:
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ break;
+ case JS_CONSTRUCT:
+ translation->BeginConstructStubFrame(closure_id, translation_size);
+ break;
+ case ARGUMENTS_ADAPTOR:
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ break;
+ default:
+ UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@@ -619,7 +627,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
- if (!e->is_arguments_adaptor()) {
+ if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
@@ -640,7 +648,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
- ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -673,7 +680,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
- ASSERT(FLAG_deopt);
Handle<DeoptimizationInputData> data =
factory()->NewDeoptimizationInputData(length, TENURED);
@@ -1431,6 +1437,46 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
}
+void LCodeGen::DoDateField(LDateField* instr) {
+ Register object = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->TempAt(0));
+ Smi* index = instr->index();
+ Label runtime, done;
+ ASSERT(object.is(result));
+ ASSERT(object.is(r0));
+ ASSERT(!scratch.is(scratch0()));
+ ASSERT(!scratch.is(object));
+
+#ifdef DEBUG
+ __ AbortIfSmi(object);
+ __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
+ __ Assert(eq, "Trying to get date field from non-date.");
+#endif
+
+ if (index->value() == 0) {
+ __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
+ } else {
+ if (index->value() < JSDate::kFirstUncachedField) {
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+ __ mov(scratch, Operand(stamp));
+ __ ldr(scratch, MemOperand(scratch));
+ __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ cmp(scratch, scratch0());
+ __ b(ne, &runtime);
+ __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()));
+ __ jmp(&done);
+ }
+ __ bind(&runtime);
+ __ PrepareCallCFunction(2, scratch);
+ __ mov(r1, Operand(index));
+ __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+ __ bind(&done);
+ }
+}
+
+
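
DoDateField's stamp comparison is a cache-invalidation check: JSDate objects carry pre-computed fields (year, month, day, ...) that stay valid only while the object's stamp equals an isolate-wide date cache stamp, which is bumped whenever timezone or DST data may have changed. The logic restated as a sketch; the layout and names are illustrative, not the real JSDate:

    #include <cstdint>

    struct JSDateSketch {
      double value;             // field index 0: ms since the epoch
      int cache_stamp;          // global stamp captured when fields were filled
      double cached_fields[8];  // year, month, day, weekday, ...; slot 0 unused
    };

    extern int g_date_cache_stamp;      // bumped on timezone/DST rule changes
    const int kFirstUncachedFieldSketch = 8;

    double SlowGetDateField(JSDateSketch* date, int index);  // runtime call

    double GetDateField(JSDateSketch* date, int index) {
      if (index == 0) return date->value;
      if (index < kFirstUncachedFieldSketch &&
          date->cache_stamp == g_date_cache_stamp) {
        return date->cached_fields[index];   // the ldr/cmp/ldr fast path above
      }
      return SlowGetDateField(date, index);  // the CallCFunction slow path
    }
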
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -2753,15 +2799,10 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
}
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
- ASSERT(receiver.is(r0)); // Used for parameter count.
- ASSERT(function.is(r1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(r0));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2802,6 +2843,18 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ ldr(receiver,
FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ ASSERT(receiver.is(r0)); // Used for parameter count.
+ ASSERT(function.is(r1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(r0));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
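
Splitting receiver wrapping out of DoApplyArguments gives HWrapReceiver its own lowering, and the AssignEnvironment in its builder earlier in this commit is the tell that a primitive receiver deoptimizes rather than being boxed inline. The observable sloppy-mode rule, sketched with stand-in names:

    // Stand-in declarations mirroring the checks the assembly performs.
    struct Value;
    bool IsStrictOrNative(Value* function);
    bool IsNullOrUndefined(Value* v);
    bool IsJSObject(Value* v);
    Value* GlobalReceiver();

    Value* WrapReceiver(Value* receiver, Value* function, void (*deopt)()) {
      if (IsStrictOrNative(function)) return receiver;  // no transformation
      if (IsNullOrUndefined(receiver)) return GlobalReceiver();
      if (!IsJSObject(receiver)) deopt();  // unoptimized code applies ToObject
      return receiver;
    }
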
@@ -2874,6 +2927,16 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
}
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+ __ push(cp); // The context is the first argument.
+ __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+ __ push(scratch0());
+ __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+ __ push(scratch0());
+ CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
@@ -3213,15 +3276,62 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
+ class DeferredDoRandom: public LDeferredCode {
+ public:
+ DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LRandom* instr_;
+ };
+
+ DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
+
// Having marked this instruction as a call, we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(d7));
ASSERT(ToRegister(instr->InputAt(0)).is(r0));
- __ PrepareCallCFunction(1, scratch0());
+ static const int kSeedSize = sizeof(uint32_t);
+ STATIC_ASSERT(kPointerSize == kSeedSize);
+
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+ static const int kRandomSeedOffset =
+ FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
+ __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
+ // r2: ByteArray of the global context's random seeds
+
+ // Load state[0].
+ __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
+ __ cmp(r1, Operand(0));
+ __ b(eq, deferred->entry());
+ // Load state[1].
+ __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
+ // r1: state[0].
+ // r0: state[1].
+
+ // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
+ __ and_(r3, r1, Operand(0xFFFF));
+ __ mov(r4, Operand(18273));
+ __ mul(r3, r3, r4);
+ __ add(r1, r3, Operand(r1, LSR, 16));
+ // Save state[0].
+ __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
+
+ // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
+ __ and_(r3, r0, Operand(0xFFFF));
+ __ mov(r4, Operand(36969));
+ __ mul(r3, r3, r4);
+ __ add(r0, r3, Operand(r0, LSR, 16));
+ // Save state[1].
+ __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
+
+ // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
+ __ and_(r0, r0, Operand(0x3FFFF));
+ __ add(r0, r0, Operand(r1, LSL, 14));
+ __ bind(deferred->exit());
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
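
The inline sequence replacing the random_uint32 C call is Marsaglia's multiply-with-carry generator: two 16-bit MWC streams combined into 32 random bits, then converted to a double in [0, 1) with the 0x41300000 exponent trick the comment describes. The same computation in plain C++ (a zero seed falls back to the deferred C call added in the next hunk):

    #include <cstdint>
    #include <cstring>

    // Two multiply-with-carry streams, using exactly the constants above.
    uint32_t NextRandom(uint32_t state[2]) {
      state[0] = 18273u * (state[0] & 0xFFFFu) + (state[0] >> 16);
      state[1] = 36969u * (state[1] & 0xFFFFu) + (state[1] >> 16);
      return (state[0] << 14) + (state[1] & 0x3FFFFu);
    }

    // A double whose high word is 0x41300000 and whose low word is N has the
    // value 2^20 + N * 2^-32, so subtracting 1.0 * 2^20 leaves a uniform
    // value in [0, 1).
    double RandomDouble(uint32_t state[2]) {
      uint64_t bits = (static_cast<uint64_t>(0x41300000u) << 32) |
                      NextRandom(state);
      double d;
      std::memcpy(&d, &bits, sizeof(d));  // reinterpret as IEEE-754 bits
      return d - 1048576.0;               // 1.0 * 2^20
    }
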
@@ -3236,6 +3346,13 @@ void LCodeGen::DoRandom(LRandom* instr) {
}
+void LCodeGen::DoDeferredRandom(LRandom* instr) {
+ __ PrepareCallCFunction(1, scratch0());
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+ // Return value is in r0.
+}
+
+
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -3376,9 +3493,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
- Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ mov(r0, Operand(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3796,12 +3913,11 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LNumberTagI* instr_;
};
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
+ Register src = ToRegister(instr->InputAt(0));
+ Register dst = ToRegister(instr->result());
DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
- __ SmiTag(reg, SetCC);
+ __ SmiTag(dst, src, SetCC);
__ b(vs, deferred->entry());
__ bind(deferred->exit());
}
@@ -3809,7 +3925,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
- Register reg = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(instr->InputAt(0));
+ Register dst = ToRegister(instr->result());
DoubleRegister dbl_scratch = double_scratch0();
SwVfpRegister flt_scratch = dbl_scratch.low();
@@ -3820,14 +3937,16 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// disagree. Try to allocate a heap number in new space and store
// the value in there. If that fails, call the runtime system.
Label done;
- __ SmiUntag(reg);
- __ eor(reg, reg, Operand(0x80000000));
- __ vmov(flt_scratch, reg);
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ eor(src, src, Operand(0x80000000));
+ }
+ __ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
if (FLAG_inline_new) {
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- if (!reg.is(r5)) __ mov(reg, r5);
+ __ Move(dst, r5);
__ b(&done);
}
@@ -3838,16 +3957,16 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ mov(ip, Operand(0));
- __ StoreToSafepointRegisterSlot(ip, reg);
+ __ StoreToSafepointRegisterSlot(ip, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- if (!reg.is(r0)) __ mov(reg, r0);
+ __ Move(dst, r0);
// Done. Put the value in dbl_scratch into the value field of the allocated
// heap number.
__ bind(&done);
- __ sub(ip, reg, Operand(kHeapObjectTag));
+ __ sub(ip, dst, Operand(kHeapObjectTag));
__ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
- __ StoreToSafepointRegisterSlot(reg, reg);
+ __ StoreToSafepointRegisterSlot(dst, dst);
}
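A note on the eor in the aliased case above: SmiTag is a left shift by one, and this deferred path is only reached when that shift overflowed, so when the destination aliases the source the original integer is recovered by shifting back and flipping the sign bit. A minimal sketch of the recovery:

    // Recover the untagged integer after SmiTag (value << 1) overflowed:
    // shift back (ASR 1) and flip the sign bit that the overflow toggled.
    static int32_t RecoverFromSmiTagOverflow(int32_t tagged) {
      return static_cast<int32_t>((tagged >> 1) ^ 0x80000000u);
    }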
@@ -3895,23 +4014,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
if (instr->needs_check()) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(ToRegister(input), SetCC);
+ __ SmiUntag(result, input, SetCC);
DeoptimizeIf(cs, instr->environment());
} else {
- __ SmiUntag(ToRegister(input));
+ __ SmiUntag(result, input);
}
}
@@ -3928,7 +4045,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
// Smi check.
- __ JumpIfSmi(input_reg, &load_smi);
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -3967,10 +4084,9 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Smi to double register conversion
__ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ vmov(flt_scratch, input_reg);
+ // scratch: untagged value of input_reg
+ __ vmov(flt_scratch, scratch);
__ vcvt_f64_s32(result_reg, flt_scratch);
- __ SmiTag(input_reg); // Retag smi.
__ bind(&done);
}
@@ -4256,7 +4372,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
- __ JumpIfSmi(input_reg, &is_smi);
+ __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
// Check for heap number
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4279,7 +4395,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// smi
__ bind(&is_smi);
- __ SmiUntag(result_reg, input_reg);
__ ClampUint8(result_reg, result_reg);
__ bind(&done);
@@ -4315,6 +4430,80 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
+void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
+ class DeferredAllocateObject: public LDeferredCode {
+ public:
+ DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocateObject* instr_;
+ };
+
+ DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch2 = ToRegister(instr->TempAt(1));
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+ Handle<Map> initial_map(constructor->initial_map());
+ int instance_size = initial_map->instance_size();
+ ASSERT(initial_map->pre_allocated_property_fields() +
+ initial_map->unused_property_fields() -
+ initial_map->inobject_properties() == 0);
+
+ // Allocate memory for the object. The initial map might change when
+ // the constructor's prototype changes, but instance size and property
+ // counts remain unchanged (if slack tracking finished).
+ ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
+ __ AllocateInNewSpace(instance_size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ TAG_OBJECT);
+
+ // Load the initial map.
+ Register map = scratch;
+ __ LoadHeapObject(map, constructor);
+ __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Initialize map and fields of the newly allocated object.
+ ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+ __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
+ __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ if (initial_map->inobject_properties() != 0) {
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < initial_map->inobject_properties(); i++) {
+ int property_offset = JSObject::kHeaderSize + i * kPointerSize;
+ __ str(scratch, FieldMemOperand(result, property_offset));
+ }
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
+ Register result = ToRegister(instr->result());
+ Handle<JSFunction> constructor = instr->hydrogen()->constructor();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, Operand(0));
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ LoadHeapObject(r0, constructor);
+ __ push(r0);
+ CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
+ __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
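In outline, DoAllocateObject's fast path writes a three-word header (map, empty properties, empty elements) and fills the in-object fields with undefined. A word-level sketch, with placeholder types rather than V8's:

    // Word-level sketch of the initialization emitted by DoAllocateObject.
    static void InitializeFreshObject(void** obj, void* map,
                                      void* empty_fixed_array,
                                      void* undefined_value,
                                      int inobject_properties) {
      obj[0] = map;                // JSObject::kMapOffset
      obj[1] = empty_fixed_array;  // JSObject::kPropertiesOffset
      obj[2] = empty_fixed_array;  // JSObject::kElementsOffset
      for (int i = 0; i < inobject_properties; ++i) {
        obj[3 + i] = undefined_value;  // fields follow the header words
      }
    }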
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Heap* heap = isolate()->heap();
ElementsKind boilerplate_elements_kind =
@@ -4373,26 +4562,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
ASSERT(!source.is(r2));
ASSERT(!result.is(r2));
+ // Only elements backing stores for non-COW arrays need to be copied.
+ Handle<FixedArrayBase> elements(object->elements());
+ bool has_elements = elements->length() > 0 &&
+ elements->map() != isolate()->heap()->fixed_cow_array_map();
+
// Increase the offset so that subsequent objects end up right after
- // this one.
- int current_offset = *offset;
- int size = object->map()->instance_size();
- *offset += size;
+ // this object and its backing store.
+ int object_offset = *offset;
+ int object_size = object->map()->instance_size();
+ int elements_offset = *offset + object_size;
+ int elements_size = has_elements ? elements->Size() : 0;
+ *offset += object_size + elements_size;
// Copy object header.
ASSERT(object->properties()->length() == 0);
- ASSERT(object->elements()->length() == 0 ||
- object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
int inobject_properties = object->map()->inobject_properties();
- int header_size = size - inobject_properties * kPointerSize;
+ int header_size = object_size - inobject_properties * kPointerSize;
for (int i = 0; i < header_size; i += kPointerSize) {
- __ ldr(r2, FieldMemOperand(source, i));
- __ str(r2, FieldMemOperand(result, current_offset + i));
+ if (has_elements && i == JSObject::kElementsOffset) {
+ __ add(r2, result, Operand(elements_offset));
+ } else {
+ __ ldr(r2, FieldMemOperand(source, i));
+ }
+ __ str(r2, FieldMemOperand(result, object_offset + i));
}
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
- int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+ int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
@@ -4408,10 +4606,58 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
__ str(r2, FieldMemOperand(result, total_offset));
}
}
+
+ if (has_elements) {
+ // Copy elements backing store header.
+ __ LoadHeapObject(source, elements);
+ for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+ __ ldr(r2, FieldMemOperand(source, i));
+ __ str(r2, FieldMemOperand(result, elements_offset + i));
+ }
+
+ // Copy elements backing store content.
+ int elements_length = has_elements ? elements->length() : 0;
+ if (elements->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+ // We only support little-endian mode; the low word is stored first.
+ int32_t value_low = value & 0xFFFFFFFF;
+ int32_t value_high = value >> 32;
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ mov(r2, Operand(value_low));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ __ mov(r2, Operand(value_high));
+ __ str(r2, FieldMemOperand(result, total_offset + 4));
+ }
+ } else if (elements->IsFixedArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+ Handle<Object> value = JSObject::GetElement(object, i);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ add(r2, result, Operand(*offset));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ } else {
+ __ mov(r2, Operand(value));
+ __ str(r2, FieldMemOperand(result, total_offset));
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ }
}
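The FixedDoubleArray branch above copies each element as its raw 64-bit representation split into two 32-bit stores, low word first; as the comment notes, this assumes little-endian layout. Roughly:

    // Sketch: copy one FixedDoubleArray element as two 32-bit words.
    static void StoreDoubleBits(int64_t bits, uint32_t* dst) {
      dst[0] = static_cast<uint32_t>(bits & 0xFFFFFFFF);  // total_offset
      dst[1] = static_cast<uint32_t>(bits >> 32);         // total_offset + 4
    }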
-void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
int size = instr->hydrogen()->total_size();
// Allocate all objects that are part of the literal in one big
@@ -4433,12 +4679,13 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
}
-void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
- __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+ // Set up the parameters to the stub/runtime call.
+ __ LoadHeapObject(r4, literals);
__ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
__ mov(r2, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements()
@@ -4447,7 +4694,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
__ mov(r1, Operand(Smi::FromInt(flags)));
__ Push(r4, r3, r2, r1);
- // Pick the right runtime function to call.
+ // Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
@@ -4802,6 +5049,88 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
}
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, ip);
+ DeoptimizeIf(eq, instr->environment());
+
+ Register null_value = r5;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ cmp(r0, null_value);
+ DeoptimizeIf(eq, instr->environment());
+
+ __ tst(r0, Operand(kSmiTagMask));
+ DeoptimizeIf(eq, instr->environment());
+
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
+ DeoptimizeIf(le, instr->environment());
+
+ Label use_cache, call_runtime;
+ __ CheckEnumCache(null_value, &call_runtime);
+
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ b(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(r0);
+ CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+ __ cmp(r1, ip);
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&use_cache);
+}
+
+
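The checks above deoptimize for undefined, null, smis, and proxies, then either reuse the map's enum cache or call the runtime and verify (via the meta-map check) that a Map rather than a property array came back. A pseudocode sketch, with invented helper names:

    // Pseudocode sketch of DoForInPrepareMap's decision.
    enum ForInSource { USE_ENUM_CACHE, USE_RUNTIME_NAMES };
    static ForInSource PrepareForIn(Object* receiver) {
      if (IsUndefined(receiver) || IsNull(receiver) ||
          IsSmi(receiver) || IsJSProxy(receiver)) {
        Deoptimize();  // invented stand-in for the DeoptimizeIf calls above
      }
      return HasCompleteEnumCache(receiver) ? USE_ENUM_CACHE
                                            : USE_RUNTIME_NAMES;
    }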
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+ Register map = ToRegister(instr->map());
+ Register result = ToRegister(instr->result());
+ __ LoadInstanceDescriptors(map, result);
+ __ ldr(result,
+ FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
+ __ ldr(result,
+ FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ cmp(result, Operand(0));
+ DeoptimizeIf(eq, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+ Register object = ToRegister(instr->value());
+ Register map = ToRegister(instr->map());
+ __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ cmp(map, scratch0());
+ DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ Register object = ToRegister(instr->object());
+ Register index = ToRegister(instr->index());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ Label out_of_object, done;
+ __ cmp(index, Operand(0));
+ __ b(lt, &out_of_object);
+
+ STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+ __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+ __ b(&done);
+
+ __ bind(&out_of_object);
+ __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ // Index is equal to the negated out-of-object property index plus 1.
+ __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(result, FieldMemOperand(scratch,
+ FixedArray::kHeaderSize - kPointerSize));
+ __ bind(&done);
+}
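The index encodes the property's location: non-negative values address in-object fields after the object header, negative values address the out-of-object properties array, where the slot is the negated index minus one. As a sketch (ReadWord and properties() stand in for the real accessors):

    // Sketch of the addressing scheme in DoLoadFieldByIndex.
    static Object* LoadFieldByIndex(JSObject* object, int index) {
      if (index >= 0) {
        // In-object field, directly after the header.
        return ReadWord(object, JSObject::kHeaderSize + index * kPointerSize);
      }
      // Out-of-object field: slot (-index - 1) of the properties array.
      return ReadWord(object->properties(),
                      FixedArray::kHeaderSize + (-index - 1) * kPointerSize);
    }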
#undef __
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 00823e16..adb6e1bb 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -114,8 +114,10 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
+ void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+ void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 9894ff20..857c2bf7 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1188,8 +1188,7 @@ void MacroAssembler::DebugBreak() {
#endif
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type,
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
@@ -1201,28 +1200,20 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
// For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
// We will build up the handler from the bottom by pushing on the stack.
- // First compute the state.
- unsigned state = StackHandler::OffsetField::encode(handler_index);
- if (try_location == IN_JAVASCRIPT) {
- state |= (type == TRY_CATCH_HANDLER)
- ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
- : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
- } else {
- ASSERT(try_location == IN_JS_ENTRY);
- state |= StackHandler::KindField::encode(StackHandler::ENTRY);
- }
-
// Set up the code object (r5) and the state (r6) for pushing.
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
mov(r5, Operand(CodeObject()));
mov(r6, Operand(state));
// Push the frame pointer, context, state, and code object.
- if (try_location == IN_JAVASCRIPT) {
- stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
- } else {
+ if (kind == StackHandler::JS_ENTRY) {
mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+ } else {
+ stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
}
// Link the current handler as the next handler.
@@ -1290,8 +1281,7 @@ void MacroAssembler::Throw(Register value) {
}
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -1301,24 +1291,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in r0.
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate());
- mov(r0, Operand(false, RelocInfo::NONE));
- mov(r2, Operand(external_caught));
- str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- str(r0, MemOperand(r2));
- } else if (!value.is(r0)) {
+ if (!value.is(r0)) {
mov(r0, value);
}
-
// Drop the stack pointer to the top of the top stack handler.
mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ldr(sp, MemOperand(r3));
@@ -1330,7 +1305,7 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
- STATIC_ASSERT(StackHandler::ENTRY == 0);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
tst(r2, Operand(StackHandler::KindField::kMask));
b(ne, &fetch_next);
@@ -2879,6 +2854,47 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ cmp(map_in_out, ip);
+ b(ne, no_map_match);
+
+ // Use the transitioned cached map.
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(
+ Register function_in, Register scratch, Register map_out) {
+ ASSERT(!function_in.is(map_out));
+ Label done;
+ ldr(map_out, FieldMemOperand(function_in,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ if (!FLAG_smi_only_arrays) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ map_out,
+ scratch,
+ &done);
+ }
+ bind(&done);
+}
+
+
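LoadTransitionedArrayMapConditional is a guarded map swap: when map_in_out equals the global context's cached map for expected_kind it is replaced with the cached map for transitioned_kind, otherwise control branches to no_map_match. Schematically, with a simplified context accessor:

    // Schematic of the conditional map transition (GetMapAt is invented).
    static Map* TransitionArrayMap(Context* global_context, Map* map_in,
                                   int expected_index,
                                   int transitioned_index, bool* matched) {
      *matched = (map_in == global_context->GetMapAt(expected_index));
      return *matched ? global_context->GetMapAt(transitioned_index) : map_in;
    }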
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -2939,6 +2955,22 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
}
+void MacroAssembler::UntagAndJumpIfSmi(
+ Register dst, Register src, Label* smi_case) {
+ STATIC_ASSERT(kSmiTag == 0);
+ mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ b(cc, smi_case); // Shifter carry is not set for a smi.
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(
+ Register dst, Register src, Label* non_smi_case) {
+ STATIC_ASSERT(kSmiTag == 0);
+ mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ b(cs, non_smi_case); // Shifter carry is set for a non-smi.
+}
+
+
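Both helpers rely on the one-bit smi tag with kSmiTag == 0: an arithmetic shift right by one untags the value, while the bit shifted out (ARM's shifter carry) is the tag, 0 for smis and 1 for heap objects. A C-level model:

    // C-level model of UntagAndJumpIfSmi / UntagAndJumpIfNotSmi.
    static bool UntagIsSmi(uint32_t value, int32_t* untagged) {
      *untagged = static_cast<int32_t>(value) >> 1;  // ASR kSmiTagSize
      return (value & 1) == 0;  // carry clear <=> the value was a smi
    }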
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
@@ -3615,8 +3647,8 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
bind(&in_bounds);
Vmov(temp_double_reg, 0.5);
vadd(temp_double_reg, input_reg, temp_double_reg);
- vcvt_u32_f64(s0, temp_double_reg);
- vmov(result_reg, s0);
+ vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
+ vmov(result_reg, temp_double_reg.low());
bind(&done);
}
@@ -3632,6 +3664,52 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+ Label next;
+ // Preload a couple of values used in the loop.
+ Register empty_fixed_array_value = r6;
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = r7;
+ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ mov(r1, r0);
+ bind(&next);
+
+ // Check that there are no elements. Register r1 contains the
+ // current JS object we've reached through the prototype chain.
+ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+ cmp(r2, empty_fixed_array_value);
+ b(ne, call_runtime);
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in r2 for the subsequent
+ // prototype load.
+ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
+ JumpIfSmi(r3, call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (r3). This is the case if the next enumeration
+ // index field does not contain a smi.
+ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
+ JumpIfSmi(r3, call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ cmp(r1, r0);
+ b(eq, &check_prototype);
+ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ cmp(r3, empty_fixed_array_value);
+ b(ne, call_runtime);
+
+ // Load the prototype from the map and loop if non-null.
+ bind(&check_prototype);
+ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
+ cmp(r1, null_value);
+ b(ne, &next);
+}
+
+
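CheckEnumCache walks the prototype chain from the receiver in r0 and jumps to call_runtime unless every object has empty elements and a populated enum cache; only the receiver itself may have a non-empty cache bridge. In pseudocode, with invented predicates:

    // Pseudocode sketch of the CheckEnumCache loop.
    static bool EnumCacheUsable(JSObject* receiver) {
      for (JSObject* current = receiver; !IsNull(current);
           current = PrototypeOf(current)) {
        if (!HasEmptyElements(current)) return false;
        if (!MapHasEnumCache(current)) return false;
        if (current != receiver && !EnumCacheIsEmpty(current)) return false;
      }
      return true;
    }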
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 60c2e6f6..47afa93a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -491,6 +491,22 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the global context if the map in register
+ // map_in_out is the cached Array map in the global context of
+ // expected_kind. On mismatch, branch to no_map_match and leave
+ // map_in_out untouched.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ // Load the initial map for new Arrays from a JSFunction.
+ void LoadInitialArrayMap(Register function_in,
+ Register scratch,
+ Register map_out);
+
void LoadGlobalFunction(int index, Register function);
// Load the initial map from the global function. The registers
@@ -566,20 +582,18 @@ class MacroAssembler: public Assembler {
// Exception handling
// Push a new try handler and link into try handler chain.
- void PushTryHandler(CodeLocation try_location,
- HandlerType type,
- int handler_index);
+ void PushTryHandler(StackHandler::Kind kind, int handler_index);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
void PopTryHandler();
- // Passes thrown value (in r0) to the handler of top of the try handler chain.
+ // Passes thrown value to the handler on top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ void ThrowUncatchable(Register value);
// ---------------------------------------------------------------------------
// Inline caching support
@@ -787,7 +801,8 @@ class MacroAssembler: public Assembler {
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
+ // the FastDoubleElements array elements. Otherwise jump to fail, in which
+ // case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
@@ -1144,6 +1159,14 @@ class MacroAssembler: public Assembler {
mov(dst, Operand(src, ASR, kSmiTagSize), s);
}
+ // Untag the source value into destination and jump if source is a smi.
+ // Source and destination can be the same register.
+ void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+ // Untag the source value into destination and jump if source is not a smi.
+ // Source and destination can be the same register.
+ void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
@@ -1236,6 +1259,10 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+ // Expects object in r0 and returns map with validated enum cache
+ // in r0. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Register null_value, Label* call_runtime);
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index b212f9f6..10ff2dd9 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -472,7 +472,7 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
- ASSERT(minus < String::kMaxUC16CharCode);
+ ASSERT(minus < String::kMaxUtf16CodeUnit);
__ sub(r0, current_character(), Operand(minus));
__ and_(r0, r0, Operand(mask));
__ cmp(r0, Operand(c));
@@ -571,7 +571,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
BranchOrBacktrack(eq, on_no_match);
return true;
}
@@ -585,7 +585,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
BranchOrBacktrack(ne, on_no_match);
if (mode_ != ASCII) {
__ bind(&done);
@@ -681,7 +681,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
- __ tst(r1, Operand(r1));
+ __ cmp(r1, Operand(0));
__ mov(r1, Operand(1), LeaveCC, eq);
__ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ str(r1, MemOperand(frame_pointer(), kAtStart));
@@ -1055,7 +1055,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
- MaybeObject* result = Execution::HandleStackGuardInterrupt();
+ MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 1ae172c0..629c209e 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1277,9 +1277,9 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 512 bytes to prevent overrunning the stack when
+ // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
// pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 512;
+ return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 33fbee52..06f8385a 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -43,47 +43,83 @@ static void ProbeTable(Isolate* isolate,
MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
+ Register receiver,
Register name,
+ // Number of the cache entry, not scaled.
Register offset,
Register scratch,
- Register scratch2) {
+ Register scratch2,
+ Register offset_scratch) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+ uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
// Check the relative positions of the address fields.
ASSERT(value_off_addr > key_off_addr);
ASSERT((value_off_addr - key_off_addr) % 4 == 0);
ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+ ASSERT(map_off_addr > key_off_addr);
+ ASSERT((map_off_addr - key_off_addr) % 4 == 0);
+ ASSERT((map_off_addr - key_off_addr) < (256 * 4));
Label miss;
- Register offsets_base_addr = scratch;
+ Register base_addr = scratch;
+ scratch = no_reg;
+
+ // Multiply by 3 because there are 3 fields per entry (name, code, map).
+ __ add(offset_scratch, offset, Operand(offset, LSL, 1));
+
+ // Calculate the base address of the entry.
+ __ mov(base_addr, Operand(key_offset));
+ __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
// Check that the key in the entry matches the name.
- __ mov(offsets_base_addr, Operand(key_offset));
- __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1));
+ __ ldr(ip, MemOperand(base_addr, 0));
__ cmp(name, ip);
__ b(ne, &miss);
+ // Check the map matches.
+ __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+ __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ cmp(ip, scratch2);
+ __ b(ne, &miss);
+
// Get the code entry from the cache.
- __ add(offsets_base_addr, offsets_base_addr,
- Operand(value_off_addr - key_off_addr));
- __ ldr(scratch2, MemOperand(offsets_base_addr, offset, LSL, 1));
+ Register code = scratch2;
+ scratch2 = no_reg;
+ __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
- __ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
- __ bic(scratch2, scratch2, Operand(Code::kFlagsNotUsedInLookup));
- __ cmp(scratch2, Operand(flags));
+ Register flags_reg = base_addr;
+ base_addr = no_reg;
+ __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+ // It's a nice optimization if this constant is encodable in the bic insn.
+ uint32_t mask = Code::kFlagsNotUsedInLookup;
+ ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
+ __ bic(flags_reg, flags_reg, Operand(mask));
+ // Using cmn and the negative instead of cmp means we can use movw.
+ if (flags < 0) {
+ __ cmn(flags_reg, Operand(-flags));
+ } else {
+ __ cmp(flags_reg, Operand(flags));
+ }
__ b(ne, &miss);
- // Re-load code entry from cache.
- __ ldr(offset, MemOperand(offsets_base_addr, offset, LSL, 1));
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ __ jmp(&miss);
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ __ jmp(&miss);
+ }
+#endif
// Jump to the first instruction in the code stub.
- __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(offset);
+ __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
// Miss: fall through.
__ bind(&miss);
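Each stub cache entry is now three words (name, code, map), hence sizeof(Entry) == 12 and the add-with-shift above that scales the raw entry number by three. The layout the probe assumes, as a sketch:

    // Sketch of the 12-byte stub cache entry probed above. The code field
    // lives (value_off_addr - key_off_addr) bytes past the key, the map
    // field (map_off_addr - key_off_addr) bytes past it.
    struct Entry {
      String* key;  // compared against 'name'
      Code* value;  // its flags are compared against 'flags'
      Map* map;     // compared against the receiver's map
    };
    // base_addr = key_offset + entry * 3 * kPointerSize, where
    // entry * 3 is computed as offset + (offset << 1).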
@@ -155,13 +191,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register name,
Register scratch,
Register extra,
- Register extra2) {
+ Register extra2,
+ Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
- // Make sure that code is valid. The shifting code relies on the
- // entry size being 8.
- ASSERT(sizeof(Entry) == 8);
+ // Make sure that code is valid. The multiplying code relies on the
+ // entry size being 12.
+ ASSERT(sizeof(Entry) == 12);
// Make sure the flags does not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -181,6 +218,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!scratch.is(no_reg));
ASSERT(!extra.is(no_reg));
ASSERT(!extra2.is(no_reg));
+ ASSERT(!extra3.is(no_reg));
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
+ extra2, extra3);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
@@ -189,27 +231,51 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
- __ eor(scratch, scratch, Operand(flags));
- __ and_(scratch,
- scratch,
- Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ uint32_t mask = kPrimaryTableSize - 1;
+ // We shift out the last two bits because they are not part of the hash and
+ // they are always 01 for maps.
+ __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
+ // Mask down the eor argument to the minimum to keep the immediate
+ // ARM-encodable.
+ __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
+ // Prefer and_ to ubfx here because ubfx takes 2 cycles.
+ __ and_(scratch, scratch, Operand(mask));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+ ProbeTable(isolate,
+ masm,
+ flags,
+ kPrimary,
+ receiver,
+ name,
+ scratch,
+ extra,
+ extra2,
+ extra3);
// Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name));
- __ add(scratch, scratch, Operand(flags));
- __ and_(scratch,
- scratch,
- Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+ __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
+ uint32_t mask2 = kSecondaryTableSize - 1;
+ __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
+ __ and_(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+ ProbeTable(isolate,
+ masm,
+ flags,
+ kSecondary,
+ receiver,
+ name,
+ scratch,
+ extra,
+ extra2,
+ extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
+ extra2, extra3);
}
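Both probe indices are derived from the name's hash field, the receiver's map word, and the code flags, everything pre-shifted by kHeapObjectTagSize so the immediates stay ARM-encodable. A sketch (masking the eor/add operand early, as the code does, is equivalent once the final and is applied):

    // Sketch of the primary and secondary stub cache hash computations.
    static uint32_t PrimaryIndex(uint32_t hash_field, uint32_t map_word,
                                 uint32_t flags) {
      uint32_t h = (hash_field + map_word) >> kHeapObjectTagSize;
      h ^= flags >> kHeapObjectTagSize;
      return h & (StubCache::kPrimaryTableSize - 1);
    }
    static uint32_t SecondaryIndex(uint32_t primary, uint32_t name_word,
                                   uint32_t flags) {
      uint32_t h = primary - (name_word >> kHeapObjectTagSize);
      h += flags >> kHeapObjectTagSize;
      return h & (StubCache::kSecondaryTableSize - 1);
    }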
@@ -1321,14 +1387,8 @@ void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
// Get the receiver from the stack.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(r0, miss);
- }
-
// Check that the maps haven't changed.
+ __ JumpIfSmi(r0, miss);
CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
}
@@ -1445,28 +1505,30 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Ret();
} else {
Label call_builtin;
- Register elements = r3;
- Register end_elements = r5;
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
Label attempt_to_grow_elements;
+ Register elements = r6;
+ Register end_elements = r5;
+ // Get the elements array of the object.
+ __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ r0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(r0, r0, Operand(Smi::FromInt(argc)));
- // Get the element's length.
+ // Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
@@ -1481,7 +1543,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- // Push the element.
+ // Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@@ -1496,13 +1558,33 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ bind(&with_write_barrier);
- __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r6, r6, &call_builtin);
+ __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+ Label fast_object, not_fast_object;
+ __ CheckFastObjectElements(r3, r7, &not_fast_object);
+ __ jmp(&fast_object);
+ // In case of fast smi-only, convert to fast object, otherwise bail out.
+ __ bind(&not_fast_object);
+ __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
+ // r1: receiver
+ // r3: map
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ r3,
+ r7,
+ &call_builtin);
+ __ mov(r2, receiver);
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ __ bind(&fast_object);
+ } else {
+ __ CheckFastObjectElements(r3, r3, &call_builtin);
+ }
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- // Push the element.
+ // Store the value.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@@ -1548,25 +1630,25 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
__ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r6, MemOperand(r7));
- __ cmp(end_elements, r6);
+ __ ldr(r3, MemOperand(r7));
+ __ cmp(end_elements, r3);
__ b(ne, &call_builtin);
__ mov(r9, Operand(new_space_allocation_limit));
__ ldr(r9, MemOperand(r9));
- __ add(r6, r6, Operand(kAllocationDelta * kPointerSize));
- __ cmp(r6, r9);
+ __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
+ __ cmp(r3, r9);
__ b(hi, &call_builtin);
// We fit and could grow elements.
// Update new_space_allocation_top.
- __ str(r6, MemOperand(r7));
+ __ str(r3, MemOperand(r7));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
- __ str(r6, MemOperand(end_elements, i * kPointerSize));
+ __ str(r3, MemOperand(end_elements, i * kPointerSize));
}
// Update elements' and array's sizes.
@@ -2725,14 +2807,8 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
// -----------------------------------
Label miss;
- // If the object is the holder then we know that it's a global
- // object which can only happen for contextual calls. In this case,
- // the receiver cannot be a smi.
- if (!object.is_identical_to(holder)) {
- __ JumpIfSmi(r0, &miss);
- }
-
// Check that the map of the global has not changed.
+ __ JumpIfSmi(r0, &miss);
CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
// Get the value from the cell.
@@ -3024,7 +3100,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+ KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
__ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
@@ -4069,7 +4145,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4078,13 +4155,16 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
- Register scratch = r3;
- Register elements_reg = r4;
+ Register scratch = r4;
+ Register elements_reg = r3;
+ Register length_reg = r5;
+ Register scratch2 = r6;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4092,16 +4172,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic);
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+ }
// Check that the key is within bounds.
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
if (is_js_array) {
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
@@ -4109,10 +4186,21 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
}
// Compare smis.
__ cmp(key_reg, scratch);
- __ b(hs, &miss_force_generic);
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ b(hs, &grow);
+ } else {
+ __ b(hs, &miss_force_generic);
+ }
+ // Make sure elements is a fast element array, not 'cow'.
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ __ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4150,12 +4238,80 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element; anything else
+ // must be handled by the runtime. Flags already set by previous compare.
+ __ b(ne, &miss_force_generic);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ ldr(length_reg,
+ FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+ __ b(ne, &check_capacity);
+
+ int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
+ TAG_OBJECT);
+
+ __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+ __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+ __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+ __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
+ }
+
+ // Store the element at index zero.
+ __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
+
+ // Install the new backing store in the JSArray.
+ __ str(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+ scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ Ret();
+
+ __ bind(&check_capacity);
+ // Check for COW elements; in general they are not handled by this stub.
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedCOWArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ cmp(length_reg, scratch);
+ __ b(hs, &slow);
+
+ // Grow the array and finish the store.
+ __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
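The grow path above only handles a store at exactly index == length on a JSArray; a hole-creating store, a COW backing store, or insufficient capacity falls back to the miss handler or the slow IC. A toy model of the decision, with simplified types:

    // Toy model of the grow-by-one fast path.
    struct Backing { uint32_t capacity; bool copy_on_write; };
    struct Array { uint32_t length; Backing* elements; };

    // true  => generated code finishes the store (&finish_store);
    // false => fall back to the runtime (&slow or the miss handler).
    static bool CanStoreWithGrowth(Array* a, uint32_t key) {
      if (key < a->length) return true;              // in-bounds store
      if (key != a->length) return false;            // growing by > 1
      if (a->elements->copy_on_write) return false;  // COW: bail out
      if (a->length >= a->elements->capacity) return false;
      a->length += 1;                                // grow, then store
      return true;
    }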
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
- bool is_js_array) {
+ bool is_js_array,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4165,7 +4321,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
@@ -4175,6 +4332,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = r5;
Register scratch3 = r6;
Register scratch4 = r7;
+ Register length_reg = r7;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4193,8 +4351,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
__ cmp(key_reg, scratch1);
- __ b(hs, &miss_force_generic);
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ b(hs, &grow);
+ } else {
+ __ b(hs, &miss_force_generic);
+ }
+ __ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
@@ -4215,6 +4378,73 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element; anything else
+ // must be handled by the runtime. Flags already set by previous compare.
+ __ b(ne, &miss_force_generic);
+
+ // Transition on values that can't be stored in a FixedDoubleArray.
+ Label value_is_smi;
+ __ JumpIfSmi(value_reg, &value_is_smi);
+ __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+ __ b(ne, &transition_elements_kind);
+ __ bind(&value_is_smi);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ ldr(length_reg,
+ FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+ __ b(ne, &check_capacity);
+
+ int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
+ TAG_OBJECT);
+
+ // Initialize the new FixedDoubleArray. Leave the elements uninitialized
+ // for efficiency; they are guaranteed to be initialized before use.
+ __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
+ __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+ __ mov(scratch1,
+ Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ str(scratch1,
+ FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+
+ // Install the new backing store in the JSArray.
+ __ str(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+ scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&check_capacity);
+ // Make sure that the backing store can hold additional elements.
+ __ ldr(scratch1,
+ FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+ __ cmp(length_reg, scratch1);
+ __ b(hs, &slow);
+
+ // Grow the array and finish the store.
+ __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ }
}