Diffstat (limited to 'src/arm')
-rw-r--r--  src/arm/assembler-arm-inl.h      23
-rw-r--r--  src/arm/assembler-arm.cc         35
-rw-r--r--  src/arm/assembler-arm.h           9
-rw-r--r--  src/arm/builtins-arm.cc          25
-rw-r--r--  src/arm/codegen-arm.cc          710
-rw-r--r--  src/arm/codegen-arm.h            25
-rw-r--r--  src/arm/debug-arm.cc              8
-rw-r--r--  src/arm/disasm-arm.cc             8
-rw-r--r--  src/arm/fast-codegen-arm.cc     241
-rw-r--r--  src/arm/full-codegen-arm.cc     196
-rw-r--r--  src/arm/ic-arm.cc                79
-rw-r--r--  src/arm/macro-assembler-arm.cc   31
-rw-r--r--  src/arm/macro-assembler-arm.h     4
-rw-r--r--  src/arm/simulator-arm.cc         10
-rw-r--r--  src/arm/stub-cache-arm.cc        42
15 files changed, 804 insertions, 642 deletions
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 5be57709..f72ad76a 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -190,6 +190,29 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
}
+template<typename StaticVisitor>
+void RelocInfo::Visit() {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitPointer(target_object_address());
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (Debug::has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(this);
+#endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm32_ = immediate;
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index b1705df9..6df6411d 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1820,6 +1820,7 @@ void Assembler::vldr(const DwVfpRegister dst,
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
+ ASSERT(offset >= 0);
emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
@@ -1836,6 +1837,7 @@ void Assembler::vldr(const SwVfpRegister dst,
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
+ ASSERT(offset >= 0);
emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
0xA*B8 | ((offset / 4) & 255));
}
@@ -1852,11 +1854,29 @@ void Assembler::vstr(const DwVfpRegister src,
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
+ ASSERT(offset >= 0);
emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
+void Assembler::vstr(const SwVfpRegister src,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // MEM(Rbase + offset) = SSrc.
+ // Instruction details available in ARM DDI 0406A, A8-786.
+ // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
+ // Vsrc(15-12) | 1010(11-8) | (offset/4)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ ASSERT((offset / 4) < 256);
+ ASSERT(offset >= 0);
+ emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+ 0xA*B8 | ((offset / 4) & 255));
+}
+
+
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@@ -2276,6 +2296,21 @@ void Assembler::vcmp(const DwVfpRegister src1,
}
+void Assembler::vcmp(const DwVfpRegister src1,
+ const double src2,
+ const SBit s,
+ const Condition cond) {
+ // vcmp(Dd, #0.0) double precision floating point comparison with zero.
+ // Instruction details available in ARM DDI 0406A, A8-570.
+ // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(src2 == 0.0);
+ emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
+ src1.code()*B12 | 0x5*B9 | B8 | B6);
+}
+
+
void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
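The new vcmp overload only accepts the literal 0.0 as its second operand (enforced by the ASSERT above) and encodes the compare-with-zero form of the instruction. Together with vmrs, which copies the FPSCR flags into a core register (into the CPSR when the destination is pc), it is used as follows in the ToBooleanStub added later in this patch (a sketch only; tos stands for the register holding the value under test):

    __ sub(ip, tos, Operand(kHeapObjectTag));   // untag the HeapNumber
    __ vldr(d1, ip, HeapNumber::kValueOffset);  // load its double value
    __ vcmp(d1, 0.0);                           // compare against +0.0
    __ vmrs(pc);                                // FPSCR flags -> CPSR
    __ mov(tos, Operand(0), LeaveCC, eq);       // +0 or -0 => false
    __ mov(tos, Operand(0), LeaveCC, vs);       // NaN      => false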
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 16e69e29..cc6ec054 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -966,6 +966,11 @@ class Assembler : public Malloced {
int offset, // Offset must be a multiple of 4.
const Condition cond = al);
+ void vstr(const SwVfpRegister src,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
+
void vmov(const DwVfpRegister dst,
double imm,
const Condition cond = al);
@@ -1031,6 +1036,10 @@ class Assembler : public Malloced {
const DwVfpRegister src2,
const SBit s = LeaveCC,
const Condition cond = al);
+ void vcmp(const DwVfpRegister src1,
+ const double src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
void vmrs(const Register dst,
const Condition cond = al);
void vsqrt(const DwVfpRegister dst,
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index b1f29ba3..7e7e358c 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -911,6 +911,29 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Preserve the function.
+ __ push(r1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore saved function.
+ __ pop(r1);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ Jump(r2);
+}
+
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
@@ -1050,7 +1073,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r2, r0); // Check formal and actual parameter counts.
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
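The change above goes hand in hand with the FastNewClosureStub change later in this patch, which copies the code pointer out of the SharedFunctionInfo into the newly allocated JSFunction (JSFunction::kCodeOffset). Callers can therefore read the code object straight off the closure. A sketch with the register assignments used above (r1 holds the JSFunction, r3 its SharedFunctionInfo):

    // Before: one extra indirection through the SharedFunctionInfo.
    // __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
    // After: the closure caches its own code pointer.
    __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset));
    __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));  // code entry point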
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 4bcf1a07..15f34b59 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -217,93 +217,80 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
#endif
- if (info->mode() == CompilationInfo::PRIMARY) {
- frame_->Enter();
- // tos: code slot
-
- // Allocate space for locals and initialize them. This also checks
- // for stack overflow.
- frame_->AllocateStackSlots();
-
- frame_->AssertIsSpilled();
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- // Allocate local context.
- // Get outer context and create a new context based on it.
- __ ldr(r0, frame_->Function());
- frame_->EmitPush(r0);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ frame_->Enter();
+ // tos: code slot
+
+ // Allocate space for locals and initialize them. This also checks
+ // for stack overflow.
+ frame_->AllocateStackSlots();
+
+ frame_->AssertIsSpilled();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ __ ldr(r0, frame_->Function());
+ frame_->EmitPush(r0);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, cp);
- verified_true.Branch(eq);
- __ stop("NewContext: r0 is expected to be the same as cp");
- verified_true.Bind();
+ JumpTarget verified_true;
+ __ cmp(r0, cp);
+ verified_true.Branch(eq);
+ __ stop("NewContext: r0 is expected to be the same as cp");
+ verified_true.Bind();
#endif
- // Update context local.
- __ str(cp, frame_->Context());
- }
+ // Update context local.
+ __ str(cp, frame_->Context());
+ }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- frame_->AssertIsSpilled();
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- ASSERT(!scope()->is_global_scope()); // No params in global scope.
- __ ldr(r1, frame_->ParameterAt(i));
- // Loads r2 with context; used below in RecordWrite.
- __ str(r1, SlotOperand(slot, r2));
- // Load the offset into r3.
- int slot_offset =
- FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(r2, Operand(slot_offset), r3, r1);
- }
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ frame_->AssertIsSpilled();
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ Variable* par = scope()->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ ASSERT(!scope()->is_global_scope()); // No params in global scope.
+ __ ldr(r1, frame_->ParameterAt(i));
+ // Loads r2 with context; used below in RecordWrite.
+ __ str(r1, SlotOperand(slot, r2));
+ // Load the offset into r3.
+ int slot_offset =
+ FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(r2, Operand(slot_offset), r3, r1);
}
}
+ }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
- StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
- }
- } else {
- // When used as the secondary compiler for splitting, r1, cp,
- // fp, and lr have been pushed on the stack. Adjust the virtual
- // frame to match this state.
- frame_->Adjust(4);
-
- // Bind all the bailout labels to the beginning of the function.
- List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
- for (int i = 0; i < bailouts->length(); i++) {
- __ bind(bailouts->at(i)->label());
- }
+ // Initialize ThisFunction reference if present.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
+ StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
// Initialize the function return target after the locals are set
@@ -532,6 +519,10 @@ void CodeGenerator::LoadCondition(Expression* x,
void CodeGenerator::Load(Expression* expr) {
+ // We generally assume that we are not in a spilled scope for most
+ // of the code generator. A failure to ensure this caused issue 815
+ // and this assert is designed to catch similar issues.
+ frame_->AssertIsNotSpilled();
#ifdef DEBUG
int original_height = frame_->height();
#endif
@@ -688,6 +679,10 @@ Reference::Reference(CodeGenerator* cgen,
expression_(expression),
type_(ILLEGAL),
persist_after_get_(persist_after_get) {
+ // We generally assume that we are not in a spilled scope for most
+ // of the code generator. A failure to ensure this caused issue 815
+ // and this assert is designed to catch similar issues.
+ cgen->frame()->AssertIsNotSpilled();
cgen->LoadReference(this);
}
@@ -784,12 +779,26 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
__ tst(tos, Operand(kSmiTagMask));
true_target->Branch(eq);
- // Slow case: call the runtime.
- frame_->EmitPush(tos);
- frame_->CallRuntime(Runtime::kToBool, 1);
- // Convert the result (r0) to a condition code.
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
+ // Slow case.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Implements the slow case by using ToBooleanStub.
+ // The ToBooleanStub takes a single argument, and
+ // returns a non-zero value for true, or zero for false.
+ // Both the argument value and the return value use the
+ // register assigned to tos_
+ ToBooleanStub stub(tos);
+ frame_->CallStub(&stub, 0);
+ // Convert the result in "tos" to a condition code.
+ __ cmp(tos, Operand(0));
+ } else {
+ // Implements slow case by calling the runtime.
+ frame_->EmitPush(tos);
+ frame_->CallRuntime(Runtime::kToBool, 1);
+ // Convert the result (r0) to a condition code.
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
+ }
}
cc_reg_ = ne;
@@ -1213,7 +1222,21 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::SHR:
case Token::SAR: {
ASSERT(!reversed);
- TypeInfo result = TypeInfo::Integer32();
+ TypeInfo result =
+ (op == Token::SAR) ? TypeInfo::Integer32() : TypeInfo::Number();
+ if (!reversed) {
+ if (op == Token::SHR) {
+ if (int_value >= 2) {
+ result = TypeInfo::Smi();
+ } else if (int_value >= 1) {
+ result = TypeInfo::Integer32();
+ }
+ } else {
+ if (int_value >= 1) {
+ result = TypeInfo::Smi();
+ }
+ }
+ }
Register scratch = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
int shift_value = int_value & 0x1f; // least significant 5 bits
@@ -1532,9 +1555,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ BranchOnSmi(r0, &build_args);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
+ __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeOffset));
__ cmp(r1, Operand(apply_code));
__ b(ne, &build_args);
@@ -1899,19 +1921,17 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- frame_->SpillAll();
Comment cmnt(masm_, "[ ReturnStatement");
CodeForStatementPosition(node);
Load(node->expression());
+ frame_->PopToR0();
+ frame_->PrepareForReturn();
if (function_return_is_shadowed_) {
- frame_->EmitPop(r0);
function_return_.Jump();
} else {
// Pop the result from the frame and prepare the frame for
// returning thus making it easier to merge.
- frame_->PopToR0();
- frame_->PrepareForReturn();
if (function_return_.is_bound()) {
// If the function return label is already bound we reuse the
// code by jumping to the return site.
@@ -2307,7 +2327,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ForInStatement");
CodeForStatementPosition(node);
@@ -2321,6 +2340,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Get the object to enumerate over (converted to JSObject).
Load(node->enumerable());
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Both SpiderMonkey and kjs ignore null and undefined in contrast
// to the specification. 12.6.4 mandates a call to ToObject.
frame_->EmitPop(r0);
@@ -2482,36 +2502,39 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
- __ mov(r3, Operand(r0));
-
+ __ mov(r3, Operand(r0), SetCC);
// If the property has been removed while iterating, we just skip it.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r3, ip);
node->continue_target()->Branch(eq);
end_del_check.Bind();
// Store the entry in the 'each' expression and take another spin in the
// loop. r3: i'th entry of the enum cache (or string there of)
frame_->EmitPush(r3); // push entry
- { Reference each(this, node->each());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ Reference each(this, node->each());
if (!each.is_illegal()) {
if (each.size() > 0) {
+ // Loading a reference may leave the frame in an unspilled state.
+ frame_->SpillAll(); // Sync stack to memory.
+ // Get the value (under the reference on the stack) from memory.
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop(2);
+ frame_->Drop(2); // The result of the set and the extra pushed value.
} else {
// If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, r3 pushed above) is
+ // that it doesn't matter whether a value (eg, ebx pushed above) is
// right on top of or right underneath a zero-sized reference.
each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
- frame_->Drop();
+ frame_->Drop(1); // Drop the result of the set operation.
}
}
}
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ Visit(node->body());
+ }
// Next. Reestablish a spilled frame in case we are coming here via
// a continue in the body.
@@ -2558,7 +2581,9 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// Remove the exception from the stack.
frame_->Drop();
- VisitStatements(node->catch_block()->statements());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ VisitStatements(node->catch_block()->statements());
+ }
if (frame_ != NULL) {
exit.Jump();
}
@@ -2593,7 +2618,9 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
}
// Generate code for the statements in the try block.
- VisitStatements(node->try_block()->statements());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ VisitStatements(node->try_block()->statements());
+ }
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
@@ -2614,7 +2641,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// the handler list and drop the rest of this handler from the
// frame.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
+ frame_->EmitPop(r1); // r0 can contain the return value.
__ mov(r3, Operand(handler_address));
__ str(r1, MemOperand(r3));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -2640,7 +2667,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
frame_->Forget(frame_->height() - handler_height);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- frame_->EmitPop(r1);
+ frame_->EmitPop(r1); // r0 can contain the return value.
__ str(r1, MemOperand(r3));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -2707,7 +2734,9 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
}
// Generate code for the statements in the try block.
- VisitStatements(node->try_block()->statements());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ VisitStatements(node->try_block()->statements());
+ }
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
@@ -2797,7 +2826,9 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
// and the state - while evaluating the finally block.
//
// Generate code for the statements in the finally block.
- VisitStatements(node->finally_block()->statements());
+ { VirtualFrame::RegisterAllocationScope scope(this);
+ VisitStatements(node->finally_block()->statements());
+ }
if (has_valid_frame()) {
// Restore state and return value or faked TOS.
@@ -3414,12 +3445,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
frame_->EmitPush(Operand(node->constant_elements()));
int length = node->values()->length();
- if (node->depth() > 1) {
+ if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ frame_->CallStub(&stub, 3);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
+ } else if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
frame_->CallStub(&stub, 3);
}
frame_->EmitPush(r0); // save the result
@@ -3977,7 +4014,6 @@ void CodeGenerator::VisitCall(Call* node) {
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
// ----------------------------------
// JavaScript examples:
//
@@ -3990,8 +4026,6 @@ void CodeGenerator::VisitCall(Call* node) {
// }
// ----------------------------------
- // JumpTargets do not yet support merging frames so the frame must be
- // spilled when jumping to these targets.
JumpTarget slow, done;
// Generate fast case for loading functions from slots that
@@ -4005,8 +4039,7 @@ void CodeGenerator::VisitCall(Call* node) {
slow.Bind();
// Load the function
frame_->EmitPush(cp);
- __ mov(r0, Operand(var->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(var->name()));
frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
// r0: slot value; r1: receiver
@@ -4022,7 +4055,7 @@ void CodeGenerator::VisitCall(Call* node) {
call.Jump();
done.Bind();
frame_->EmitPush(r0); // function
- LoadGlobalReceiver(r1); // receiver
+ LoadGlobalReceiver(VirtualFrame::scratch0()); // receiver
call.Bind();
}
@@ -4077,8 +4110,6 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// JavaScript example: 'array[index](1, 2, 3)'
// -------------------------------------------
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
Load(property->obj());
if (property->is_synthetic()) {
Load(property->key());
@@ -4086,7 +4117,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Put the function below the receiver.
// Use the global receiver.
frame_->EmitPush(r0); // Function.
- LoadGlobalReceiver(r0);
+ LoadGlobalReceiver(VirtualFrame::scratch0());
// Call the function.
CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
frame_->EmitPush(r0);
@@ -4099,6 +4130,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Set the name register and call the IC initialization code.
Load(property->key());
+ frame_->SpillAll();
frame_->EmitPop(r2); // Function name.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
@@ -4118,10 +4150,8 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the function.
Load(function);
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
// Pass the global proxy as the receiver.
- LoadGlobalReceiver(r0);
+ LoadGlobalReceiver(VirtualFrame::scratch0());
// Call the function.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@@ -4176,21 +4206,21 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- ASSERT(args->length() == 1);
- JumpTarget leave, null, function, non_function_constructor;
+ Register scratch = VirtualFrame::scratch0();
+ JumpTarget null, function, leave, non_function_constructor;
- // Load the object into r0.
+ // Load the object into register.
+ ASSERT(args->length() == 1);
Load(args->at(0));
- frame_->EmitPop(r0);
+ Register tos = frame_->PopToRegister();
// If the object is a smi, we return null.
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
null.Branch(eq);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
null.Branch(lt);
// As long as JS_FUNCTION_TYPE is the last instance type and it is
@@ -4198,37 +4228,38 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// LAST_JS_OBJECT_TYPE.
STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+ __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
function.Branch(eq);
// Check if the constructor in the map is a function.
- __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+ __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
+ __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
non_function_constructor.Branch(ne);
- // The r0 register now contains the constructor function. Grab the
+ // The tos register now contains the constructor function. Grab the
// instance class name from there.
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
- frame_->EmitPush(r0);
+ __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(tos,
+ FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
+ frame_->EmitPush(tos);
leave.Jump();
// Functions have class 'Function'.
function.Bind();
- __ mov(r0, Operand(Factory::function_class_symbol()));
- frame_->EmitPush(r0);
+ __ mov(tos, Operand(Factory::function_class_symbol()));
+ frame_->EmitPush(tos);
leave.Jump();
// Objects with a non-function constructor have class 'Object'.
non_function_constructor.Bind();
- __ mov(r0, Operand(Factory::Object_symbol()));
- frame_->EmitPush(r0);
+ __ mov(tos, Operand(Factory::Object_symbol()));
+ frame_->EmitPush(tos);
leave.Jump();
// Non-JS objects have class null.
null.Bind();
- __ LoadRoot(r0, Heap::kNullValueRootIndex);
- frame_->EmitPush(r0);
+ __ LoadRoot(tos, Heap::kNullValueRootIndex);
+ frame_->EmitPush(tos);
// All done.
leave.Bind();
@@ -4236,45 +4267,51 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- ASSERT(args->length() == 1);
+ Register scratch = VirtualFrame::scratch0();
JumpTarget leave;
+
+ ASSERT(args->length() == 1);
Load(args->at(0));
- frame_->EmitPop(r0); // r0 contains object.
+ Register tos = frame_->PopToRegister(); // tos contains object.
// if (object->IsSmi()) return the object.
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
leave.Branch(eq);
// It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
+ __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
leave.Branch(ne);
// Load the value.
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
leave.Bind();
- frame_->EmitPush(r0);
+ frame_->EmitPush(tos);
}
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- ASSERT(args->length() == 2);
+ Register scratch1 = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
JumpTarget leave;
+
+ ASSERT(args->length() == 2);
Load(args->at(0)); // Load the object.
Load(args->at(1)); // Load the value.
- frame_->EmitPop(r0); // r0 contains value
- frame_->EmitPop(r1); // r1 contains object
+ Register value = frame_->PopToRegister();
+ Register object = frame_->PopToRegister(value);
// if (object->IsSmi()) return object.
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(object, Operand(kSmiTagMask));
leave.Branch(eq);
// It is a heap object - get map. If (!object->IsJSValue()) return the object.
- __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
+ __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
leave.Branch(ne);
// Store the value.
- __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+ __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
// Update the write barrier.
- __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+ __ RecordWrite(object,
+ Operand(JSValue::kValueOffset - kHeapObjectTag),
+ scratch1,
+ scratch2);
// Leave.
leave.Bind();
- frame_->EmitPush(r0);
+ frame_->EmitPush(value);
}
@@ -4558,22 +4595,18 @@ class DeferredStringCharCodeAt : public DeferredCode {
// This generates code that performs a String.prototype.charCodeAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment(masm_, "[ GenerateStringCharCodeAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
- Register index = r1;
- Register object = r2;
-
- frame_->EmitPop(r1);
- frame_->EmitPop(r2);
+ Register index = frame_->PopToRegister();
+ Register object = frame_->PopToRegister(index);
// We need two extra registers.
- Register scratch = r3;
- Register result = r0;
+ Register scratch = VirtualFrame::scratch0();
+ Register result = VirtualFrame::scratch1();
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(object,
@@ -4608,16 +4641,13 @@ class DeferredStringCharFromCode : public DeferredCode {
// Generates code for creating a one-char string from a char code.
void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment(masm_, "[ GenerateStringCharFromCode");
ASSERT(args->length() == 1);
Load(args->at(0));
- Register code = r1;
- Register result = r0;
-
- frame_->EmitPop(code);
+ Register result = frame_->GetTOSRegister();
+ Register code = frame_->PopToRegister(result);
DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
code, result);
@@ -4679,23 +4709,20 @@ class DeferredStringCharAt : public DeferredCode {
// This generates code that performs a String.prototype.charAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment(masm_, "[ GenerateStringCharAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
- Register index = r1;
- Register object = r2;
-
- frame_->EmitPop(r1);
- frame_->EmitPop(r2);
+ Register index = frame_->PopToRegister();
+ Register object = frame_->PopToRegister(index);
// We need three extra registers.
- Register scratch1 = r3;
- Register scratch2 = r4;
- Register result = r0;
+ Register scratch1 = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
+ // Use r6 without notifying the virtual frame.
+ Register result = r6;
DeferredStringCharAt* deferred =
new DeferredStringCharAt(object,
@@ -4793,6 +4820,152 @@ void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
}
+// Deferred code to check whether a String JavaScript wrapper object is safe
+// for using the default valueOf. This code is called after the bit caching
+// this information in the map has been checked; the map for the object is in
+// the map_result_ register. On return the register map_result_ contains 1 for
+// true and 0 for false.
+class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
+ public:
+ DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
+ Register map_result,
+ Register scratch1,
+ Register scratch2)
+ : object_(object),
+ map_result_(map_result),
+ scratch1_(scratch1),
+ scratch2_(scratch2) { }
+
+ virtual void Generate() {
+ Label false_result;
+
+ // Check that map is loaded as expected.
+ if (FLAG_debug_code) {
+ __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ cmp(map_result_, ip);
+ __ Assert(eq, "Map not in expected register");
+ }
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
+ __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(scratch1_, ip);
+ __ b(eq, &false_result);
+
+ // Look for valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ ldr(map_result_,
+ FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
+ __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
+ // map_result_: descriptor array
+ // scratch2_: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ add(scratch1_,
+ map_result_,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch1_,
+ scratch1_,
+ Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Calculate location of the first key name.
+ __ add(map_result_,
+ map_result_,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // symbol valueOf the result is false.
+ Label entry, loop;
+ // The use of ip to store the valueOf symbol assumes that it is not otherwise
+ // used in the loop below.
+ __ mov(ip, Operand(Factory::value_of_symbol()));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ ldr(scratch2_, MemOperand(map_result_, 0));
+ __ cmp(scratch2_, ip);
+ __ b(eq, &false_result);
+ __ add(map_result_, map_result_, Operand(kPointerSize));
+ __ bind(&entry);
+ __ cmp(map_result_, Operand(scratch1_));
+ __ b(ne, &loop);
+
+ // Reload map as register map_result_ was used as temporary above.
+ __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
+ __ tst(scratch1_, Operand(kSmiTagMask));
+ __ b(eq, &false_result);
+ __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
+ __ ldr(scratch2_,
+ CodeGenerator::ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(scratch2_,
+ FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
+ __ ldr(scratch2_,
+ CodeGenerator::ContextOperand(
+ scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ cmp(scratch1_, scratch2_);
+ __ b(ne, &false_result);
+
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf and set true result.
+ __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ orr(scratch1_,
+ scratch1_,
+ Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+ __ mov(map_result_, Operand(1));
+ __ jmp(exit_label());
+ __ bind(&false_result);
+ // Set false result.
+ __ mov(map_result_, Operand(0));
+ }
+
+ private:
+ Register object_;
+ Register map_result_;
+ Register scratch1_;
+ Register scratch2_;
+};
+
+
+void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Register obj = frame_->PopToRegister(); // Pop the string wrapper.
+ if (FLAG_debug_code) {
+ __ AbortIfSmi(obj);
+ }
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ Register map_result = VirtualFrame::scratch0();
+ __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
+ __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
+ __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ true_target()->Branch(ne);
+
+ // We need an additional two scratch registers for the deferred code.
+ Register scratch1 = VirtualFrame::scratch1();
+ // Use r6 without notifying the virtual frame.
+ Register scratch2 = r6;
+
+ DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
+ new DeferredIsStringWrapperSafeForDefaultValueOf(
+ obj, map_result, scratch1, scratch2);
+ deferred->Branch(eq);
+ deferred->BindExit();
+ __ tst(map_result, Operand(map_result));
+ cc_reg_ = ne;
+}
+
+
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
@@ -4874,13 +5047,13 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
// Satisfy contract with ArgumentsAccessStub:
// Load the key into r1 and the formal parameters count into r0.
Load(args->at(0));
- frame_->EmitPop(r1);
+ frame_->PopToR1();
+ frame_->SpillAll();
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
// Call the shared stub to get to arguments[key].
@@ -5108,9 +5281,7 @@ class DeferredSearchCache: public DeferredCode {
void DeferredSearchCache::Generate() {
__ Push(cache_, key_);
__ CallRuntime(Runtime::kGetFromCache, 2);
- if (!dst_.is(r0)) {
- __ mov(dst_, r0);
- }
+ __ Move(dst_, r0);
}
@@ -5130,33 +5301,42 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
Load(args->at(1));
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- frame_->EmitPop(r2);
+ frame_->PopToR1();
+ frame_->SpillAll();
+ Register key = r1; // Just popped to r1.
+ Register result = r0; // Free, as frame has just been spilled.
+ Register scratch1 = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
- __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));
+ __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(scratch1,
+ FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
+ __ ldr(scratch1,
+ ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ ldr(scratch1,
+ FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
- DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
+ DeferredSearchCache* deferred =
+ new DeferredSearchCache(result, scratch1, key);
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
- // r0 now holds finger offset as a smi.
- __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r3 now points to the start of fixed array elements.
- __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
- // Note side effect of PreIndex: r3 now points to the key of the pair.
- __ cmp(r2, r0);
+ __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
+ // result now holds finger offset as a smi.
+ __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // scratch2 now points to the start of fixed array elements.
+ __ ldr(result,
+ MemOperand(
+ scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+ // Note side effect of PreIndex: scratch2 now points to the key of the pair.
+ __ cmp(key, result);
deferred->Branch(ne);
- __ ldr(r0, MemOperand(r3, kPointerSize));
+ __ ldr(result, MemOperand(scratch2, kPointerSize));
deferred->BindExit();
- frame_->EmitPush(r0);
+ frame_->EmitPush(result);
}
@@ -5228,7 +5408,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
__ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(nz);
- // Check the object's elements are in fast case.
+ // Check the object's elements are in fast case and writable.
__ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
__ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
@@ -6520,15 +6700,9 @@ void CodeGenerator::EmitKeyedLoad() {
__ cmp(scratch1, scratch2);
deferred->Branch(ne);
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
+ // Get the elements array from the receiver.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch2, ip);
- __ Assert(eq, "JSObject with fast elements map has slow elements");
- }
+ __ AssertFastElements(scratch1);
// Check that key is within bounds. Use unsigned comparison to handle
// negative keys.
@@ -6851,6 +7025,11 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ str(r3, FieldMemOperand(r0, JSFunction::kCodeOffset));
+
// Return result. The argument function info has been popped already.
__ Ret();
@@ -6933,6 +7112,26 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(eq, &slow_case);
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(r3);
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, expected_map_index);
+ __ cmp(r3, ip);
+ __ Assert(eq, message);
+ __ pop(r3);
+ }
+
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
__ AllocateInNewSpace(size,
@@ -7801,6 +8000,77 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
+// This stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result;
+ Label not_heap_number;
+ Register scratch0 = VirtualFrame::scratch0();
+
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch0, ip);
+ __ b(&not_heap_number, ne);
+
+ __ sub(ip, tos_, Operand(kHeapObjectTag));
+ __ vldr(d1, ip, HeapNumber::kValueOffset);
+ __ vcmp(d1, 0.0);
+ __ vmrs(pc);
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ mov(tos_, Operand(0), LeaveCC, eq); // for FP_ZERO
+ __ mov(tos_, Operand(0), LeaveCC, vs); // for FP_NAN
+ __ Ret();
+
+ __ bind(&not_heap_number);
+
+ // Check if the value is 'null'.
+ // 'null' => false.
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(tos_, ip);
+ __ b(&false_result, eq);
+
+ // It can be an undetectable object.
+ // Undetectable => false.
+ __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch0, FieldMemOperand(ip, Map::kBitFieldOffset));
+ __ and_(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+ __ cmp(scratch0, Operand(1 << Map::kIsUndetectable));
+ __ b(&false_result, eq);
+
+ // JavaScript object => true.
+ __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+ __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt);
+
+ // Check for string
+ __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+ __ cmp(scratch0, Operand(FIRST_NONSTRING_TYPE));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt);
+
+ // String value => false iff empty, i.e., length is zero
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // If length is zero, "tos_" contains zero ==> false.
+ // If length is not zero, "tos_" contains a non-zero value ==> true.
+ __ Ret();
+
+ // Return 0 in "tos_" for false.
+ __ bind(&false_result);
+ __ mov(tos_, Operand(0));
+ __ Ret();
+}
+
+
// We fall into this code if the operands were Smis, but the result was
// not (eg. overflow). We branch into this code (to the not_smi label) if
// the operands were not both Smi. The operands are in r0 and r1. In order
@@ -10444,11 +10714,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
- if (!scratch_.is(r0)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ mov(scratch_, r0);
- }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Move(scratch_, r0);
__ pop(index_);
__ pop(object_);
// Reload the instance type.
@@ -10467,9 +10735,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(r0)) {
- __ mov(result_, r0);
- }
+ __ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -10510,9 +10776,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(r0)) {
- __ mov(result_, r0);
- }
+ __ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
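Several of the hunks above replace the hand-written pattern "if (!dst_.is(r0)) __ mov(dst_, r0);" with a single "__ Move(dst_, r0)". The macro-assembler files are part of this change (see the diffstat) but are not shown in this section, so the following is only a sketch of the assumed helper:

    // Assumed shape of the helper; the real definition lives in
    // src/arm/macro-assembler-arm.{h,cc}, which is outside this excerpt.
    void MacroAssembler::Move(Register dst, Register src) {
      if (!dst.is(src)) {
        mov(dst, src);
      }
    }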
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index bfe20809..8a632a9c 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -278,7 +278,7 @@ class CodeGenerator: public AstVisitor {
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
- return FLAG_debug_code ? 27 : 13;
+ return FLAG_debug_code ? 32 : 13;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
@@ -286,6 +286,10 @@ class CodeGenerator: public AstVisitor {
return inlined_write_barrier_size_ + 4;
}
+ static MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+ }
+
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -338,10 +342,6 @@ class CodeGenerator: public AstVisitor {
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
- static MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
- }
-
MemOperand SlotOperand(Slot* slot, Register tmp);
MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
@@ -482,6 +482,8 @@ class CodeGenerator: public AstVisitor {
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
+ void GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -623,6 +625,19 @@ class TranscendentalCacheStub: public CodeStub {
};
+class ToBooleanStub: public CodeStub {
+ public:
+ explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register tos_;
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return tos_.code(); }
+};
+
+
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
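Usage of the new stub, as wired up in the ToBoolean slow case in codegen-arm.cc above (a sketch; tos is whichever register the virtual frame popped the condition value into):

    ToBooleanStub stub(tos);      // argument and result both live in tos
    frame_->CallStub(&stub, 0);
    __ cmp(tos, Operand(0));      // non-zero => true, zero => false

Because MinorKey() is the code of the tos register, a separate stub instance is generated and cached for each register the tested value happens to occupy.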
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index e87d265e..3a948451 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -293,15 +293,11 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on arm");
}
+const bool Debug::kFrameDropperSupported = false;
+
#undef __
-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- UNREACHABLE();
- return NULL;
-}
-const int Debug::kFrameDropperFrameSize = -1;
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index fd142bd9..0029ed16 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1188,7 +1188,13 @@ void Decoder::DecodeVCMP(Instr* instr) {
bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
if (dp_operation && !raise_exception_for_qnan) {
- Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ if (instr->Opc2Field() == 0x4) {
+ Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ } else if (instr->Opc2Field() == 0x5) {
+ Format(instr, "vcmp.f64'cond 'Dd, #0.0");
+ } else {
+ Unknown(instr); // invalid
+ }
} else {
Unknown(instr); // Not used by V8.
}
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
deleted file mode 100644
index 36ac2aa3..00000000
--- a/src/arm/fast-codegen-arm.cc
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "fast-codegen.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-Register FastCodeGenerator::accumulator0() { return r0; }
-Register FastCodeGenerator::accumulator1() { return r1; }
-Register FastCodeGenerator::scratch0() { return r3; }
-Register FastCodeGenerator::scratch1() { return r4; }
-Register FastCodeGenerator::scratch2() { return r5; }
-Register FastCodeGenerator::receiver_reg() { return r2; }
-Register FastCodeGenerator::context_reg() { return cp; }
-
-
-void FastCodeGenerator::EmitLoadReceiver() {
- // Offset 2 is due to return address and saved frame pointer.
- int index = 2 + scope()->num_parameters();
- __ ldr(receiver_reg(), MemOperand(sp, index * kPointerSize));
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
- ASSERT(!destination().is(no_reg));
- ASSERT(cell->IsJSGlobalPropertyCell());
-
- __ mov(destination(), Operand(cell));
- __ ldr(destination(),
- FieldMemOperand(destination(), JSGlobalPropertyCell::kValueOffset));
- if (FLAG_debug_code) {
- __ mov(ip, Operand(Factory::the_hole_value()));
- __ cmp(destination(), ip);
- __ Check(ne, "DontDelete cells can't contain the hole");
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
-
- // We will emit the write barrier unless the stored value is statically
- // known to be a smi.
- bool needs_write_barrier = !is_smi(accumulator0());
-
- // Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ str(accumulator0(), FieldMemOperand(receiver_reg(), offset));
- if (needs_write_barrier) {
- // Preserve receiver from write barrier.
- __ mov(scratch0(), receiver_reg());
- }
- } else {
- offset += FixedArray::kHeaderSize;
- __ ldr(scratch0(),
- FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ str(accumulator0(), FieldMemOperand(scratch0(), offset));
- }
-
- if (needs_write_barrier) {
- __ RecordWrite(scratch0(), Operand(offset), scratch1(), scratch2());
- }
-
- if (destination().is(accumulator1())) {
- __ mov(accumulator1(), accumulator0());
- if (is_smi(accumulator0())) {
- set_as_smi(accumulator1());
- } else {
- clear_as_smi(accumulator1());
- }
- }
-}
-
-
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- ASSERT(!destination().is(no_reg));
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
-
- // Perform the load. Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ ldr(destination(), FieldMemOperand(receiver_reg(), offset));
- } else {
- offset += FixedArray::kHeaderSize;
- __ ldr(scratch0(),
- FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ ldr(destination(), FieldMemOperand(scratch0(), offset));
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
- if (is_smi(accumulator0()) && is_smi(accumulator1())) {
- // If both operands are known to be a smi then there is no need to check
- // the operands or result. There is no need to perform the operation in
- // an effect context.
- if (!destination().is(no_reg)) {
- __ orr(destination(), accumulator1(), Operand(accumulator0()));
- }
- } else {
- // Left is in accumulator1, right in accumulator0.
- if (destination().is(accumulator0())) {
- __ mov(scratch0(), accumulator0());
- __ orr(destination(), accumulator1(), Operand(accumulator1()));
- Label* bailout =
- info()->AddBailout(accumulator1(), scratch0()); // Left, right.
- __ BranchOnNotSmi(destination(), bailout);
- } else if (destination().is(accumulator1())) {
- __ mov(scratch0(), accumulator1());
- __ orr(destination(), accumulator1(), Operand(accumulator0()));
- Label* bailout = info()->AddBailout(scratch0(), accumulator0());
- __ BranchOnNotSmi(destination(), bailout);
- } else {
- ASSERT(destination().is(no_reg));
- __ orr(scratch0(), accumulator1(), Operand(accumulator0()));
- Label* bailout = info()->AddBailout(accumulator1(), accumulator0());
- __ BranchOnNotSmi(scratch0(), bailout);
- }
- }
-
- // If we didn't bailout, the result (in fact, both inputs too) is known to
- // be a smi.
- set_as_smi(accumulator0());
- set_as_smi(accumulator1());
-}
-
-
-void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
- ASSERT(info_ == NULL);
- info_ = compilation_info;
- Comment cmnt(masm_, "[ function compiled by fast code generator");
-
- // Save the caller's frame pointer and set up our own.
- Comment prologue_cmnt(masm(), ";; Prologue");
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(2 * kPointerSize));
- // Note that we keep a live register reference to cp (context) at
- // this point.
-
- Label* bailout_to_beginning = info()->AddBailout();
- // Receiver (this) is allocated to a fixed register.
- if (info()->has_this_properties()) {
- Comment cmnt(masm(), ";; MapCheck(this)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(this)\n");
- }
- ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
- Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
- Handle<Map> map(object->map());
- EmitLoadReceiver();
- __ CheckMap(receiver_reg(), scratch0(), map, bailout_to_beginning, false);
- }
-
- // If there is a global variable access check if the global object is the
- // same as at lazy-compilation time.
- if (info()->has_globals()) {
- Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(GLOBAL)\n");
- }
- ASSERT(info()->has_global_object());
- Handle<Map> map(info()->global_object()->map());
- __ ldr(scratch0(), CodeGenerator::GlobalObject());
- __ CheckMap(scratch0(), scratch1(), map, bailout_to_beginning, true);
- }
-
- VisitStatements(function()->body());
-
- Comment return_cmnt(masm(), ";; Return(<undefined>)");
- if (FLAG_print_ir) {
- PrintF("Return(<undefined>)\n");
- }
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
- __ add(sp, sp, Operand(sp_delta));
- __ Jump(lr);
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index ea5a8f2a..f3adab28 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -55,99 +55,97 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
- if (mode == PRIMARY) {
- int locals_count = scope()->num_stack_slots();
+ int locals_count = scope()->num_stack_slots();
- __ Push(lr, fp, cp, r1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
+ __ Push(lr, fp, cp, r1);
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ }
+ // Adjust fp to point to caller's fp.
+ __ add(fp, sp, Operand(2 * kPointerSize));
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
- }
+ { Comment cmnt(masm_, "[ Allocate locals");
+ for (int i = 0; i < locals_count; i++) {
+ __ push(ip);
}
+ }
- bool function_in_register = true;
+ bool function_in_register = true;
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(slot->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
- }
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, Operand(r1), r3, r0);
}
}
+ }
- Variable* arguments = scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
- }
- // Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
- __ add(r2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
- __ Push(r3, r2, r1);
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
- Move(arguments->slot(), r0, r1, r2);
- Slot* dot_arguments_slot =
- scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, r3, r1, r2);
+ Variable* arguments = scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(r3, r1);
}
+ // Receiver is just before the parameters on the caller's stack.
+ int offset = scope()->num_parameters() * kPointerSize;
+ __ add(r2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ Push(r3, r2, r1);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+    // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments->slot(), r0, r1, r2);
+ Slot* dot_arguments_slot =
+ scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, r3, r1, r2);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -956,15 +954,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r4, Operand(r2));
__ b(eq, &update_each);
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
// just skip it.
__ push(r1); // Enumerable.
__ push(r3); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
- __ mov(r3, Operand(r0));
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r3, ip);
+ __ mov(r3, Operand(r0), SetCC);
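+  // The mov sets the condition flags, so the eq branch below is taken when
+  // FILTER_KEY returned (smi) 0 for a property that has been removed.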
__ b(eq, loop_statement.continue_target());
// Update the 'each' property or variable from the possibly filtered
@@ -1227,12 +1223,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_elements()));
__ Push(r3, r2, r1);
- if (expr->depth() > 1) {
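+  // Constant elements already backed by the copy-on-write FixedArray map can
+  // be shared with the clone rather than copied, hence the dedicated stub mode.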
+ if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ __ CallStub(&stub);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
+ } else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
__ CallStub(&stub);
}
@@ -1959,6 +1961,26 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
+ // used in a few functions in runtime.js which should not normally be hit by
+ // this compiler.
+ __ jmp(if_false);
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 1fd70982..abc09222 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -414,17 +414,17 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
- Register scratch1,
- Register scratch2,
+ Register map,
+ Register scratch,
int interceptor_bit,
Label* slow) {
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, slow);
// Get the map of the receiver.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ tst(scratch2,
+ __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ b(nz, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
@@ -432,13 +432,14 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// we enter the runtime system to make sure that indexing into string
  // objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ cmp(scratch1, Operand(JS_OBJECT_TYPE));
+ __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ cmp(scratch, Operand(JS_OBJECT_TYPE));
__ b(lt, slow);
}
// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, the elements map check is not performed.
static void GenerateFastArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -471,11 +472,15 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// scratch2 - used to hold the loaded value.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
- __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ b(ne, not_fast_array);
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(scratch1, ip);
+ __ b(ne, not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
// Check that the key (index) is within bounds.
__ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch1));
@@ -1120,16 +1125,23 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
+  // Check the "has fast elements" bit in the receiver's map, which is
+ // now in r2.
+ __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
+ __ tst(r3, Operand(1 << Map::kHasFastElements));
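+  // The zero flag is set when the fast-elements bit is clear, so the eq
+  // branch skips the fast array load for receivers without fast elements.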
+ __ b(eq, &check_pixel_array);
+
GenerateFastArrayLoad(
- masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
+ masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
__ Ret();
// Check whether the elements is a pixel array.
// r0: key
- // r3: elements map
- // r4: elements
+ // r1: receiver
__ bind(&check_pixel_array);
+ __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
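+  // The fast-elements check above branches here before the elements are
+  // loaded, so fetch the elements and their map from the receiver first.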
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &check_number_dictionary);
@@ -1690,7 +1702,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
+ // Check that the object is in fast mode and writable.
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
@@ -1748,8 +1760,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ b(&fast);
// Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode; if it is the
- // length is always a smi.
+ // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
__ bind(&array);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
@@ -1779,19 +1791,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
-// Convert int passed in register ival to IEE 754 single precision
-// floating point value and store it into register fval.
+// Convert the int passed in register ival to an IEEE 754 single precision
+// floating point value and store it at memory location (dst + 4 * wordoffset).
// If VFP3 is available use it for conversion.
-static void ConvertIntToFloat(MacroAssembler* masm,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
+static void StoreIntAsFloat(MacroAssembler* masm,
+ Register dst,
+ Register wordoffset,
+ Register ival,
+ Register fval,
+ Register scratch1,
+ Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
+ __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
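+    // scratch1 now holds dst + 4 * wordoffset, the address the converted
+    // single precision value is stored to below.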
__ vcvt_f32_s32(s0, s0);
- __ vmov(fval, s0);
+ __ vstr(s0, scratch1, 0);
} else {
Label not_special, done;
// Move sign bit from source to destination. This works because the sign
@@ -1841,6 +1856,7 @@ static void ConvertIntToFloat(MacroAssembler* masm,
Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
__ bind(&done);
+ __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
}
}
@@ -1935,9 +1951,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
case kExternalFloatArray:
- // Need to perform int-to-float conversion.
- ConvertIntToFloat(masm, r5, r6, r7, r9);
- __ str(r6, MemOperand(r3, r4, LSL, 2));
+ // Perform int-to-float conversion and store to memory.
+ StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
break;
default:
UNREACHABLE();
@@ -1971,9 +1986,9 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ add(r5, r3, Operand(r4, LSL, 2));
__ vcvt_f32_f64(s0, d0);
- __ vmov(r5, s0);
- __ str(r5, MemOperand(r3, r4, LSL, 2));
+ __ vstr(s0, r5, 0);
} else {
// Need to perform float-to-int conversion.
// Test for NaN or infinity (both give zero).
@@ -2217,6 +2232,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ b(ne, &miss);
// Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
__ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
__ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
__ b(ne, &miss);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 9c25ccde..2058ee28 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -757,7 +757,7 @@ void MacroAssembler::InvokeFunction(Register fun,
SharedFunctionInfo::kFormalParameterCountOffset));
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
- MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+ MemOperand(r1, JSFunction::kCodeOffset - kHeapObjectTag));
add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
ParameterCount expected(expected_reg);
@@ -1508,8 +1508,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
// Make sure the code objects in the builtins object and in the
// builtin function are the same.
push(r1);
- ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+ ldr(r1, FieldMemOperand(r1, JSFunction::kCodeOffset));
cmp(r1, target);
Assert(eq, "Builtin code object changed");
pop(r1);
@@ -1568,6 +1567,25 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg,
}
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (FLAG_debug_code) {
+ ASSERT(!elements.is(ip));
+ Label ok;
+ push(elements);
+ ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ cmp(elements, ip);
+ b(eq, &ok);
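+    // Copy-on-write arrays use a separate map but still count as fast
+    // elements.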
+ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
+ cmp(elements, ip);
+ b(eq, &ok);
+ Abort("JSObject with fast elements map has slow elements");
+ bind(&ok);
+ pop(elements);
+ }
+}
+
+
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
b(cc, &L);
@@ -1656,6 +1674,13 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
}
+void MacroAssembler::AbortIfSmi(Register object) {
+ ASSERT_EQ(0, kSmiTag);
+ tst(object, Operand(kSmiTagMask));
+ Assert(ne, "Operand is a smi");
+}
+
+
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 37a1b1cb..7843f006 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -597,6 +597,7 @@ class MacroAssembler: public Assembler {
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+ void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
@@ -618,6 +619,9 @@ class MacroAssembler: public Assembler {
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+ // Abort execution if argument is a smi. Used in debug code.
+ void AbortIfSmi(Register object);
+
// ---------------------------------------------------------------------------
// String utilities
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 04635e3f..c4cc8d46 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2431,11 +2431,17 @@ void Simulator::DecodeVCMP(Instr* instr) {
}
int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
- int m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+ int m = 0;
+ if (instr->Opc2Field() == 0x4) {
+ m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+ }
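+  // Only the register form of vcmp (Opc2 == 0x4) encodes a Vm operand; the
+  // compare-with-zero form leaves it unused, so dm_value defaults to 0.0.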
if (dp_operation) {
double dd_value = get_double_from_d_register(d);
- double dm_value = get_double_from_d_register(m);
+ double dm_value = 0.0;
+ if (instr->Opc2Field() == 0x4) {
+ dm_value = get_double_from_d_register(m);
+ }
Compute_FPSCR_Flags(dd_value, dm_value);
} else {
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 8c8e702d..2a7c22d7 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1212,38 +1212,6 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
-Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
- // ----------- S t a t e -------------
- // -- r1: function
- // -- lr: return address
- // -----------------------------------
-
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Preserve the function.
- __ push(r1);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore saved function.
- __ pop(r1);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-
- return GetCodeWithFlags(flags, "LazyCompileStub");
-}
-
-
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(r2, Operand(Handle<String>(name)));
@@ -1329,11 +1297,6 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
argc + 1,
1);
@@ -1381,11 +1344,6 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
argc + 1,
1);