author     Iain Merrick <husky@google.com>  2010-08-19 15:07:18 +0100
committer  Iain Merrick <husky@google.com>  2010-08-23 14:25:34 +0100
commit     756813857a4c2a4d8ad2e805969d5768d3cf43a0 (patch)
tree       002fad3c25654870c9634232d53a48219346c50b /src/x64
parent     bb769b257e753aafcbd96767abb2abc645eaa20c (diff)
download   android_external_v8-756813857a4c2a4d8ad2e805969d5768d3cf43a0.tar.gz
           android_external_v8-756813857a4c2a4d8ad2e805969d5768d3cf43a0.tar.bz2
           android_external_v8-756813857a4c2a4d8ad2e805969d5768d3cf43a0.zip
Update V8 to r5295 as required by WebKit r65615
Change-Id: I1d72d4990703e88b7798919c7a53e12ebf76958a
Diffstat (limited to 'src/x64')
-rw-r--r--  src/x64/assembler-x64-inl.h      23
-rw-r--r--  src/x64/assembler-x64.cc          2
-rw-r--r--  src/x64/builtins-x64.cc          22
-rw-r--r--  src/x64/codegen-x64.cc          363
-rw-r--r--  src/x64/codegen-x64.h            22
-rw-r--r--  src/x64/debug-x64.cc             32
-rw-r--r--  src/x64/fast-codegen-x64.cc     250
-rw-r--r--  src/x64/full-codegen-x64.cc     193
-rw-r--r--  src/x64/ic-x64.cc                31
-rw-r--r--  src/x64/macro-assembler-x64.cc   33
-rw-r--r--  src/x64/macro-assembler-x64.h     5
-rw-r--r--  src/x64/stub-cache-x64.cc        60
12 files changed, 523 insertions, 513 deletions
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index c8abd22a..44159e06 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -350,6 +350,29 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
}
+template<typename StaticVisitor>
+void RelocInfo::Visit() {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitPointer(target_object_address());
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (Debug::has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(this);
+#endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Implementation of Operand
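
The templated RelocInfo::Visit<StaticVisitor>() added above mirrors the virtual-visitor overload but routes each relocation mode to a static member function, so the calls can be bound at compile time instead of going through a vtable. Below is a minimal, self-contained C++ sketch of that static-visitor dispatch pattern; every type and name in it is a stand-in invented for illustration, not part of V8.

#include <cstdio>

// Stand-in for RelocInfo::Mode.
enum class Mode { kEmbeddedObject, kCodeTarget, kRuntimeEntry };

struct Slot {
  Mode mode;
  void* payload;
};

// A "static visitor": all hooks are static member functions, so the
// dispatch below compiles to direct calls with no vtable lookups.
struct CountingVisitor {
  static int pointers, code_targets, runtime_entries;
  static void VisitPointer(void*)      { ++pointers; }
  static void VisitCodeTarget(Slot*)   { ++code_targets; }
  static void VisitRuntimeEntry(Slot*) { ++runtime_entries; }
};
int CountingVisitor::pointers = 0;
int CountingVisitor::code_targets = 0;
int CountingVisitor::runtime_entries = 0;

// Mirrors the shape of the templated Visit(): branch on the mode and
// forward to the corresponding static hook.
template <typename StaticVisitor>
void Visit(Slot* slot) {
  if (slot->mode == Mode::kEmbeddedObject) {
    StaticVisitor::VisitPointer(slot->payload);
  } else if (slot->mode == Mode::kCodeTarget) {
    StaticVisitor::VisitCodeTarget(slot);
  } else if (slot->mode == Mode::kRuntimeEntry) {
    StaticVisitor::VisitRuntimeEntry(slot);
  }
}

int main() {
  int dummy = 0;
  Slot slots[] = {{Mode::kEmbeddedObject, &dummy},
                  {Mode::kCodeTarget, nullptr},
                  {Mode::kEmbeddedObject, &dummy}};
  for (Slot& s : slots) Visit<CountingVisitor>(&s);
  std::printf("pointers=%d code=%d runtime=%d\n",
              CountingVisitor::pointers,
              CountingVisitor::code_targets,
              CountingVisitor::runtime_entries);
  return 0;
}
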
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index d90655b0..9ad94ce0 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -253,7 +253,7 @@ Operand::Operand(const Operand& operand, int32_t offset) {
int32_t disp_value = 0;
if (mode == 0x80 || is_baseless) {
// Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = *reinterpret_cast<const int32_t*>(&operand.buf_[disp_offset]);
+ disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
} else if (mode == 0x40) {
// Mode 1: Byte displacement.
disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
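
The hunk above swaps a raw reinterpret_cast for V8's BitCast helper when pulling the 32-bit displacement out of the operand's byte buffer. One common way to express such a read safely is a memcpy-based copy, sketched below under the assumption of a little-endian buffer; ReadInt32 is an illustrative helper, not the helper V8 uses.

#include <cstdint>
#include <cstring>
#include <cstdio>

// Read a little-endian int32 from an arbitrary byte offset without
// dereferencing a possibly misaligned, type-punned pointer.
int32_t ReadInt32(const uint8_t* buf, int offset) {
  int32_t value;
  std::memcpy(&value, buf + offset, sizeof(value));  // well-defined copy
  return value;
}

int main() {
  // Byte buffer resembling a ModRM/SIB encoding followed by a 32-bit
  // displacement of 0x12345678, stored little-endian.
  uint8_t buf[] = {0x80, 0x24, 0x78, 0x56, 0x34, 0x12};
  std::printf("disp=0x%x\n", ReadInt32(buf, 2));
  return 0;
}
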
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 959b4b03..4f2d2b96 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -310,7 +310,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ movsxlq(rbx,
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ cmpq(rax, rbx);
__ j(not_equal,
@@ -1291,6 +1291,26 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ pop(rdi);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rcx);
+}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
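
Generate_LazyCompile, added above (and replacing the per-stub CompileLazyCompile removed from stub-cache-x64.cc later in this diff), is a trampoline: it preserves the function, asks the runtime to compile it, then tail-calls the resulting code. A rough C++ model of that control flow, using plain function pointers and invented names in place of JSFunction and Code objects:

#include <cstdio>

struct JSFunctionLike;
using Entry = void (*)(JSFunctionLike*);

// Stand-in for a JSFunction whose "code" slot initially points at the
// lazy-compile trampoline and is patched on first call.
struct JSFunctionLike {
  const char* name;
  Entry code;
};

void CompiledBody(JSFunctionLike* fn) {
  std::printf("running compiled code for %s\n", fn->name);
}

// Stand-in for Runtime::kLazyCompile: "compile" the function and
// install the real entry point in its code slot.
Entry RuntimeLazyCompile(JSFunctionLike* fn) {
  std::printf("lazily compiling %s\n", fn->name);
  fn->code = CompiledBody;
  return fn->code;
}

// Mirrors the trampoline: preserve the function across the runtime
// call, then tail-call whatever code the runtime produced.
void LazyCompileTrampoline(JSFunctionLike* fn) {
  Entry entry = RuntimeLazyCompile(fn);  // function is the only argument
  entry(fn);                             // "tail call" into the new code
}

int main() {
  JSFunctionLike f = {"f", LazyCompileTrampoline};
  f.code(&f);  // first call compiles, then runs
  f.code(&f);  // second call goes straight to the compiled body
  return 0;
}
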
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index b6256faf..18851822 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -201,103 +201,89 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// rsi: callee's context
allocator_->Initialize();
- if (info->mode() == CompilationInfo::PRIMARY) {
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ allocate local context");
- // Allocate local context.
- // Get outer context and create a new context based on it.
- frame_->PushFunction();
- Result context;
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- context = frame_->CallStub(&stub, 1);
- } else {
- context = frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ frame_->Enter();
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+
+ // Allocate the local context if needed.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ allocate local context");
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ frame_->PushFunction();
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
- // Update context local.
- frame_->SaveContextRegister();
+ // Update context local.
+ frame_->SaveContextRegister();
- // Verify that the runtime call result and rsi agree.
- if (FLAG_debug_code) {
- __ cmpq(context.reg(), rsi);
- __ Assert(equal, "Runtime::NewContext should end up in rsi");
- }
+ // Verify that the runtime call result and rsi agree.
+ if (FLAG_debug_code) {
+ __ cmpq(context.reg(), rsi);
+ __ Assert(equal, "Runtime::NewContext should end up in rsi");
}
+ }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- Variable* par = scope()->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- // The use of SlotOperand below is safe in unspilled code
- // because the slot is guaranteed to be a context slot.
- //
- // There are no parameters in the global scope.
- ASSERT(!scope()->is_global_scope());
- frame_->PushParameterAt(i);
- Result value = frame_->Pop();
- value.ToRegister();
-
- // SlotOperand loads context.reg() with the context object
- // stored to, used below in RecordWrite.
- Result context = allocator_->Allocate();
- ASSERT(context.is_valid());
- __ movq(SlotOperand(slot, context.reg()), value.reg());
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- frame_->Spill(context.reg());
- frame_->Spill(value.reg());
- __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
- }
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ Variable* par = scope()->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // The use of SlotOperand below is safe in unspilled code
+ // because the slot is guaranteed to be a context slot.
+ //
+ // There are no parameters in the global scope.
+ ASSERT(!scope()->is_global_scope());
+ frame_->PushParameterAt(i);
+ Result value = frame_->Pop();
+ value.ToRegister();
+
+ // SlotOperand loads context.reg() with the context object
+ // stored to, used below in RecordWrite.
+ Result context = allocator_->Allocate();
+ ASSERT(context.is_valid());
+ __ movq(SlotOperand(slot, context.reg()), value.reg());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ Result scratch = allocator_->Allocate();
+ ASSERT(scratch.is_valid());
+ frame_->Spill(context.reg());
+ frame_->Spill(value.reg());
+ __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
}
}
+ }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in
- // the context.
- if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
- StoreArgumentsObject(true);
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in
+ // the context.
+ if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+ StoreArgumentsObject(true);
+ }
- // Initialize ThisFunction reference if present.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- frame_->Push(Factory::the_hole_value());
- StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
- }
- } else {
- // When used as the secondary compiler for splitting, rbp, rsi,
- // and rdi have been pushed on the stack. Adjust the virtual
- // frame to match this state.
- frame_->Adjust(3);
- allocator_->Unuse(rdi);
-
- // Bind all the bailout labels to the beginning of the function.
- List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
- for (int i = 0; i < bailouts->length(); i++) {
- __ bind(bailouts->at(i)->label());
- }
+ // Initialize ThisFunction reference if present.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
// Initialize the function return target after the locals are set
@@ -2630,9 +2616,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ j(is_smi, &build_args);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &build_args);
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
+ __ Cmp(FieldOperand(rax, JSFunction::kCodeOffset), apply_code);
__ j(not_equal, &build_args);
// Check that applicand is a function.
@@ -3926,7 +3911,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ movq(rbx, rax);
// If the property has been removed while iterating, we just skip it.
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+ __ SmiCompare(rbx, Smi::FromInt(0));
node->continue_target()->Branch(equal);
end_del_check.Bind();
@@ -5004,12 +4989,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
frame_->Push(node->constant_elements());
int length = node->values()->length();
Result clone;
- if (node->depth() > 1) {
+ if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ clone = frame_->CallStub(&stub, 3);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ } else if (node->depth() > 1) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
clone = frame_->CallStub(&stub, 3);
}
frame_->Push(&clone);
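
With the hunk above, array literals are materialized through one of three paths: a copy-on-write clone when the boilerplate's elements already carry the COW fixed-array map, the full runtime call for nested or oversized literals, and the shallow-clone stub otherwise. A hedged sketch of that decision, with placeholder constants and types rather than V8's real limits:

#include <cstdio>

enum class ElementsKind { kWritableFixedArray, kCopyOnWriteFixedArray };
enum class CloneStrategy { kCopyOnWriteStub, kRuntimeDeep, kRuntimeShallow, kCloneStub };

// Placeholder for FastCloneShallowArrayStub::kMaximumClonedLength.
constexpr int kMaximumClonedLength = 8;

CloneStrategy ChooseCloneStrategy(ElementsKind boilerplate_kind,
                                  int depth, int length) {
  if (boilerplate_kind == ElementsKind::kCopyOnWriteFixedArray) {
    // Elements are immutable until written: just share them.
    return CloneStrategy::kCopyOnWriteStub;
  }
  if (depth > 1) return CloneStrategy::kRuntimeDeep;         // nested literals
  if (length > kMaximumClonedLength) return CloneStrategy::kRuntimeShallow;
  return CloneStrategy::kCloneStub;                          // small, flat, writable
}

int main() {
  std::printf("%d\n", static_cast<int>(
      ChooseCloneStrategy(ElementsKind::kCopyOnWriteFixedArray, 1, 3)));
  std::printf("%d\n", static_cast<int>(
      ChooseCloneStrategy(ElementsKind::kWritableFixedArray, 2, 3)));
  std::printf("%d\n", static_cast<int>(
      ChooseCloneStrategy(ElementsKind::kWritableFixedArray, 1, 100)));
  return 0;
}
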
@@ -5019,12 +5010,9 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < length; i++) {
Expression* value = node->values()->at(i);
- // If value is a literal the property value is already set in the
- // boilerplate object.
- if (value->AsLiteral() != NULL) continue;
- // If value is a materialized literal the property value is already set
- // in the boilerplate object if it is simple.
- if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+ if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
+ continue;
+ }
// The property must be set by generated code.
Load(value);
@@ -6041,6 +6029,143 @@ void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
}
+// Deferred code to check whether the String JavaScript object is safe for using
+// default value of. This code is called after the bit caching this information
+// in the map has been checked with the map for the object in the map_result_
+// register. On return the register map_result_ contains 1 for true and 0 for
+// false.
+class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
+ public:
+ DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
+ Register map_result,
+ Register scratch1,
+ Register scratch2)
+ : object_(object),
+ map_result_(map_result),
+ scratch1_(scratch1),
+ scratch2_(scratch2) { }
+
+ virtual void Generate() {
+ Label false_result;
+
+ // Check that map is loaded as expected.
+ if (FLAG_debug_code) {
+ __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ Assert(equal, "Map not in expected register");
+ }
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
+ __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex);
+ __ j(equal, &false_result);
+
+ // Look for valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ movq(map_result_,
+ FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
+ __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
+ // map_result_: descriptor array
+ // scratch1_: length of descriptor array
+ // Calculate the end of the descriptor array.
+ SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2);
+ __ lea(scratch1_,
+ Operand(
+ map_result_, index.reg, index.scale, FixedArray::kHeaderSize));
+ // Calculate location of the first key name.
+ __ addq(map_result_,
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
+ // Loop through all the keys in the descriptor array. If one of these is the
+ // symbol valueOf the result is false.
+ Label entry, loop;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(scratch2_, FieldOperand(map_result_, 0));
+ __ Cmp(scratch2_, Factory::value_of_symbol());
+ __ j(equal, &false_result);
+ __ addq(map_result_, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(map_result_, scratch1_);
+ __ j(not_equal, &loop);
+
+ // Reload map as register map_result_ was used as temporary above.
+ __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
+
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
+ __ testq(scratch1_, Immediate(kSmiTagMask));
+ __ j(zero, &false_result);
+ __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
+ __ movq(scratch2_,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(scratch2_,
+ FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
+ __ cmpq(scratch1_,
+ CodeGenerator::ContextOperand(
+ scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, &false_result);
+ // Set the bit in the map to indicate that it has been checked safe for
+ // default valueOf and set true result.
+ __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ Set(map_result_, 1);
+ __ jmp(exit_label());
+ __ bind(&false_result);
+ // Set false result.
+ __ Set(map_result_, 0);
+ }
+
+ private:
+ Register object_;
+ Register map_result_;
+ Register scratch1_;
+ Register scratch2_;
+};
+
+
+void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop(); // Pop the string wrapper.
+ obj.ToRegister();
+ ASSERT(obj.is_valid());
+ if (FLAG_debug_code) {
+ __ AbortIfSmi(obj.reg());
+ }
+
+ // Check whether this map has already been checked to be safe for default
+ // valueOf.
+ Result map_result = allocator()->Allocate();
+ ASSERT(map_result.is_valid());
+ __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ destination()->true_target()->Branch(not_zero);
+
+ // We need an additional two scratch registers for the deferred code.
+ Result temp1 = allocator()->Allocate();
+ ASSERT(temp1.is_valid());
+ Result temp2 = allocator()->Allocate();
+ ASSERT(temp2.is_valid());
+
+ DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
+ new DeferredIsStringWrapperSafeForDefaultValueOf(
+ obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
+ deferred->Branch(zero);
+ deferred->BindExit();
+ __ testq(map_result.reg(), map_result.reg());
+ obj.Unuse();
+ map_result.Unuse();
+ temp1.Unuse();
+ temp2.Unuse();
+ destination()->Split(not_equal);
+}
+
+
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
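
The deferred code above computes, once per map, whether a String wrapper can use the default valueOf: it scans the descriptor array for a valueOf key, checks that the prototype is the unmodified String prototype, and caches a positive answer as a bit on the map so later queries are a single test. A simplified, self-contained sketch of that check-once-then-cache idea, with invented stand-in types:

#include <cstdio>
#include <string>
#include <vector>

// Stand-in for a hidden class ("map") with a cached safety bit.
struct MapLike {
  std::vector<std::string> descriptor_names;       // own property names
  bool prototype_unmodified;                       // String.prototype untouched?
  bool checked_safe_for_default_value_of;          // the cached bit
};

// Slow path, corresponding to the deferred code: scan descriptors for
// "valueOf", verify the prototype, then cache a positive answer.
bool SlowCheckAndCache(MapLike* map) {
  for (const std::string& name : map->descriptor_names) {
    if (name == "valueOf") return false;  // own valueOf => not safe
  }
  if (!map->prototype_unmodified) return false;
  map->checked_safe_for_default_value_of = true;   // set the map bit
  return true;
}

// Fast path, corresponding to the inline testb: one bit test, with the
// deferred slow path only on a miss.
bool IsStringWrapperSafeForDefaultValueOf(MapLike* map) {
  if (map->checked_safe_for_default_value_of) return true;
  return SlowCheckAndCache(map);
}

int main() {
  MapLike plain{{"length"}, true, false};
  MapLike patched{{"length", "valueOf"}, true, false};
  std::printf("%d %d %d\n",
              IsStringWrapperSafeForDefaultValueOf(&plain),     // slow, caches
              IsStringWrapperSafeForDefaultValueOf(&plain),     // fast hit
              IsStringWrapperSafeForDefaultValueOf(&patched));  // not safe
  return 0;
}
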
@@ -6753,7 +6878,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(not_zero);
- // Check the object's elements are in fast case.
+ // Check the object's elements are in fast case and writable.
__ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
@@ -8297,15 +8422,10 @@ Result CodeGenerator::EmitKeyedLoad() {
// Check that the key is a non-negative smi.
__ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
+ // Get the elements array from the receiver.
__ movq(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Factory::fixed_array_map());
- __ Assert(equal, "JSObject with fast elements map has slow elements");
- }
+ __ AssertFastElements(elements.reg());
// Check that key is within bounds.
__ SmiCompare(key.reg(),
@@ -8635,6 +8755,12 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
__ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ movq(FieldOperand(rax, JSFunction::kCodeOffset), rdx);
+
+
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
@@ -8713,6 +8839,25 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
__ j(equal, &slow_case);
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(rcx);
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ expected_map_index);
+ __ Assert(equal, message);
+ __ pop(rcx);
+ }
+
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
__ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
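
Several hunks in this file and elsewhere in the commit (FastNewClosureStub above, Generate_FunctionCall, CallApplyLazy, GetBuiltinEntry, InvokeFunction) follow from one change: each JSFunction now carries its own code pointer, initialized from the SharedFunctionInfo when the closure is created, so call sites load it with a single field access. A small illustrative model of that layout, not V8's actual object format:

#include <cstdio>

using Entry = void (*)();

void SharedCode() { std::printf("shared code body\n"); }

// Stand-in for SharedFunctionInfo: owns the canonical code object.
struct SharedInfoLike {
  Entry code;
};

// Stand-in for JSFunction: now carries its own code pointer so callers
// can reach it in one load.
struct JSFunctionLike {
  SharedInfoLike* shared;
  Entry code;  // the new, per-closure code slot
};

// Mirrors FastNewClosureStub: copy the code pointer out of the shared
// info when the closure is created.
JSFunctionLike MakeClosure(SharedInfoLike* shared) {
  JSFunctionLike fn;
  fn.shared = shared;
  fn.code = shared->code;  // initialize the closure's code from shared info
  return fn;
}

int main() {
  SharedInfoLike shared{SharedCode};
  JSFunctionLike fn = MakeClosure(&shared);
  fn.code();  // call sites read fn.code directly, not fn.shared->code
  return 0;
}
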
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 2806f567..14f690eb 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -347,6 +347,10 @@ class CodeGenerator: public AstVisitor {
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+ static Operand ContextOperand(Register context, int index) {
+ return Operand(context, Context::SlotOffset(index));
+ }
+
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -406,10 +410,6 @@ class CodeGenerator: public AstVisitor {
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
- static Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
- }
-
Operand SlotOperand(Slot* slot, Register tmp);
Operand ContextSlotOperandCheckExtensions(Slot* slot,
@@ -611,6 +611,8 @@ class CodeGenerator: public AstVisitor {
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
+ void GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -764,6 +766,18 @@ class TranscendentalCacheStub: public CodeStub {
};
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
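
The new ToBooleanStub follows the usual CodeStub shape, where MajorKey identifies the stub family and MinorKey encodes its parameters; together they act as a cache key for the generated code. A toy sketch of that keying scheme, with an arbitrary made-up major key:

#include <cstdint>
#include <cstdio>

// Sketch of the stub-key idea: a major key names the stub family, a
// minor key encodes its parameters, and the pair forms a cache key.
class StubSketch {
 public:
  virtual ~StubSketch() = default;
  uint32_t Key() const { return (MajorKey() << 16) | MinorKey(); }

 protected:
  virtual uint32_t MajorKey() const = 0;
  virtual uint32_t MinorKey() const = 0;
};

class ToBooleanStubSketch : public StubSketch {
 protected:
  uint32_t MajorKey() const override { return 7; }  // arbitrary family id
  uint32_t MinorKey() const override { return 0; }  // no parameters
};

int main() {
  ToBooleanStubSketch stub;
  std::printf("cache key = 0x%x\n", stub.Key());
  return 0;
}
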
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 2aa77e77..d5b7e776 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -202,23 +202,39 @@ void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on x64");
+ masm->ret(0);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on x64");
+ ExternalReference restarter_frame_function_slot =
+ ExternalReference(Debug_Address::RestarterFrameFunctionPointer());
+ __ movq(rax, restarter_frame_function_slot);
+ __ movq(Operand(rax, 0), Immediate(0));
+
+ // We do not know our frame height, but set rsp based on rbp.
+ __ lea(rsp, Operand(rbp, -1 * kPointerSize));
+
+ __ pop(rdi); // Function.
+ __ pop(rbp);
+
+ // Load context from the function.
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Get function code.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+
+ // Re-run JSFunction, rdi is function, rsi is context.
+ __ jmp(rdx);
}
+const bool Debug::kFrameDropperSupported = true;
+
#undef __
-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
- UNREACHABLE();
- return NULL;
-}
-const int Debug::kFrameDropperFrameSize = -1;
void BreakLocationIterator::ClearDebugBreakAtReturn() {
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
deleted file mode 100644
index 13eef030..00000000
--- a/src/x64/fast-codegen-x64.cc
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "fast-codegen.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-Register FastCodeGenerator::accumulator0() { return rax; }
-Register FastCodeGenerator::accumulator1() { return rdx; }
-Register FastCodeGenerator::scratch0() { return rcx; }
-Register FastCodeGenerator::scratch1() { return rdi; }
-Register FastCodeGenerator::receiver_reg() { return rbx; }
-Register FastCodeGenerator::context_reg() { return rsi; }
-
-
-void FastCodeGenerator::EmitLoadReceiver() {
- // Offset 2 is due to return address and saved frame pointer.
- int index = 2 + scope()->num_parameters();
- __ movq(receiver_reg(), Operand(rbp, index * kPointerSize));
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
- ASSERT(!destination().is(no_reg));
- ASSERT(cell->IsJSGlobalPropertyCell());
-
- __ Move(destination(), cell);
- __ movq(destination(),
- FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
- if (FLAG_debug_code) {
- __ Cmp(destination(), Factory::the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
-
- // We will emit the write barrier unless the stored value is statically
- // known to be a smi.
- bool needs_write_barrier = !is_smi(accumulator0());
-
- // Perform the store. Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ movq(FieldOperand(receiver_reg(), offset), accumulator0());
- if (needs_write_barrier) {
- // Preserve receiver from write barrier.
- __ movq(scratch0(), receiver_reg());
- }
- } else {
- offset += FixedArray::kHeaderSize;
- __ movq(scratch0(),
- FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch0(), offset), accumulator0());
- }
-
- if (needs_write_barrier) {
- if (destination().is(no_reg)) {
- // After RecordWrite accumulator0 is only accidently a smi, but it is
- // already marked as not known to be one.
- __ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
- } else {
- // Copy the value to the other accumulator to preserve a copy from the
- // write barrier. One of the accumulators is available as a scratch
- // register. Neither is a smi.
- __ movq(accumulator1(), accumulator0());
- clear_as_smi(accumulator1());
- Register value_scratch = other_accumulator(destination());
- __ RecordWrite(scratch0(), offset, value_scratch, scratch1());
- }
- } else if (destination().is(accumulator1())) {
- __ movq(accumulator1(), accumulator0());
- // Is a smi because we do not need the write barrier.
- set_as_smi(accumulator1());
- }
-}
-
-
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- ASSERT(!destination().is(no_reg));
- LookupResult lookup;
- info()->receiver()->Lookup(*name, &lookup);
-
- ASSERT(lookup.holder() == *info()->receiver());
- ASSERT(lookup.type() == FIELD);
- Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
- int index = lookup.GetFieldIndex() - map->inobject_properties();
- int offset = index * kPointerSize;
-
- // Perform the load. Negative offsets are inobject properties.
- if (offset < 0) {
- offset += map->instance_size();
- __ movq(destination(), FieldOperand(receiver_reg(), offset));
- } else {
- offset += FixedArray::kHeaderSize;
- __ movq(scratch0(),
- FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
- __ movq(destination(), FieldOperand(scratch0(), offset));
- }
-
- // The loaded value is not known to be a smi.
- clear_as_smi(destination());
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
- if (is_smi(accumulator0()) && is_smi(accumulator1())) {
- // If both operands are known to be a smi then there is no need to check
- // the operands or result.
- if (destination().is(no_reg)) {
- __ or_(accumulator1(), accumulator0());
- } else {
- // Leave the result in the destination register. Bitwise or is
- // commutative.
- __ or_(destination(), other_accumulator(destination()));
- }
- } else {
- // Left is in accumulator1, right in accumulator0.
- if (destination().is(accumulator0())) {
- __ movq(scratch0(), accumulator0());
- __ or_(destination(), accumulator1()); // Or is commutative.
- Label* bailout =
- info()->AddBailout(accumulator1(), scratch0()); // Left, right.
- __ JumpIfNotSmi(destination(), bailout);
- } else if (destination().is(accumulator1())) {
- __ movq(scratch0(), accumulator1());
- __ or_(destination(), accumulator0());
- Label* bailout = info()->AddBailout(scratch0(), accumulator0());
- __ JumpIfNotSmi(destination(), bailout);
- } else {
- ASSERT(destination().is(no_reg));
- __ movq(scratch0(), accumulator1());
- __ or_(scratch0(), accumulator0());
- Label* bailout = info()->AddBailout(accumulator1(), accumulator0());
- __ JumpIfNotSmi(scratch0(), bailout);
- }
- }
-
- // If we didn't bailout, the result (in fact, both inputs too) is known to
- // be a smi.
- set_as_smi(accumulator0());
- set_as_smi(accumulator1());
-}
-
-
-void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
- ASSERT(info_ == NULL);
- info_ = compilation_info;
- Comment cmnt(masm_, "[ function compiled by fast code generator");
-
- // Save the caller's frame pointer and set up our own.
- Comment prologue_cmnt(masm(), ";; Prologue");
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi); // Context.
- __ push(rdi); // Closure.
- // Note that we keep a live register reference to esi (context) at this
- // point.
-
- Label* bailout_to_beginning = info()->AddBailout();
- // Receiver (this) is allocated to a fixed register.
- if (info()->has_this_properties()) {
- Comment cmnt(masm(), ";; MapCheck(this)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(this)\n");
- }
- ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
- Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
- Handle<Map> map(object->map());
- EmitLoadReceiver();
- __ CheckMap(receiver_reg(), map, bailout_to_beginning, false);
- }
-
- // If there is a global variable access check if the global object is the
- // same as at lazy-compilation time.
- if (info()->has_globals()) {
- Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
- if (FLAG_print_ir) {
- PrintF("MapCheck(GLOBAL)\n");
- }
- ASSERT(info()->has_global_object());
- Handle<Map> map(info()->global_object()->map());
- __ movq(scratch0(), CodeGenerator::GlobalObject());
- __ CheckMap(scratch0(), map, bailout_to_beginning, true);
- }
-
- VisitStatements(info()->function()->body());
-
- Comment return_cmnt(masm(), ";; Return(<undefined>)");
- if (FLAG_print_ir) {
- PrintF("Return(<undefined>)\n");
- }
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ ret((scope()->num_parameters() + 1) * kPointerSize);
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 4d74735e..a5ccaf50 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -54,100 +54,98 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-x64.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
- if (mode == PRIMARY) {
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
- if (locals_count == 1) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- } else if (locals_count > 1) {
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
- }
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS Function.
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = scope()->num_stack_slots();
+ if (locals_count == 1) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ } else if (locals_count > 1) {
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(rdx);
}
}
+ }
- bool function_in_register = true;
+ bool function_in_register = true;
- // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewContext, 1);
- }
- function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(slot->index());
- __ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
- // clobbering rsi.
- __ movq(rcx, rsi);
- __ RecordWrite(rcx, context_offset, rax, rbx);
- }
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in rdi.
+ __ push(rdi);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in both rax and rsi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in rsi.
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ movq(rax, Operand(rbp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ movq(Operand(rsi, context_offset), rax);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have use a third register to avoid
+ // clobbering rsi.
+ __ movq(rcx, rsi);
+ __ RecordWrite(rcx, context_offset, rax, rbx);
}
}
+ }
- // Possibly allocate an arguments object.
- Variable* arguments = scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Arguments object must be allocated after the context object, in
- // case the "arguments" or ".arguments" variables are in the context.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(rdi);
- } else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // The receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
- __ lea(rdx,
- Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
- __ Push(Smi::FromInt(scope()->num_parameters()));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Store new arguments object in both "arguments" and ".arguments" slots.
- __ movq(rcx, rax);
- Move(arguments->slot(), rax, rbx, rdx);
- Slot* dot_arguments_slot =
- scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, rcx, rbx, rdx);
+ // Possibly allocate an arguments object.
+ Variable* arguments = scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Arguments object must be allocated after the context object, in
+ // case the "arguments" or ".arguments" variables are in the context.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(rdi);
+ } else {
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
+ // The receiver is just before the parameters on the caller's stack.
+ int offset = scope()->num_parameters() * kPointerSize;
+ __ lea(rdx,
+ Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ push(rdx);
+ __ Push(Smi::FromInt(scope()->num_parameters()));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Store new arguments object in both "arguments" and ".arguments" slots.
+ __ movq(rcx, rax);
+ Move(arguments->slot(), rax, rbx, rdx);
+ Slot* dot_arguments_slot =
+ scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, rcx, rbx, rdx);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -1053,7 +1051,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(rcx); // Enumerable.
__ push(rbx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ SmiCompare(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_target());
__ movq(rbx, rax);
@@ -1332,12 +1330,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_elements());
- if (expr->depth() > 1) {
+ if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ __ CallStub(&stub);
+ __ IncrementCounter(&Counters::cow_arrays_created_stub, 1);
+ } else if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- FastCloneShallowArrayStub stub(length);
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
__ CallStub(&stub);
}
@@ -2059,6 +2063,25 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+ // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
+ // used in a few functions in runtime.js which should not normally be hit by
+ // this compiler.
+ __ jmp(if_false);
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index a8971f5b..114ae84d 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -487,6 +487,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -515,10 +516,14 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// scratch - used to hold elements of the receiver and the loaded value.
__ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
- __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, not_fast_array);
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode and writable.
+ __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, not_fast_array);
+ } else {
+ __ AssertFastElements(elements);
+ }
// Check that the key (index) is within bounds.
__ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
// Unsigned comparison rejects negative indices.
@@ -611,13 +616,19 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyedLoadReceiverCheck(
masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
+ // Check the "has fast elements" bit in the receiver's map which is
+ // now in rcx.
+ __ testb(FieldOperand(rcx, Map::kBitField2Offset),
+ Immediate(1 << Map::kHasFastElements));
+ __ j(zero, &check_pixel_array);
+
GenerateFastArrayLoad(masm,
rdx,
rax,
rcx,
rbx,
rax,
- &check_pixel_array,
+ NULL,
&slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
@@ -626,7 +637,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements object is a pixel array.
// rdx: receiver
// rax: key
- // rcx: elements array
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(rbx, rax); // Used on both directions of next branch.
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
@@ -1012,7 +1023,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rdx: JSObject
// rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
+ // Check that the object is in fast mode and writable.
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
@@ -1075,8 +1086,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode; if it is the
- // length is always a smi.
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
__ bind(&array);
// rax: value
// rdx: receiver (a JSArray)
@@ -1862,6 +1873,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ j(not_equal, &miss);
// Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
__ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
__ j(not_equal, &miss);
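
In the ic-x64.cc changes above, GenerateFastArrayLoad now accepts a NULL not_fast_array label: when the caller has already tested the map's fast-elements bit, the helper skips the branch and only asserts in debug builds. A sketch of the same "optional bail-out" shape in plain C++, with invented types:

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-in element storage: either a plain ("fast") vector or something else.
struct ElementsLike {
  bool fast;
  std::vector<int> values;
};

// Mirrors the shape of GenerateFastArrayLoad: if the caller passes a
// fallback, branch to it on a slow backing store; if the caller passes
// nullptr it has already checked, so only assert.
bool FastArrayLoad(const ElementsLike& elements, std::size_t index,
                   int* out, bool (*not_fast_array)(std::size_t)) {
  if (not_fast_array != nullptr) {
    if (!elements.fast) return not_fast_array(index);  // slow-path branch
  } else {
    assert(elements.fast && "caller promised fast elements");
  }
  if (index >= elements.values.size()) return false;   // bounds check
  *out = elements.values[index];
  return true;
}

bool SlowLoad(std::size_t) { std::printf("took slow path\n"); return false; }

int main() {
  ElementsLike fast{true, {10, 20, 30}};
  int value = 0;
  FastArrayLoad(fast, 1, &value, nullptr);    // caller already checked the map bit
  std::printf("value=%d\n", value);
  ElementsLike slow{false, {}};
  FastArrayLoad(slow, 0, &value, &SlowLoad);  // falls back through the label
  return 0;
}
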
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index bab01993..2f4b5f6a 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -262,6 +262,21 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
}
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (FLAG_debug_code) {
+ Label ok;
+ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ j(equal, &ok);
+ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ j(equal, &ok);
+ Abort("JSObject with fast elements map has slow elements");
+ bind(&ok);
+ }
+}
+
+
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
j(cc, &L);
@@ -582,8 +597,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
// Make sure the code objects in the builtins object and in the
// builtin function are the same.
push(target);
- movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+ movq(target, FieldOperand(rdi, JSFunction::kCodeOffset));
cmpq(target, Operand(rsp, 0));
Assert(equal, "Builtin code object changed");
pop(target);
@@ -783,8 +797,8 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
if (src->value() == 0) {
testq(dst, dst);
} else {
- Move(kScratchRegister, src);
- cmpq(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(src);
+ cmpq(dst, constant_reg);
}
}
@@ -1978,10 +1992,17 @@ void MacroAssembler::AbortIfNotNumber(Register object) {
}
+void MacroAssembler::AbortIfSmi(Register object) {
+ Label ok;
+ Condition is_smi = CheckSmi(object);
+ Assert(NegateCondition(is_smi), "Operand is a smi");
+}
+
+
void MacroAssembler::AbortIfNotSmi(Register object) {
Label ok;
Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand not a smi");
+ Assert(is_smi, "Operand is not a smi");
}
@@ -2290,7 +2311,7 @@ void MacroAssembler::InvokeFunction(Register function,
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
movsxlq(rbx,
FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
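
AssertFastElements, added above, accepts either the writable fixed-array map or the copy-on-write map, aborts on anything else, and emits nothing unless --debug-code is set. A self-contained sketch of that allow-list assertion, using stand-in flags and map kinds:

#include <cstdio>
#include <cstdlib>

// Stand-in for the --debug-code flag.
static bool FLAG_debug_code = true;

enum class MapKind { kFixedArray, kFixedCOWArray, kHashTable };

struct HeapObjectLike {
  MapKind map;
};

[[noreturn]] void Abort(const char* msg) {
  std::fprintf(stderr, "abort: %s\n", msg);
  std::abort();
}

// Mirrors AssertFastElements: accept either fast-elements map, abort on
// anything else, and do nothing when debug code is disabled.
void AssertFastElements(const HeapObjectLike& elements) {
  if (!FLAG_debug_code) return;
  if (elements.map == MapKind::kFixedArray) return;
  if (elements.map == MapKind::kFixedCOWArray) return;
  Abort("JSObject with fast elements map has slow elements");
}

int main() {
  HeapObjectLike cow{MapKind::kFixedCOWArray};
  AssertFastElements(cow);  // passes: COW arrays still count as fast
  std::printf("ok\n");
  return 0;
}
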
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index a294ad67..aedc3b9c 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -582,6 +582,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
+ // Abort execution if argument is a smi. Used in debug code.
+ void AbortIfSmi(Register object);
+
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
@@ -829,6 +832,8 @@ class MacroAssembler: public Assembler {
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
+ void AssertFastElements(Register elements);
+
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 4c15715c..e0644cd6 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1084,16 +1084,18 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
__ ret((argc + 1) * kPointerSize);
} else {
+ Label call_builtin;
+
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
- // Check that the elements are in fast mode (not dictionary).
+ // Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
Factory::fixed_array_map());
- __ j(not_equal, &miss);
+ __ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
+ Label exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1164,7 +1166,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Push the argument...
__ movq(Operand(rdx, 0), rcx);
// ... and fill the rest with holes.
- __ Move(kScratchRegister, Factory::the_hole_value());
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
__ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
}
@@ -1175,15 +1177,16 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
+
// Make new length a smi before returning it.
__ Integer32ToSmi(rax, rax);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
// Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
}
+ __ bind(&call_builtin);
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
argc + 1,
1);
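
The ArrayPush stub now jumps to the generic builtin, rather than to the miss handler, when the receiver's elements are not a writable fixed array, since a copy-on-write backing store must be copied before it can grow in place. A loose analogy in C++ using a shared (copy-on-write) backing store; none of these names correspond to V8 code:

#include <cstdio>
#include <memory>
#include <vector>

// Stand-in array whose backing store may be shared (copy-on-write).
struct ArrayLike {
  std::shared_ptr<std::vector<int>> elements;
};

// Generic path, standing in for Builtins::c_ArrayPush: copies a shared
// (COW) backing store before appending.
void BuiltinPush(ArrayLike* a, int value) {
  if (a->elements.use_count() > 1) {
    a->elements = std::make_shared<std::vector<int>>(*a->elements);
  }
  a->elements->push_back(value);
}

// Fast path: only append in place when the store is private and writable;
// otherwise fall back to the builtin, as the stub now does for COW maps.
void Push(ArrayLike* a, int value) {
  if (a->elements.use_count() == 1) {
    a->elements->push_back(value);  // in-place fast path
  } else {
    BuiltinPush(a, value);          // "call_builtin" fallback
  }
}

int main() {
  ArrayLike boilerplate{std::make_shared<std::vector<int>>(std::vector<int>{1, 2})};
  ArrayLike clone = boilerplate;    // shares the COW backing store
  Push(&clone, 3);                  // goes through the builtin and copies
  std::printf("boilerplate=%zu clone=%zu\n",
              boilerplate.elements->size(), clone.elements->size());
  return 0;
}
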
@@ -1204,11 +1207,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
String* name,
CheckType check) {
// ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- ...
- // -- esp[(argc + 1) * 4] : receiver
+ // -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
ASSERT(check == RECEIVER_MAP_CHECK);
@@ -1235,9 +1238,10 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
- // Check that the elements are in fast mode (not dictionary).
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::fixed_array_map());
- __ j(not_equal, &miss);
+ // Check that the elements are in fast mode and writable.
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &call_builtin);
// Get the array's length into rcx and calculate new length.
__ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1245,7 +1249,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ j(negative, &return_undefined);
// Get the last element.
- __ Move(r9, Factory::the_hole_value());
+ __ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
__ movq(rax, FieldOperand(rbx,
rcx, times_pointer_size,
FixedArray::kHeaderSize));
@@ -1265,14 +1269,14 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ ret((argc + 1) * kPointerSize);
__ bind(&return_undefined);
-
- __ Move(rax, Factory::undefined_value());
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
argc + 1,
1);
+
__ bind(&miss);
Object* obj = GenerateMissBranch();
if (obj->IsFailure()) return obj;
@@ -2039,30 +2043,6 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
-// TODO(1241006): Avoid having lazy compile stubs specialized by the
-// number of arguments. It is not needed anymore.
-Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
-
- __ push(rdi); // function is also the parameter to the runtime call
- __ CallRuntime(Runtime::kLazyCompile, 1);
- __ pop(rdi);
-
- // Tear down temporary frame.
- __ LeaveInternalFrame();
-
- // Do a tail-call of the compiled function.
- __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rcx);
-
- return GetCodeWithFlags(flags, "LazyCompileStub");
-}
-
-
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* interceptor_holder,
LookupResult* lookup,