author     Steve Block <steveblock@google.com>  2011-05-24 12:43:12 +0100
committer  Steve Block <steveblock@google.com>  2011-05-24 13:42:09 +0100
commit     1e0659c275bb392c045087af4f6b0d7565cb3d77 (patch)
tree       09febd313ccef178417974f7b7098e89e20bfd93 /src/x64
parent     b8e0da25ee8efac3bb05cd6b2730aafbd96119f4 (diff)
download   android_external_v8-1e0659c275bb392c045087af4f6b0d7565cb3d77.tar.gz
           android_external_v8-1e0659c275bb392c045087af4f6b0d7565cb3d77.tar.bz2
           android_external_v8-1e0659c275bb392c045087af4f6b0d7565cb3d77.zip
Update V8 to r6768 as required by WebKit r78450
Change-Id: Ib8868ff7147a76547a8d1d85f257ebe8546a3d3f
Diffstat (limited to 'src/x64')
-rw-r--r--  src/x64/assembler-x64-inl.h          |   17
-rw-r--r--  src/x64/assembler-x64.cc             |  132
-rw-r--r--  src/x64/assembler-x64.h              |   73
-rw-r--r--  src/x64/builtins-x64.cc              |   30
-rw-r--r--  src/x64/code-stubs-x64.cc            |  639
-rw-r--r--  src/x64/code-stubs-x64.h             |   24
-rw-r--r--  src/x64/codegen-x64.cc               |   52
-rw-r--r--  src/x64/codegen-x64.h                |    1
-rw-r--r--  src/x64/cpu-x64.cc                   |    3
-rw-r--r--  src/x64/deoptimizer-x64.cc           |  539
-rw-r--r--  src/x64/disasm-x64.cc                |   18
-rw-r--r--  src/x64/frames-x64.h                 |    5
-rw-r--r--  src/x64/full-codegen-x64.cc          |  283
-rw-r--r--  src/x64/ic-x64.cc                    |  351
-rw-r--r--  src/x64/lithium-codegen-x64.cc       | 1822
-rw-r--r--  src/x64/lithium-codegen-x64.h        |   62
-rw-r--r--  src/x64/lithium-gap-resolver-x64.cc  |  320
-rw-r--r--  src/x64/lithium-gap-resolver-x64.h   |   74
-rw-r--r--  src/x64/lithium-x64.cc               |  681
-rw-r--r--  src/x64/lithium-x64.h                |  460
-rw-r--r--  src/x64/macro-assembler-x64.cc       |  186
-rw-r--r--  src/x64/macro-assembler-x64.h        |   69
-rw-r--r--  src/x64/stub-cache-x64.cc            |  528
-rw-r--r--  src/x64/virtual-frame-x64.cc         |   13
-rw-r--r--  src/x64/virtual-frame-x64.h          |    3
25 files changed, 4975 insertions, 1410 deletions
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 1fe9eed4..285c0781 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -199,8 +199,10 @@ void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
Memory::Address_at(pc_) += static_cast<int32_t>(delta);
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (IsCodeTarget(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
+ CPU::FlushICache(pc_, sizeof(int32_t));
}
}
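The sign difference between the two adjustments is worth spelling out: an absolute pointer stored inside the moved code must follow the code, while a pc-relative target stays put as the pc beneath it moves. A minimal standalone sketch of that arithmetic (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      intptr_t delta = 0x1000;            // Code object relocated by +0x1000.

      // An absolute pointer into the same object moves with it: += delta.
      intptr_t internal_ref = 0x400500;
      internal_ref += delta;
      assert(internal_ref == 0x401500);

      // A pc-relative code target encodes target - pc. The target is fixed,
      // but pc moved by delta, so the stored offset shrinks: -= delta.
      intptr_t target = 0x500000;
      intptr_t old_pc = 0x400100;
      int32_t rel = static_cast<int32_t>(target - old_pc);
      rel -= static_cast<int32_t>(delta);
      assert(old_pc + delta + rel == target);
      return 0;
    }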
@@ -236,6 +238,7 @@ void RelocInfo::set_target_address(Address target) {
Assembler::set_target_address_at(pc_, target);
} else {
Memory::Address_at(pc_) = target;
+ CPU::FlushICache(pc_, sizeof(Address));
}
}
@@ -271,6 +274,7 @@ Address* RelocInfo::target_reference_address() {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
*reinterpret_cast<Object**>(pc_) = target;
+ CPU::FlushICache(pc_, sizeof(Address));
}
@@ -295,6 +299,7 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ CPU::FlushICache(pc_, sizeof(Address));
}
@@ -331,6 +336,8 @@ void RelocInfo::set_call_address(Address target) {
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
target;
+ CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
+ sizeof(Address));
}
@@ -356,10 +363,14 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -379,10 +390,14 @@ void RelocInfo::Visit() {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ StaticVisitor::VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -414,7 +429,7 @@ void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
// Use SIB with no index register only for base rsp or r12. Otherwise we
// would skip the SIB byte entirely.
ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
- buf_[1] = scale << 6 | index.low_bits() << 3 | base.low_bits();
+ buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
rex_ |= index.high_bit() << 1 | base.high_bit();
len_ = 2;
}
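In C++ `<<` binds more tightly than `|`, so the added parentheses change nothing about the emitted byte; they only make the SIB layout (scale: 2 bits, index: 3 bits, base: 3 bits) explicit. An illustrative check, assuming the standard x64 low-bit register codes (rcx = 1, rbx = 3):

    #include <cassert>
    #include <cstdint>

    int main() {
      // SIB byte: bits 7-6 scale, bits 5-3 index, bits 2-0 base.
      uint8_t scale = 2;  // times_4.
      uint8_t index = 1;  // Low bits of rcx.
      uint8_t base = 3;   // Low bits of rbx.
      uint8_t with_parens = (scale << 6) | (index << 3) | base;
      uint8_t without_parens = scale << 6 | index << 3 | base;
      assert(with_parens == without_parens);  // << binds tighter than |.
      assert(with_parens == 0x8B);            // 10 001 011 for [rbx+rcx*4].
      return 0;
    }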
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index de01cfa3..697f6cd4 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -300,6 +300,34 @@ Operand::Operand(const Operand& operand, int32_t offset) {
}
}
+
+bool Operand::AddressUsesRegister(Register reg) const {
+ int code = reg.code();
+ ASSERT((buf_[0] & 0xC0) != 0xC0); // Always a memory operand.
+ // Start with only low three bits of base register. Initial decoding doesn't
+ // distinguish on the REX.B bit.
+ int base_code = buf_[0] & 0x07;
+ if (base_code == rsp.code()) {
+ // SIB byte present in buf_[1].
+ // Check the index register from the SIB byte + REX.X prefix.
+ int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
+ // Index code (including REX.X) of 0x04 (rsp) means no index register.
+ if (index_code != rsp.code() && index_code == code) return true;
+ // Add REX.B to get the full base register code.
+ base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
+ // A base register of 0x05 (rbp) with mod = 0 means no base register.
+ if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+ return code == base_code;
+ } else {
+ // A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
+ // no base register.
+ if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+ base_code |= ((rex_ & 0x01) << 3);
+ return code == base_code;
+ }
+}
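The decode above can be restated compactly outside the Operand class. The sketch below mirrors the same bit manipulation (ModR/M low bits select the base or the SIB escape; REX.X and REX.B supply the high bits); it is an illustration under those assumptions, not the V8 implementation:

    #include <cassert>
    #include <cstdint>

    // Does the address part (base/index) of an encoded operand use register
    // 'code'? modrm is buf_[0], sib is buf_[1] (if present), rex holds the
    // REX.X (bit 1) and REX.B (bit 0) prefix bits.
    bool UsesRegister(uint8_t modrm, uint8_t sib, uint8_t rex, int code) {
      int base_code = modrm & 0x07;
      if (base_code != 4) {  // 4 == rsp low bits: no SIB byte present.
        // Low bits 5 (rbp/r13) with mod == 0 means no base register.
        if (base_code == 5 && (modrm & 0xC0) == 0) return false;
        return code == (base_code | ((rex & 0x01) << 3));  // Apply REX.B.
      }
      // SIB byte present: check the index first.
      int index_code = ((sib >> 3) & 0x07) | ((rex & 0x02) << 2);  // REX.X.
      if (index_code != 4 && index_code == code) return true;  // 4: no index.
      base_code = (sib & 0x07) | ((rex & 0x01) << 3);          // REX.B.
      if ((base_code & 0x07) == 5 && (modrm & 0xC0) == 0) return false;
      return code == base_code;
    }

    int main() {
      // [rbx + rcx*4]: ModR/M r/m = 100 (SIB), SIB = 10 001 011, no REX.
      assert(UsesRegister(0x04, 0x8B, 0x00, 1));   // rcx is the index.
      assert(UsesRegister(0x04, 0x8B, 0x00, 3));   // rbx is the base.
      assert(!UsesRegister(0x04, 0x8B, 0x00, 0));  // rax is not used.
      // [r9]: r/m = 001 plus REX.B gives register code 9.
      assert(UsesRegister(0x01, 0x00, 0x01, 9));
      return 0;
    }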
+
+
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@@ -888,6 +916,23 @@ void Assembler::call(const Operand& op) {
}
+// Calls directly to the given address using a relative offset.
+// Should only ever be used in Code objects for calls within the
+// same Code object. Should not be used when generating new code (use labels),
+// but only when patching existing code.
+void Assembler::call(Address target) {
+ positions_recorder()->WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1000 #32-bit disp.
+ emit(0xE8);
+ Address source = pc_ + 4;
+ intptr_t displacement = target - source;
+ ASSERT(is_int32(displacement));
+ emitl(static_cast<int32_t>(displacement));
+}
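Because the displacement is measured from the end of the five-byte instruction, patching such a call in place is a single subtraction. A standalone byte-level sketch (hypothetical buffer, not the Assembler API):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Write a near call (opcode 0xE8 + rel32) at 'pc' that reaches 'target'.
    void PatchNearCall(uint8_t* pc, uint8_t* target) {
      pc[0] = 0xE8;
      // rel32 is relative to the first byte after the displacement: pc + 5.
      intptr_t disp = target - (pc + 5);
      assert(disp == static_cast<int32_t>(disp));  // Must fit in 32 bits.
      int32_t rel32 = static_cast<int32_t>(disp);
      memcpy(pc + 1, &rel32, sizeof(rel32));  // Little-endian immediate.
    }

    int main() {
      uint8_t buffer[64] = {0};
      PatchNearCall(buffer, buffer + 32);  // Call a point 32 bytes ahead.
      int32_t rel32;
      memcpy(&rel32, buffer + 1, sizeof(rel32));
      assert(buffer[0] == 0xE8 && rel32 == 32 - 5);
      return 0;
    }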
+
+
void Assembler::clc() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1143,6 +1188,16 @@ void Assembler::imull(Register dst, Register src) {
}
+void Assembler::imull(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xAF);
+ emit_operand(dst, src);
+}
+
+
void Assembler::imull(Register dst, Register src, Immediate imm) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1949,6 +2004,14 @@ void Assembler::push(Immediate value) {
}
+void Assembler::push_imm32(int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x68);
+ emitl(imm32);
+}
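The point of a dedicated push_imm32 is the fixed five-byte encoding: the regular push(Immediate) is free to emit the two-byte 6A ib form when the value fits in a signed byte, which would break code that later reads or patches the 32-bit immediate at a known offset. A sketch of the two encodings (standard x64 forms, illustrative emitter):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Returns the number of bytes emitted for a push of 'value'.
    int EmitPush(uint8_t* buf, int32_t value, bool force_imm32) {
      if (!force_imm32 && value == static_cast<int8_t>(value)) {
        buf[0] = 0x6A;  // push imm8, sign-extended: the optimized case.
        buf[1] = static_cast<uint8_t>(value);
        return 2;
      }
      buf[0] = 0x68;  // push imm32, sign-extended to 64 bits.
      memcpy(buf + 1, &value, 4);
      return 5;
    }

    int main() {
      uint8_t buf[8];
      assert(EmitPush(buf, 1, false) == 2);  // Normal push: 8-bit form.
      assert(EmitPush(buf, 1, true) == 5);   // push_imm32: always 5 bytes.
      return 0;
    }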
+
+
void Assembler::pushfq() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2641,6 +2704,30 @@ void Assembler::movq(Register dst, XMMRegister src) {
}
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(is_uint2(imm8));
EnsureSpace ensure_space(this);
@@ -2721,6 +2808,17 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
}
+void Assembler::cvttss2si(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvttsd2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2732,6 +2830,17 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
}
+void Assembler::cvttsd2si(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2930,6 +3039,16 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::movmskpd(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x50);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
@@ -2967,10 +3086,15 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(rmode != RelocInfo::NONE);
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !Serializer::enabled() &&
- !FLAG_debug_code) {
- return;
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !FLAG_debug_code) {
+ return;
+ }
}
RelocInfo rinfo(pc_, rmode, data);
reloc_info_writer.Write(&rinfo);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index be837f04..91e7e6cc 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -153,6 +153,7 @@ struct Register {
// Unfortunately we can't make this private in a struct when initializing
// by assignment.
int code_;
+
private:
static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
static const int allocationIndexByRegisterCode[kNumRegisters];
@@ -390,11 +391,15 @@ class Operand BASE_EMBEDDED {
// this must not overflow.
Operand(const Operand& base, int32_t offset);
+ // Checks whether either base or index register is the given register.
+ // Does not check the "reg" part of the Operand.
+ bool AddressUsesRegister(Register reg) const;
+
private:
byte rex_;
byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
+ // The number of bytes of buf_ in use.
+ byte len_;
// Set the ModR/M byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@@ -548,16 +553,29 @@ class Assembler : public Malloced {
// TODO(X64): Rename this, removing the "Real", after changing the above.
static const int kRealPatchReturnSequenceAddressOffset = 2;
- // The x64 JS return sequence is padded with int3 to make it large
- // enough to hold a call instruction when the debugger patches it.
+ // Some x64 JS code is padded with int3 to make it large
+ // enough to hold an instruction when the debugger patches it.
+ static const int kJumpInstructionLength = 13;
static const int kCallInstructionLength = 13;
static const int kJSReturnSequenceLength = 13;
+ static const int kShortCallInstructionLength = 5;
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
// One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
+ // One byte opcode for test al, 0xXX.
+ static const byte kTestAlByte = 0xA8;
+ // One byte opcode for nop.
+ static const byte kNopByte = 0x90;
+
+ // One byte prefix for a short conditional jump.
+ static const byte kJccShortPrefix = 0x70;
+ static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
+ static const byte kJcShortOpcode = kJccShortPrefix | carry;
+
+
// ---------------------------------------------------------------------------
// Code generation
@@ -580,7 +598,7 @@ class Assembler : public Malloced {
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
- // of m. m must be a power of 2.
+ // of m, where m must be a power of 2.
void Align(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -590,6 +608,9 @@ class Assembler : public Malloced {
void popfq();
void push(Immediate value);
+ // Push a 32 bit integer, and guarantee that it is actually pushed as a
+ // 32 bit value; the normal push optimizes the 8 bit case.
+ void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
@@ -707,6 +728,10 @@ class Assembler : public Malloced {
arithmetic_op_32(0x1b, dst, src);
}
+ void sbbq(Register dst, Register src) {
+ arithmetic_op(0x1b, dst, src);
+ }
+
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -817,6 +842,10 @@ class Assembler : public Malloced {
arithmetic_op_32(0x23, dst, src);
}
+ void andl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x23, dst, src);
+ }
+
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
@@ -845,6 +874,7 @@ class Assembler : public Malloced {
void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
// Signed 32-bit multiply instructions.
void imull(Register dst, Register src); // dst = dst * src.
+ void imull(Register dst, const Operand& src); // dst = dst * src.
void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
void incq(Register dst);
@@ -878,6 +908,10 @@ class Assembler : public Malloced {
arithmetic_op(0x0B, dst, src);
}
+ void orl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+
void or_(const Operand& dst, Register src) {
arithmetic_op(0x09, src, dst);
}
@@ -1041,6 +1075,18 @@ class Assembler : public Malloced {
arithmetic_op_32(0x33, dst, src);
}
+ void xorl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x33, dst, src);
+ }
+
+ void xorl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+
+ void xorl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+
void xor_(Register dst, const Operand& src) {
arithmetic_op(0x33, dst, src);
}
@@ -1095,6 +1141,12 @@ class Assembler : public Malloced {
void call(Label* L);
void call(Handle<Code> target, RelocInfo::Mode rmode);
+ // Calls directly to the given address using a relative offset.
+ // Should only ever be used in Code objects for calls within the
+ // same Code object. Should not be used when generating new code (use labels),
+ // but only when patching existing code.
+ void call(Address target);
+
// Call near absolute indirect, address in register
void call(Register adr);
@@ -1201,11 +1253,16 @@ class Assembler : public Malloced {
void movsd(XMMRegister dst, XMMRegister src);
void movsd(XMMRegister dst, const Operand& src);
+ void movdqa(const Operand& dst, XMMRegister src);
+ void movdqa(XMMRegister dst, const Operand& src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, XMMRegister src);
void cvttsd2si(Register dst, const Operand& src);
+ void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
void cvtlsi2sd(XMMRegister dst, const Operand& src);
@@ -1233,16 +1290,14 @@ class Assembler : public Malloced {
void ucomisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, const Operand& src);
+ void movmskpd(Register dst, XMMRegister src);
+
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
- // Use either movsd or movlpd.
- // void movdbl(XMMRegister dst, const Operand& src);
- // void movdbl(const Operand& dst, XMMRegister src);
-
// Debugging
void Print();
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index d738261a..08cd21d4 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -561,7 +561,33 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
- __ int3();
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Get the full codegen state from the stack and untag it.
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Switch on the state.
+ NearLabel not_no_registers, not_tos_rax;
+ __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ j(not_equal, &not_no_registers);
+ __ ret(1 * kPointerSize); // Remove state.
+
+ __ bind(&not_no_registers);
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+ __ j(not_equal, &not_tos_rax);
+ __ ret(2 * kPointerSize); // Remove state, rax.
+
+ __ bind(&not_tos_rax);
+ __ Abort("no cases left");
}
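The two different ret immediates follow from the stack layout the helper sees; a sketch of that layout, read off the code above:

    // Stack on reaching the state switch (after LeaveInternalFrame):
    //   rsp[0 * kPointerSize] : return address
    //   rsp[1 * kPointerSize] : full codegen state (smi)
    //   rsp[2 * kPointerSize] : top-of-stack value (meaningful for TOS_REG)
    //
    // ret(1 * kPointerSize) pops the return address and drops the state slot;
    // ret(2 * kPointerSize) additionally drops the slot reloaded into rax.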
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
@@ -570,7 +596,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 59522d22..4b4531eb 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,6 +37,28 @@ namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
+
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in rax.
+ NearLabel check_heap_number, call_builtin;
+ __ SmiTest(rax);
+ __ j(not_zero, &check_heap_number);
+ __ Ret();
+
+ __ bind(&check_heap_number);
+ __ Move(rbx, Factory::heap_number_map());
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &call_builtin);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ pop(rcx); // Pop return address.
+ __ push(rax);
+ __ push(rcx); // Push return address.
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in rsi.
@@ -1015,29 +1037,6 @@ void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
}
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
- // Left and right arguments are already on top of the stack.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(MinorKey()));
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(operands_type_));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
switch (operands_type_) {
case TRBinaryOpIC::UNINITIALIZED:
@@ -1047,7 +1046,9 @@ void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
GenerateSmiStub(masm);
break;
case TRBinaryOpIC::INT32:
- GenerateInt32Stub(masm);
+ UNREACHABLE();
+ // The int32 case is identical to the Smi case. We avoid creating this
+ // ic state on x64.
break;
case TRBinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
@@ -1090,54 +1091,337 @@ const char* TypeRecordingBinaryOpStub::GetName() {
void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- UNIMPLEMENTED();
-}
+ // We only generate heapnumber answers for overflowing calculations
+ // for the four basic arithmetic operations.
+ bool generate_inline_heapnumber_results =
+ (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
+ (op_ == Token::ADD || op_ == Token::SUB ||
+ op_ == Token::MUL || op_ == Token::DIV);
+
+ // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
+ Register left = rdx;
+ Register right = rax;
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
+ // Smi check of both operands. If op is BIT_OR, the check is delayed
+ // until after the OR operation.
+ Label not_smis;
+ Label use_fp_on_smis;
+ Label restore_MOD_registers; // Only used if op_ == Token::MOD.
+
+ if (op_ != Token::BIT_OR) {
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+
+ // Perform the operation.
+ Comment perform_smi(masm, "-- Perform smi operation");
switch (op_) {
case Token::ADD:
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
+ break;
+
case Token::SUB:
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
+ break;
+
case Token::MUL:
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+ break;
+
case Token::DIV:
+ // SmiDiv will not accept left in rdx or right in rax.
+ left = rcx;
+ right = rbx;
+ __ movq(rbx, rax);
+ __ movq(rcx, rdx);
+ __ SmiDiv(rax, left, right, &use_fp_on_smis);
break;
+
case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
+ // SmiMod will not accept left in rdx or right in rax.
+ left = rcx;
+ right = rbx;
+ __ movq(rbx, rax);
+ __ movq(rcx, rdx);
+ __ SmiMod(rax, left, right, &use_fp_on_smis);
+ break;
+
+ case Token::BIT_OR: {
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
+ __ JumpIfNotSmi(right, &not_smis); // Test delayed until after BIT_OR.
+ break;
+ }
case Token::BIT_XOR:
- case Token::SAR:
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
+ break;
+
case Token::SHL:
+ __ SmiShiftLeft(left, left, right);
+ __ movq(rax, left);
+ break;
+
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(left, left, right);
+ __ movq(rax, left);
+ break;
+
case Token::SHR:
- GenerateRegisterArgsPush(masm);
+ __ SmiShiftLogicalRight(left, left, right, &not_smis);
+ __ movq(rax, left);
break;
+
default:
UNREACHABLE();
}
- if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
- result_type_ == TRBinaryOpIC::SMI) {
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
- } else {
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ // 5. Emit return of result in rax. Some operations have registers pushed.
+ __ ret(0);
+
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ // Restore left and right to rdx and rax.
+ __ movq(rdx, rcx);
+ __ movq(rax, rbx);
}
- __ bind(&call_runtime);
+
+ if (generate_inline_heapnumber_results) {
+ __ AllocateHeapNumber(rcx, rbx, slow);
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ }
+
+ // 7. Non-smi operands reach the end of the code generated by
+ // GenerateSmiCode, and fall through to subsequent code,
+ // with the operands in rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ if (op_ == Token::BIT_OR) {
+ __ movq(right, rcx);
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
+ MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure) {
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
+ case Token::DIV: {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ GenerateHeapResultAllocation(masm, allocation_failure);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we jump to the allocation_failure label, to call runtime.
+ __ jmp(allocation_failure);
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
+ heap_number_map);
+ switch (op_) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
+ __ Integer32ToSmi(rax, rax);
+ __ Ret();
+
+ // Logical shift right can produce an unsigned int32 that is not
+ // an int32, and so is not in the smi range. Allocate a heap number
+ // in that case.
+ if (op_ == Token::SHR) {
+ __ bind(&non_smi_shr_result);
+ Label allocation_failed;
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &allocation_failed,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ Ret();
+
+ __ bind(&allocation_failed);
+ // We need tagged values in rdx and rax for the following code,
+ // not int32 in rax and rcx.
+ __ Integer32ToSmi(rax, rcx);
+ __ Integer32ToSmi(rdx, rax);
+ __ jmp(allocation_failure);
+ }
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ // No fall-through from this generated code.
+ if (FLAG_debug_code) {
+ __ Abort("Unexpected fall-through in "
+ "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
+ }
+}
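With 32-bit smi payloads (the STATIC_ASSERT above), every int32 result can be tagged, so among the bit operations only the logical shift right needs a heap-number path: per JS semantics, << and >> produce int32 values, while >>> produces a uint32 that may exceed INT32_MAX. A standalone sketch of the failing case and of the x64 tag/untag arithmetic (payload in the upper 32 bits, tag bit 0; an illustration, not V8's macros):

    #include <cassert>
    #include <cstdint>

    // x64 V8 keeps the 32-bit smi payload in the upper half of the word.
    int64_t Integer32ToSmi(int32_t value) {
      return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
    }
    int32_t SmiToInteger32(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }

    int main() {
      assert(SmiToInteger32(Integer32ToSmi(-7)) == -7);

      // A logical shift right by zero can expose a uint32 with no int32
      // representation; that value forces the heap-number allocation above.
      uint32_t x = 0x80000000u;  // Bit pattern of INT32_MIN.
      assert((x >> 0) > static_cast<uint32_t>(INT32_MAX));
      return 0;
    }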
+
+
+void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ // Registers containing left and right operands respectively.
+ Register lhs = rdx;
+ Register rhs = rax;
+
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1, string1_smi2;
+
+ __ JumpIfNotString(lhs, r8, &not_string1);
+
+ // First argument is a string, test second.
+ __ JumpIfSmi(rhs, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string1);
+
+ // First and second arguments are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to look up the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, rbx, rcx, r8, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ JumpIfNotString(rhs, rhs, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ // Pop arguments, because CallRuntimeCode wants to push them again.
+ __ pop(rcx);
+ __ pop(rax);
+ __ pop(rdx);
+ __ push(rcx);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
case Token::DIV:
- GenerateTypeTransition(masm);
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
@@ -1145,30 +1429,90 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
}
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- UNIMPLEMENTED();
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label not_smi;
+
+ GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
+
+ __ bind(&not_smi);
+ GenerateTypeTransition(masm);
}
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- UNIMPLEMENTED();
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+ GenerateStringAddCode(masm);
+
+ GenerateTypeTransition(masm);
}
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ Label gc_required, not_number;
+ GenerateFloatingPointCode(masm, &gc_required, &not_number);
+
+ __ bind(&not_number);
+ GenerateTypeTransition(masm);
+
+ __ bind(&gc_required);
+ GenerateCallRuntimeCode(masm);
}
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ Label call_runtime, call_string_add_or_runtime;
+
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+ GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
+
+ __ bind(&call_string_add_or_runtime);
+ if (op_ == Token::ADD) {
+ GenerateStringAddCode(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntimeCode(masm);
}
void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Label* alloc_failure) {
- UNIMPLEMENTED();
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in rdx is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+ // Now rdx can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ // Use object in rdx as a result holder
+ __ movq(rax, rdx);
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+ // Now rax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ movq(rax, rbx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
}
@@ -1490,6 +1834,7 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
+// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label* conversion_failure,
Register heap_number_map) {
@@ -1499,28 +1844,27 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
Label load_arg2, done;
__ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(rdx, rdx);
+ __ SmiToInteger32(r8, rdx);
__ jmp(&load_arg2);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
__ bind(&check_undefined_arg1);
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, conversion_failure);
- __ movl(rdx, Immediate(0));
+ __ movl(r8, Immediate(0));
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the edx heap number in rcx.
- IntegerConvert(masm, rdx, rdx);
+ // Get the untagged integer version of the rdx heap number in r8.
+ IntegerConvert(masm, r8, rdx);
- // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ // Here r8 has the untagged integer, rax has a Smi or a heap number.
__ bind(&load_arg2);
// Test if arg2 is a Smi.
__ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rax, rax);
- __ movl(rcx, rax);
+ __ SmiToInteger32(rcx, rax);
__ jmp(&done);
// If the argument is undefined it converts to zero (ECMA-262, section 9.5).
@@ -1536,7 +1880,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
// Get the untagged integer version of the rax heap number in rcx.
IntegerConvert(masm, rcx, rax);
__ bind(&done);
- __ movl(rax, rdx);
+ __ movl(rax, r8);
}
@@ -1866,11 +2210,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
// Stack frame on entry.
- // esp[0]: return address
- // esp[8]: last_match_info (expected JSArray)
- // esp[16]: previous index
- // esp[24]: subject string
- // esp[32]: JSRegExp object
+ // rsp[0]: return address
+ // rsp[8]: last_match_info (expected JSArray)
+ // rsp[16]: previous index
+ // rsp[24]: subject string
+ // rsp[32]: JSRegExp object
static const int kLastMatchInfoOffset = 1 * kPointerSize;
static const int kPreviousIndexOffset = 2 * kPointerSize;
@@ -2212,7 +2556,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
+ // Allocate RegExpResult followed by FixedArray with size in rbx.
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
__ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
@@ -2271,7 +2615,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Label loop;
__ testl(rbx, rbx);
__ bind(&loop);
- __ j(less_equal, &done); // Jump if ecx is negative or zero.
+ __ j(less_equal, &done); // Jump if rcx is negative or zero.
__ subl(rbx, Immediate(1));
__ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
__ jmp(&loop);
@@ -2634,7 +2978,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// undefined, and are equal.
__ Set(rax, EQUAL);
__ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
+ // Return non-equal by returning the non-zero object pointer in rax,
// or return equal if we fell through to here.
__ ret(0);
__ bind(&not_both_objects);
@@ -2774,8 +3118,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
+ bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
// rbx: pointer to C function (C callee-saved).
// rbp: frame pointer (restored after C call).
@@ -2868,7 +3211,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame();
+ __ LeaveExitFrame(save_doubles_);
__ ret(0);
// Handling of failure.
@@ -2977,7 +3320,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
#else
int arg_stack_space = 0;
#endif
- __ EnterExitFrame(arg_stack_space);
+ __ EnterExitFrame(arg_stack_space, save_doubles_);
// rax: Holds the context at this point, but should not be used.
// On entry to code generated by GenerateCore, it must hold
@@ -3130,7 +3473,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current EBP value is the same as js_entry_sp value, it means that
+ // If current RBP value is the same as js_entry_sp value, it means that
// the current function is the outermost.
__ movq(kScratchRegister, js_entry_sp);
__ cmpq(rbp, Operand(kScratchRegister, 0));
@@ -3248,6 +3591,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
+Register InstanceofStub::left() { return rax; }
+
+
+Register InstanceofStub::right() { return rdx; }
+
+
int CompareStub::MinorKey() {
// Encode the three parameters in a unique 16 bit value. To avoid duplicate
// stubs the never NaN NaN condition is only taken into account if the
@@ -4272,24 +4621,168 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
}
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ ASSERT(state_ == CompareIC::SMIS);
+ NearLabel miss;
+ __ JumpIfNotBothSmi(rdx, rax, &miss);
+
+ if (GetCondition() == equal) {
+ // For equality we do not care about the sign of the result.
+ __ subq(rax, rdx);
+ } else {
+ NearLabel done;
+ __ subq(rdx, rax);
+ __ j(no_overflow, &done);
+ // Correct sign of result in case of overflow.
+ __ SmiNot(rdx, rdx);
+ __ bind(&done);
+ __ movq(rax, rdx);
+ }
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+ NearLabel generic_stub;
+ NearLabel unordered;
+ NearLabel miss;
+ Condition either_smi = masm->CheckEitherSmi(rax, rdx);
+ __ j(either_smi, &generic_stub);
+
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
+ __ j(not_equal, &miss);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ j(not_equal, &miss);
+
+ // Load left and right operand
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+
+ // Compare operands
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered);
+
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ // Performing mov, because xor would destroy the flag register.
+ __ movl(rax, Immediate(0));
+ __ movl(rcx, Immediate(0));
+ __ setcc(above, rax); // Add one to zero if carry clear and not equal.
+ __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+ __ ret(0);
+
+ __ bind(&unordered);
+
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+ __ bind(&generic_stub);
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
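The movl/setcc/sbbq sequence is a branch-free three-way compare: after ucomisd, the above condition (CF and ZF both clear) holds exactly when left > right, and sbb subtracts an extra one exactly when CF is set (left < right). What the flag arithmetic computes, as a C++ equivalent:

    #include <cassert>

    // Branch-free -1/0/1 compare, mirroring setcc(above) followed by sbb:
    //   rax = (a > b);  rax -= (a < b);   // The borrow is the carry flag.
    int Compare(double a, double b) {
      // NaN operands would yield 0 here; the stub handles them separately
      // by branching on parity_even before this sequence runs.
      return (a > b) - (a < b);
    }

    int main() {
      assert(Compare(2.0, 1.0) == 1);
      assert(Compare(1.0, 2.0) == -1);
      assert(Compare(1.0, 1.0) == 0);
      return 0;
    }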
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ ASSERT(state_ == CompareIC::OBJECTS);
+ NearLabel miss;
+ Condition either_smi = masm->CheckEitherSmi(rdx, rax);
+ __ j(either_smi, &miss);
+
+ __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
+ __ j(not_equal, &miss, not_taken);
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
+ __ j(not_equal, &miss, not_taken);
+
+ ASSERT(GetCondition() == equal);
+ __ subq(rax, rdx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ // Save the registers.
+ __ pop(rcx);
+ __ push(rdx);
+ __ push(rax);
+ __ push(rcx);
+
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+ __ EnterInternalFrame();
+ __ push(rdx);
+ __ push(rax);
+ __ Push(Smi::FromInt(op_));
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
+
+ // Compute the entry point of the rewritten stub.
+ __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
+
+ // Restore registers.
+ __ pop(rcx);
+ __ pop(rax);
+ __ pop(rdx);
+ __ push(rcx);
+
+ // Do a tail call to the rewritten stub.
+ __ jmp(rdi);
+}
+
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key and is unchanged (must be a smi).
+ // elements - is set to the receiver's elements if
+ // the receiver doesn't have a pixel array or the
+ // key is not a smi, otherwise it's the elements'
+ // external pointer.
+ // untagged_key - is set to the untagged key
+
+ // Some callers have already verified that the key is a smi; they pass NULL
+ // as key_not_smi to signal that case. Otherwise, an explicit check that the
+ // key is a smi is generated here.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ SmiToInteger32(untagged_key, key);
+
+ // Verify that the receiver has pixel array elements.
+ __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+
+ // Check that the smi is in range.
+ __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+ __ j(above_equal, out_of_range); // unsigned check handles negative keys.
+
+ // Load and tag the element as a smi.
+ __ movq(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
+ __ movzxbq(result, Operand(elements, untagged_key, times_1, 0));
+ __ Integer32ToSmi(result, result);
+ __ ret(0);
}
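The single above_equal jump at the bounds check covers both failure modes because the comparison is unsigned: a negative untagged key reinterprets as a huge unsigned value and so also compares greater-or-equal to the length. A minimal sketch of the trick:

    #include <cassert>
    #include <cstdint>

    // One unsigned compare rejects both too-large and negative keys.
    bool InBounds(int32_t key, uint32_t length) {
      return static_cast<uint32_t>(key) < length;
    }

    int main() {
      assert(InBounds(0, 8) && InBounds(7, 8));
      assert(!InBounds(8, 8));   // Out of range.
      assert(!InBounds(-1, 8));  // Wraps to 0xFFFFFFFF, also rejected.
      return 0;
    }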
+
#undef __
} } // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 9feced2f..8051d4bd 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -270,6 +270,11 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure);
+ void GenerateStringAddCode(MacroAssembler* masm);
+ void GenerateCallRuntimeCode(MacroAssembler* masm);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
@@ -447,6 +452,25 @@ class NumberToStringStub: public CodeStub {
};
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee
+// this precondition. If the receiver does not have elements that are pixel
+// arrays, the generated code jumps to not_pixel_array. If key is not a smi,
+// the generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key,
+// so that the smi check is not generated. If key is not a valid index within
+// the bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register untagged_key,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range);
+
+
} } // namespace v8::internal
#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index a543a504..fe905670 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -2993,21 +2993,22 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// Leave the frame and return popping the arguments and the
// receiver.
frame_->Exit();
- masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
+ int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ __ Ret(arguments_bytes, rcx);
DeleteFrame();
#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint.
- // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
+ // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k"
// with length 7 (3 + 1 + 3).
const int kPadding = Assembler::kJSReturnSequenceLength - 7;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceLength <=
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
@@ -4893,7 +4894,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->value());
if (property->emit_store()) {
Result ignored =
- frame_->CallStoreIC(Handle<String>::cast(key), false);
+ frame_->CallStoreIC(Handle<String>::cast(key), false,
+ strict_mode_flag());
// A test rax instruction following the store IC call would
// indicate the presence of an inlined version of the
// store. Add a nop to indicate that there is no such
@@ -5402,9 +5404,12 @@ void CodeGenerator::VisitCall(Call* node) {
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
done.Jump(&result);
slow.Bind();
@@ -5421,8 +5426,11 @@ void CodeGenerator::VisitCall(Call* node) {
}
frame_->PushParameterAt(-1);
+ // Push the strict mode flag.
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+
// Resolve the call.
- result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// If we generated fast-case code bind the jump-target where fast
// and slow case merge.
@@ -6969,10 +6977,12 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
__ j(not_equal, &not_minus_half);
// Calculates reciprocal of square root.
- // Note that 1/sqrt(x) = sqrt(1/x))
- __ divsd(xmm3, xmm0);
- __ movsd(xmm1, xmm3);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
__ jmp(&allocate_return);
// Test for 0.5.
@@ -6985,7 +6995,9 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
call_runtime.Branch(not_equal);
// Calculates square root.
- __ movsd(xmm1, xmm0);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
JumpTarget done;
@@ -7235,19 +7247,13 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to look up the context holding the named
+ // Call the runtime to delete from the context holding the named
// variable. Sync the virtual frame eagerly so we can push the
// arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
frame_->EmitPush(rsi);
frame_->EmitPush(variable->name());
- Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
- ASSERT(context.is_register());
- frame_->EmitPush(context.reg());
- context.Unuse();
- frame_->EmitPush(variable->name());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
+ Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
return;
}
@@ -8229,7 +8235,7 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
- result = frame()->CallStoreIC(name, is_contextual);
+ result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// A test rax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test rax
// instruction here.
@@ -8329,7 +8335,7 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
slow.Bind(&value, &receiver);
frame()->Push(&receiver);
frame()->Push(&value);
- result = frame()->CallStoreIC(name, is_contextual);
+ result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// Encode the offset to the map check instruction and the offset
// to the write barrier store address computation in a test rax
// instruction.
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index b308f64c..c283db3a 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -357,6 +357,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
inline bool is_eval();
inline Scope* scope();
+ inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 30134bf1..513c5228 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -43,6 +43,9 @@ namespace internal {
void CPU::Setup() {
CpuFeatures::Probe(true);
+ if (Serializer::enabled()) {
+ V8::DisableCrankshaft();
+ }
}
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 6b19d3f1..ed6c47bf 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,19 +40,176 @@ namespace internal {
int Deoptimizer::table_entry_size_ = 10;
+
+int Deoptimizer::patch_size() {
+ return MacroAssembler::kCallInstructionLength;
+}
+
+
+#ifdef DEBUG
+// Overwrites code with int3 instructions.
+static void ZapCodeRange(Address from, Address to) {
+ CHECK(from <= to);
+ int length = static_cast<int>(to - from);
+ CodePatcher destroyer(from, length);
+ while (length-- > 0) {
+ destroyer.masm()->int3();
+ }
+}
+#endif
+
+
+// Iterate through the entries of a SafepointTable that correspond to
+// deoptimization points.
+class SafepointTableDeoptimizationEntryIterator {
+ public:
+ explicit SafepointTableDeoptimizationEntryIterator(Code* code)
+ : code_(code), table_(code), index_(-1), limit_(table_.length()) {
+ FindNextIndex();
+ }
+
+ SafepointEntry Next(Address* pc) {
+ if (index_ >= limit_) {
+ *pc = NULL;
+ return SafepointEntry(); // Invalid entry.
+ }
+ *pc = code_->instruction_start() + table_.GetPcOffset(index_);
+ SafepointEntry entry = table_.GetEntry(index_);
+ FindNextIndex();
+ return entry;
+ }
+
+ private:
+ void FindNextIndex() {
+ ASSERT(index_ < limit_);
+ while (++index_ < limit_) {
+ if (table_.GetEntry(index_).deoptimization_index() !=
+ Safepoint::kNoDeoptimizationIndex) {
+ return;
+ }
+ }
+ }
+
+ Code* code_;
+ SafepointTable table_;
+ // Index of the next deoptimization entry. If it has reached limit_ after
+ // calling FindNextIndex, there are no more entries, and Next will return
+ // an invalid SafepointEntry.
+ int index_;
+ // Table length.
+ int limit_;
+};
+
+
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- // UNIMPLEMENTED, for now just return.
- return;
+ AssertNoAllocation no_allocation;
+
+ if (!function->IsOptimized()) return;
+
+ // Get the optimized code.
+ Code* code = function->code();
+
+ // Invalidate the relocation information, as the code patching below will
+ // make it stale, and it is not needed any more.
+ code->InvalidateRelocation();
+
+ // For each return after a safepoint, insert an absolute call to the
+ // corresponding deoptimization entry, or a short call to an absolute
+ // jump if space is short. The absolute jumps are put in a table just
+ // before the safepoint table (space was allocated there when the Code
+ // object was created, if necessary).
+
+ Address instruction_start = function->code()->instruction_start();
+ Address jump_table_address =
+ instruction_start + function->code()->safepoint_table_offset();
+ Address previous_pc = instruction_start;
+
+ SafepointTableDeoptimizationEntryIterator deoptimizations(function->code());
+ Address entry_pc = NULL;
+
+ SafepointEntry current_entry = deoptimizations.Next(&entry_pc);
+ while (current_entry.is_valid()) {
+ int gap_code_size = current_entry.gap_code_size();
+ unsigned deoptimization_index = current_entry.deoptimization_index();
+
+#ifdef DEBUG
+ // Destroy the code which is not supposed to run again.
+ ZapCodeRange(previous_pc, entry_pc);
+#endif
+ // Position where Call will be patched in.
+ Address call_address = entry_pc + gap_code_size;
+ // End of call instruction, if using a direct call to a 64-bit address.
+ Address call_end_address =
+ call_address + MacroAssembler::kCallInstructionLength;
+
+ // Find next deoptimization entry, if any.
+ Address next_pc = NULL;
+ SafepointEntry next_entry = deoptimizations.Next(&next_pc);
+
+ if (!next_entry.is_valid() || next_pc >= call_end_address) {
+ // Enough room to write a long call instruction.
+ CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
+ patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
+ RelocInfo::NONE);
+ previous_pc = call_end_address;
+ } else {
+ // Not enough room for a long call instruction. Write a short call
+ // instruction to a long jump placed elsewhere in the code.
+ Address short_call_end_address =
+ call_address + MacroAssembler::kShortCallInstructionLength;
+ ASSERT(next_pc >= short_call_end_address);
+
+ // Write jump in jump-table.
+ jump_table_address -= MacroAssembler::kJumpInstructionLength;
+ CodePatcher jump_patcher(jump_table_address,
+ MacroAssembler::kJumpInstructionLength);
+ jump_patcher.masm()->Jump(
+ GetDeoptimizationEntry(deoptimization_index, LAZY),
+ RelocInfo::NONE);
+
+ // Write a short call to the jump, at call_address.
+ CodePatcher call_patcher(call_address,
+ MacroAssembler::kShortCallInstructionLength);
+ call_patcher.masm()->call(jump_table_address);
+ previous_pc = short_call_end_address;
+ }
+
+ // Continue with next deoptimization entry.
+ current_entry = next_entry;
+ entry_pc = next_pc;
+ }
+
+#ifdef DEBUG
+ // Destroy the code which is not supposed to run again.
+ ZapCodeRange(previous_pc, jump_table_address);
+#endif
+
+ // Add the deoptimizing code to the list.
+ DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+ node->set_next(deoptimizing_code_list_);
+ deoptimizing_code_list_ = node;
+
+ // Set the code for the function to non-optimized version.
+ function->ReplaceCode(function->shared()->code());
+
+ if (FLAG_trace_deopt) {
+ PrintF("[forced deoptimization: ");
+ function->PrintName();
+ PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
}
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
- Code* replacement_code) {
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
@@ -64,20 +221,382 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
int frame_index) {
- UNIMPLEMENTED();
+ // Read the ast node id, function, and frame height for this output frame.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ int node_id = iterator->Next();
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating ");
+ function->PrintName();
+ PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ }
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by JavaScriptFrameConstants.
+ unsigned fixed_frame_size = ComputeFixedSize(function);
+ unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ ASSERT(frame_index >= 0 && frame_index < output_count_);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address for the bottommost output frame can be computed from
+ // the input frame pointer and the output frame's height. For all
+ // subsequent output frames, it can be computed from the previous one's
+ // top address and the current frame's size.
+ intptr_t top_address;
+ if (is_bottommost) {
+ // 2 = context and function in the frame.
+ top_address =
+ input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
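+ // The count includes the receiver, hence the "+ 1" below.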
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Synthesize their values and set them up
+ // explicitly.
+ //
+ // The caller's pc for the bottommost output frame is the same as in the
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc can be computed from the non-optimized
+ // function code and AST id of the bailout.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The caller's frame pointer for the bottommost output frame is the same
+ // as in the input frame. For all subsequent output frames, it can be
+ // read from the previous one. Also compute and set this frame's frame
+ // pointer.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
+ output_frame->SetFp(fp_value);
+ if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be obtained from the function as long as we don't
+ // optimize functions that need local contexts.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function->context());
+ // The context for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (is_topmost) output_frame->SetRegister(rsi.code(), value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The function was mentioned explicitly in the FRAME translation command.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ // The function for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Translate the rest of the frame.
+ for (unsigned i = 0; i < height; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ ASSERT(0 == output_offset);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* non_optimized_code = function->shared()->code();
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ Address start = non_optimized_code->instruction_start();
+ unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+ unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+ output_frame->SetPc(pc_value);
+
+ FullCodeGenerator::State state =
+ FullCodeGenerator::StateField::decode(pc_and_state);
+ output_frame->SetState(Smi::FromInt(state));
+
+ // Set the continuation for the topmost frame.
+ if (is_topmost) {
+ Code* continuation = (bailout_type_ == EAGER)
+ ? Builtins::builtin(Builtins::NotifyDeoptimized)
+ : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+ }
+
+ if (output_count_ - 1 == frame_index) iterator->Done();
}
+#define __ masm()->
+
void Deoptimizer::EntryGenerator::Generate() {
- // UNIMPLEMENTED, for now just return.
- return;
+ GeneratePrologue();
+ CpuFeatures::Scope scope(SSE2);
+
+ // Save all general purpose registers before messing with them.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ const int kDoubleRegsSize = kDoubleSize *
+ XMMRegister::kNumAllocatableRegisters;
+ __ subq(rsp, Immediate(kDoubleRegsSize));
+
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ movsd(Operand(rsp, offset), xmm_reg);
+ }
+
+ // We push all registers onto the stack, even though we do not need
+ // to restore all of them later.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ Register r = Register::toRegister(i);
+ __ push(r);
+ }
+
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+ kDoubleRegsSize;
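+ // kSavedRegistersAreaSize bytes of saved registers now sit below the
+ // bailout id that was pushed before entering this code.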
+
+ // When calling new_deoptimizer_function we need to pass the last argument
+ // on the stack on Windows and in r8 on Linux. The remaining arguments are
+ // all passed in registers (though different ones on Linux and Windows).
+
+#ifdef _WIN64
+ Register arg4 = r9;
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg4 = rcx;
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+
+ // We use r11 to keep the value of the fifth argument temporarily.
+ // Unfortunately we can't store it directly in r8 (used for passing it on
+ // Linux), since r8 is another parameter-passing register on Windows.
+ Register arg5 = r11;
+
+ // Get the bailout id from the stack.
+ __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
+
+ // Get the address of the location in the code object if possible
+ // and compute the fp-to-sp delta in register arg5.
+ if (type() == EAGER) {
+ __ Set(arg4, 0);
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ } else {
+ __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
+ }
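+ // arg4 now holds the address of the call site in the optimized code for
+ // a lazy bailout, or zero for an eager one.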
+
+ __ subq(arg5, rbp);
+ __ neg(arg5);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(5);
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(arg1, rax);
+ __ movq(arg2, Immediate(type()));
+ // Args 3 and 4 are already in the right registers.
+
+ // On Windows, put the argument on the stack (PrepareCallCFunction has
+ // created space for this). On Linux, pass the argument in r8.
+#ifdef _WIN64
+ __ movq(Operand(rsp, 0 * kPointerSize), arg5);
+#else
+ __ movq(r8, arg5);
+#endif
+
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+ // Preserve deoptimizer object in register rax and get the input
+ // frame descriptor pointer.
+ __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+
+ // Fill in the input registers.
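+ // The registers were pushed in ascending index order, so pop in
+ // descending order to land each value in its slot.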
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ pop(Operand(rbx, offset));
+ }
+
+ // Fill in the double input registers.
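+ // They sit directly below the general registers on the stack and can
+ // also be popped, since kDoubleSize equals kPointerSize on x64.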
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ __ pop(Operand(rbx, dst_offset));
+ }
+
+ // Remove the bailout id from the stack.
+ if (type() == EAGER) {
+ __ addq(rsp, Immediate(kPointerSize));
+ } else {
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ }
+
+ // Compute a pointer to the unwinding limit in register rcx; that is
+ // the first stack slot not part of the input frame.
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ addq(rcx, rsp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ pop(Operand(rdx, 0));
+ __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ cmpq(rcx, rsp);
+ __ j(not_equal, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(rax);
+ __ PrepareCallCFunction(1);
+ __ movq(arg1, rax);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ __ pop(rax);
+
+ // Replace the current frame with the output frames.
+ Label outer_push_loop, inner_push_loop;
+ // Outer loop state: rax = current FrameDescription**, rdx = one past the
+ // last FrameDescription**.
+ __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
+ __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ lea(rdx, Operand(rax, rdx, times_8, 0));
+ __ bind(&outer_push_loop);
+ // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
+ __ movq(rbx, Operand(rax, 0));
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ bind(&inner_push_loop);
+ __ subq(rcx, Immediate(sizeof(intptr_t)));
+ __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ testq(rcx, rcx);
+ __ j(not_zero, &inner_push_loop);
+ __ addq(rax, Immediate(kPointerSize));
+ __ cmpq(rax, rdx);
+ __ j(below, &outer_push_loop);
+
+ // In case of OSR, we have to restore the XMM registers.
+ if (type() == OSR) {
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ movsd(xmm_reg, Operand(rbx, src_offset));
+ }
+ }
+
+ // Push state, pc, and continuation from the last output frame.
+ if (type() != OSR) {
+ __ push(Operand(rbx, FrameDescription::state_offset()));
+ }
+ __ push(Operand(rbx, FrameDescription::pc_offset()));
+ __ push(Operand(rbx, FrameDescription::continuation_offset()));
+
+ // Push the registers from the last output frame.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ push(Operand(rbx, offset));
+ }
+
+ // Restore the registers from the stack.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ Register r = Register::toRegister(i);
+ // Do not restore rsp: pop its saved value into the next register
+ // instead; that register is overwritten by its own pop afterwards.
+ if (r.is(rsp)) {
+ ASSERT(i > 0);
+ r = Register::toRegister(i - 1);
+ }
+ __ pop(r);
+ }
+
+ // Set up the roots register.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(r13, roots_address);
+
+ __ movq(kSmiConstantRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ RelocInfo::NONE);
+
+ // Return to the continuation point.
+ __ ret(0);
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- UNIMPLEMENTED();
+ // Create a sequence of deoptimization entries.
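+ // Each entry pushes its id and jumps to the code following the table;
+ // all entries must have the same fixed size.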
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ push_imm32(i);
+ __ jmp(&done);
+ ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ }
+ __ bind(&done);
}
+#undef __
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 7502d618..f73f9484 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1025,11 +1025,19 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
rex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
current += PrintRightOperand(current);
+ } else if (opcode == 0x6F) {
+ AppendToBuffer("movdqa %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
} else if (opcode == 0x7E) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x7F) {
+ AppendToBuffer("movdqa ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else {
const char* mnemonic = "?";
if (opcode == 0x57) {
@@ -1038,6 +1046,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
+ } else if (opcode == 0x50) {
+ mnemonic = "movmskpd";
} else {
UnimplementedInstruction();
}
@@ -1113,9 +1123,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x2C) {
// CVTTSS2SI:
// Convert with truncation scalar single-precision FP to dword integer.
- // Assert that mod is not 3, so source is memory, not an XMM register.
- ASSERT_NE(0xC0, *current & 0xC0);
- current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("cvttss2si%c %s,",
+ operand_size_code(), NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x5A) {
// CVTSS2SD:
// Convert scalar single-precision FP to scalar double-precision FP.
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index a2a0e7e9..998b3e9f 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -31,7 +31,7 @@
namespace v8 {
namespace internal {
-static const int kNumRegs = 8;
+static const int kNumRegs = 16;
static const RegList kJSCallerSaved =
1 << 0 | // rax
1 << 1 | // rcx
@@ -44,8 +44,7 @@ static const int kNumJSCallerSaved = 5;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
-// TODO(x64): This should not be 0.
-static const int kNumSafepointRegisters = 8;
+static const int kNumSafepointRegisters = 16;
// ----------------------------------------------------
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 724a7c59..556ec852 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -43,6 +43,58 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+
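+// A patch site marks a jump in the generated code that the IC system can
+// later rewrite. EmitPatchInfo records the distance back to the jump as
+// the 8-bit immediate of a test instruction; PatchInlinedSmiCode uses it
+// to find the jump and flip its condition.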
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm)
+ : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+ void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+ __ testb(reg, Immediate(kSmiTagMask));
+ EmitJump(not_carry, target); // Always taken before patched.
+ }
+
+ void EmitJumpIfSmi(Register reg, NearLabel* target) {
+ __ testb(reg, Immediate(kSmiTagMask));
+ EmitJump(carry, target); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_int8(delta_to_patch_site));
+ __ testl(rax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ }
+
+ bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+ // jc will be patched with jz, jnc will become jnz.
+ void EmitJump(Condition cc, NearLabel* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ ASSERT(cc == carry || cc == not_carry);
+ __ bind(&patch_site_);
+ __ j(cc, target);
+ }
+
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -245,19 +297,22 @@ void FullCodeGenerator::EmitReturnSequence() {
// patch with the code required by the debugger.
__ movq(rsp, rbp);
__ pop(rbp);
- __ ret((scope()->num_parameters() + 1) * kPointerSize);
+
+ int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ __ Ret(arguments_bytes, rcx);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
+ // have just generated at least 7 bytes: "movq rsp, rbp; pop rbp; ret k"
// (3 + 1 + 3).
const int kPadding = Assembler::kJSReturnSequenceLength - 7;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceLength <=
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
@@ -659,18 +714,24 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
} else if (prop != NULL) {
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- VisitForStackValue(prop->obj());
+ // property. Use (keyed) IC to set the initial value. We
+ // cannot visit the rewrite because it's shared and we risk
+ // recording duplicate AST IDs for bailouts from optimized code.
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ }
if (function != NULL) {
- VisitForStackValue(prop->key());
+ __ push(rax);
VisitForAccumulatorValue(function);
- __ pop(rcx);
+ __ pop(rdx);
} else {
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, result_register());
- __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
+ __ movq(rdx, rax);
+ __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
}
- __ pop(rdx);
+ ASSERT(prop->key()->AsLiteral() != NULL &&
+ prop->key()->AsLiteral()->handle()->IsSmi());
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -710,6 +771,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
+ clause->body_target()->entry_label()->Unuse();
+
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
default_clause = clause;
@@ -726,21 +789,25 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Perform the comparison as if via '==='.
__ movq(rdx, Operand(rsp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- Label slow_case;
- __ JumpIfNotBothSmi(rdx, rax, &slow_case);
- __ SmiCompare(rdx, rax);
+ NearLabel slow_case;
+ __ movq(rcx, rdx);
+ __ or_(rcx, rax);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+
+ __ cmpq(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target()->entry_label());
__ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(equal, true, flags);
- __ CallStub(&stub);
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ EmitCallIC(ic, &patch_site);
+
__ testq(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -1520,21 +1587,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
// Do combined smi check of the operands. Left operand is on the
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
- Label done, stub_call, smi_case;
+ NearLabel done, stub_call, smi_case;
__ pop(rdx);
__ movq(rcx, rax);
- Condition smi = masm()->CheckBothSmi(rdx, rax);
- __ j(smi, &smi_case);
+ __ or_(rax, rdx);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(rax, &smi_case);
__ bind(&stub_call);
- GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
- if (stub.ArgsInRegistersSupported()) {
- stub.GenerateCall(masm_, rdx, rcx);
- } else {
- __ push(rdx);
- __ push(rcx);
- __ CallStub(&stub);
- }
+ __ movq(rax, rcx);
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
__ bind(&smi_case);
@@ -1578,14 +1641,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
- GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS);
- if (stub.ArgsInRegistersSupported()) {
- __ pop(rdx);
- stub.GenerateCall(masm_, rdx, rax);
- } else {
- __ push(result_register());
- __ CallStub(&stub);
- }
+ __ pop(rdx);
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), NULL); // NULL signals no inlined smi code.
context()->Plug(rax);
}
@@ -1657,8 +1715,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// rcx, and the global object on the stack.
__ Move(rcx, var->name());
__ movq(rdx, GlobalObjectOperand());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- EmitCallIC(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> ic(Builtins::builtin(is_strict()
+ ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
+ EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
@@ -1932,7 +1992,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ // Push the strict mode flag.
+ __ Push(Smi::FromInt(strict_mode_flag()));
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
@@ -2006,16 +2068,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
// for a regular property use keyed EmitCallIC.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(prop->obj());
- }
if (prop->is_synthetic()) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForAccumulatorValue(prop->key());
- }
+ // Do not visit the object and key subexpressions (they are shared
+ // by all occurrences of the same rewritten parameter).
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
+ Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
+ MemOperand operand = EmitSlotSearch(slot, rdx);
+ __ movq(rdx, operand);
+
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
+ __ Move(rax, prop->key()->AsLiteral()->handle());
+
// Record source code position for IC call.
SetSourcePosition(prop->position());
- __ pop(rdx); // We do not need to keep the receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -2026,6 +2093,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
EmitCallWithStub(expr);
} else {
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
}
}
@@ -2992,26 +3062,29 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
- } else {
- // Property or variable reference. Call the delete builtin with
- // object and property name as arguments.
- if (prop != NULL) {
+ } else if (prop != NULL) {
+ if (prop->is_synthetic()) {
+ // Result of deleting parameters is false, even when they rewrite
+ // to accesses on the arguments object.
+ context()->Plug(false);
+ } else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- } else if (var->is_global()) {
- __ push(GlobalObjectOperand());
- __ Push(var->name());
- } else {
- // Non-global variable. Call the runtime to look up the context
- // where the variable was introduced.
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kLookupContext, 2);
- __ push(rax);
- __ Push(var->name());
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(rax);
}
+ } else if (var->is_global()) {
+ __ push(GlobalObjectOperand());
+ __ Push(var->name());
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(rax);
}
break;
}
@@ -3054,8 +3127,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label no_conversion;
Condition is_smi = masm_->CheckSmi(result_register());
__ j(is_smi, &no_conversion);
- __ push(result_register());
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
__ bind(&no_conversion);
context()->Plug(result_register());
break;
@@ -3171,8 +3244,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Condition is_smi;
is_smi = masm_->CheckSmi(rax);
__ j(is_smi, &no_conversion);
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
__ bind(&no_conversion);
// Save result for postfix expressions.
@@ -3196,7 +3269,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Inline smi case if we are in a loop.
- Label stub_call, done;
+ NearLabel stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ SmiAddConstant(rax, rax, Smi::FromInt(1));
@@ -3206,8 +3281,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ j(overflow, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &done);
+ patch_site.EmitJumpIfSmi(rax, &done);
+
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
@@ -3221,10 +3296,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
// Call stub for +1/-1.
- GenericBinaryOpStub stub(expr->binary_op(),
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- stub.GenerateCall(masm_, rax, Smi::FromInt(1));
+ TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ if (expr->op() == Token::INC) {
+ __ Move(rdx, Smi::FromInt(1));
+ } else {
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(1));
+ }
+ EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
// Store the value returned in rax.
@@ -3494,19 +3573,21 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
- Label slow_case;
- __ JumpIfNotBothSmi(rax, rdx, &slow_case);
- __ SmiCompare(rdx, rax);
+ NearLabel slow_case;
+ __ movq(rcx, rdx);
+ __ or_(rcx, rax);
+ patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+ __ cmpq(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
- CompareFlags flags = inline_smi_code
- ? NO_SMI_COMPARE_IN_STUB
- : NO_COMPARE_FLAGS;
- CompareStub stub(cc, strict, flags);
- __ CallStub(&stub);
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
@@ -3569,10 +3650,30 @@ Register FullCodeGenerator::context_register() {
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
ASSERT(mode == RelocInfo::CODE_TARGET ||
mode == RelocInfo::CODE_TARGET_CONTEXT);
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(&Counters::keyed_store_full, 1);
+ break;
+ default:
+ break;
+ }
+
__ call(ic, mode);
// Crankshaft doesn't need patching of inlined loads and stores.
- if (V8::UseCrankshaft()) return;
+ // When compiling the snapshot we need to produce code that works
+ // with and without Crankshaft.
+ if (V8::UseCrankshaft() && !Serializer::enabled()) {
+ return;
+ }
// If we're calling a (keyed) load or store stub, we have to mark
// the call as containing no inlined code so we will not attempt to
@@ -3591,6 +3692,16 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
}
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ __ call(ic, RelocInfo::CODE_TARGET);
+ if (patch_site != NULL && patch_site->is_bound()) {
+ patch_site->EmitPatchInfo();
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+}
+
+
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
__ movq(Operand(rbp, frame_offset), value);
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index b54aeb97..8c2856f8 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -397,7 +397,7 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
}
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -405,7 +405,8 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -----------------------------------
Label miss;
- StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss,
+ support_wrappers);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -579,20 +580,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
__ bind(&check_pixel_array);
- // Check whether the elements object is a pixel array.
- // rdx: receiver
- // rax: key
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rbx, rax); // Used on both directions of next branch.
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- Heap::kPixelArrayMapRootIndex);
- __ j(not_equal, &check_number_dictionary);
- __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ movq(rax, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
- __ movzxbq(rax, Operand(rax, rbx, times_1, 0));
- __ Integer32ToSmi(rax, rax);
- __ ret(0);
+ GenerateFastPixelArrayLoad(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rax,
+ &check_number_dictionary,
+ NULL,
+ &slow);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
@@ -727,131 +723,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &slow);
-
- // Check that the object is a JS object.
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &slow);
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks. The map is already in rdx.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: index (as a smi)
- // rdx: JSObject
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
-
- // Check that the index is in range.
- __ SmiToInteger32(rcx, rax);
- __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // rax: index (as a smi)
- // rdx: receiver (JSObject)
- // rcx: untagged index
- // rbx: elements array
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case kExternalUnsignedByteArray:
- __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case kExternalShortArray:
- __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case kExternalUnsignedShortArray:
- __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case kExternalIntArray:
- __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case kExternalUnsignedIntArray:
- __ movl(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case kExternalFloatArray:
- __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // rax: index
- // rdx: receiver
- // For integer array types:
- // rcx: value
- // For floating-point array type:
- // xmm0: value as double.
-
- ASSERT(kSmiValueSize == 32);
- if (array_type == kExternalUnsignedIntArray) {
- // For the UnsignedInt array type, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- NearLabel box_int;
-
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
-
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- // The value is zero-extended since we loaded the value from memory
- // with movl.
- __ cvtqsi2sd(xmm0, rcx);
-
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
- }
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
- GenerateRuntimeGetProperty(masm);
-}
-
-
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
@@ -1023,149 +894,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
- // Get the map from the receiver.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow);
-
- // Check that the object is a JS object.
- __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
- __ j(not_equal, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
-
- // Check that the index is in range.
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- NearLabel check_heap_number;
- __ JumpIfNotSmi(rax, &check_heap_number);
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case kExternalFloatArray:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0);
-
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (array_type == kExternalFloatArray) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else {
- // Need to perform float-to-int conversion.
- // Test the value for NaN.
-
- // Convert to int32 and store the low byte/word.
- // If the value is NaN or +/-infinity, the result is 0x80000000,
- // which is automatically zero when taken mod 2^n, n < 32.
- // rdx: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ cvtsd2si(rdx, xmm0);
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ cvtsd2si(rdx, xmm0);
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray: {
- // Convert to int64, so that NaN and infinities become
- // 0x8000000000000000, which is zero mod 2^32.
- __ cvtsd2siq(rdx, xmm0);
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- GenerateRuntimeSetProperty(masm);
-}
-
-
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
@@ -1745,7 +1473,8 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1756,7 +1485,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
- MONOMORPHIC);
+ MONOMORPHIC,
+ extra_ic_state);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
@@ -1945,11 +1675,23 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
+static bool HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al instruction,
+ // nothing was inlined.
+ return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
+
+ State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
rewritten = stub.GetCode();
@@ -1967,10 +1709,43 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
Token::Name(op_));
}
#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address());
+ }
}
void PatchInlinedSmiCode(Address address) {
- UNIMPLEMENTED();
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test al instruction,
+ // nothing was inlined.
+ if (*test_instruction_address != Assembler::kTestAlByte) {
+ ASSERT(*test_instruction_address == Assembler::kNopByte);
+ return;
+ }
+
+ Address delta_address = test_instruction_address + 1;
+ // The delta back to the patchable jump, and the condition code to use
+ // at the patched jump.
+ int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n",
+ address, test_instruction_address, delta);
+ }
+
+ // Patch with a short conditional jump. There must be a
+ // short jump-if-carry/not-carry at this position.
+ Address jmp_address = test_instruction_address - delta;
+ ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+ *jmp_address == Assembler::kJcShortOpcode);
+ Condition cc = *jmp_address == Assembler::kJncShortOpcode
+ ? not_zero
+ : zero;
+ *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
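+ // The jump now reacts to the smi test (jz/jnz) instead of being
+ // statically taken or not taken (test always clears the carry flag).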
}
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 151fad73..36c9aac2 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -37,157 +37,6 @@ namespace v8 {
namespace internal {
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
- : nodes_(32),
- identified_cycles_(4),
- result_(16),
- next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
- const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand) {
- nodes_.Rewind(0);
- identified_cycles_.Rewind(0);
- result_.Rewind(0);
- next_visited_id_ = 0;
-
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
- }
-
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i], marker_operand);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
- }
- if (!node->IsResolved()) ++unresolved_nodes;
- }
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
- ZoneList<LOperand*> cycle_operands(8);
- cycle_operands.Add(marker_operand);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- cycle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- cycle_operands.Add(marker_operand);
-
- for (int i = cycle_operands.length() - 1; i > 0; --i) {
- LOperand* from = cycle_operands[i];
- LOperand* to = cycle_operands[i - 1];
- AddResultMove(from, to);
- }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
- }
-
- return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.source()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.source(), move.destination());
- } else {
- LGapNode* from = LookupNode(move.source());
- LGapNode* to = LookupNode(move.destination());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a cycle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
- }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
-}
-
-
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -204,7 +53,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -339,6 +188,20 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
+ // Ensure that there is space at the end of the code to write a number
+ // of jump instructions, as well as room for writing a call near the end
+ // of the code.
+ // The jumps are used when there isn't room in the code stream to write
+ // a long call instruction; a shorter call to a jump instruction in the
+ // same code object is written instead.
+ // The calls are used when lazily deoptimizing a function: the returns
+ // after safepoints are patched with calls to a deoptimization entry.
+ int short_deopts = safepoints_.CountShortDeoptimizationIntervals(
+ static_cast<unsigned>(MacroAssembler::kJumpInstructionLength));
+ int byte_count = short_deopts * MacroAssembler::kJumpInstructionLength;
+ while (byte_count-- > 0) {
+ __ int3();
+ }
safepoints_.Emit(masm(), StackSlotCount());
return !is_aborted();
}
@@ -493,17 +356,11 @@ void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
- if (instr != NULL) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ call(code, mode);
- RegisterLazyDeoptimization(instr);
- } else {
- LPointerMap no_pointers(0);
- RecordPosition(no_pointers.position());
- __ call(code, mode);
- RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
- }
+ ASSERT(instr != NULL);
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ __ call(code, mode);
+ RegisterLazyDeoptimization(instr);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
@@ -517,7 +374,13 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
- Abort("Unimplemented: %s", "CallRuntime");
+ ASSERT(instr != NULL);
+ ASSERT(instr->HasPointerMap());
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+
+ __ CallRuntime(function, num_arguments);
+ RegisterLazyDeoptimization(instr);
}
@@ -567,7 +430,24 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- Abort("Unimplemented: %s", "Deoptimiz");
+ RegisterEnvironmentForDeoptimization(environment);
+ ASSERT(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(entry != NULL);
+ if (entry == NULL) {
+ Abort("bailout was not prepared");
+ return;
+ }
+
+ if (cc == no_condition) {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ NearLabel done;
+ __ j(NegateCondition(cc), &done);
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ __ bind(&done);
+ }
}
@@ -629,37 +509,41 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
+
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- deoptimization_index);
+ kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
+ if (kind & Safepoint::kWithRegisters) {
+ // Register rsi always contains a pointer to the context.
+ safepoint.DefinePointerRegister(rsi);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ int deoptimization_index) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint =
- safepoints_.DefineSafepointWithRegisters(
- masm(), arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister()) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
- // Register rsi always contains a pointer to the context.
- safepoint.DefinePointerRegister(rsi);
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+ deoptimization_index);
}
@@ -682,86 +566,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
- // xmm0 must always be a scratch register.
- XMMRegister xmm_scratch = xmm0;
- LUnallocated marker_operand(LUnallocated::NONE);
-
- Register cpu_scratch = kScratchRegister;
-
- const ZoneList<LMoveOperands>* moves =
- resolver_.Resolve(move->move_operands(), &marker_operand);
- for (int i = moves->length() - 1; i >= 0; --i) {
- LMoveOperands move = moves->at(i);
- LOperand* from = move.source();
- LOperand* to = move.destination();
- ASSERT(!from->IsDoubleRegister() ||
- !ToDoubleRegister(from).is(xmm_scratch));
- ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
- ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
- ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
- if (from->IsConstantOperand()) {
- LConstantOperand* constant_from = LConstantOperand::cast(from);
- if (to->IsRegister()) {
- if (IsInteger32Constant(constant_from)) {
- __ movl(ToRegister(to), Immediate(ToInteger32(constant_from)));
- } else {
- __ Move(ToRegister(to), ToHandle(constant_from));
- }
- } else {
- if (IsInteger32Constant(constant_from)) {
- __ movl(ToOperand(to), Immediate(ToInteger32(constant_from)));
- } else {
- __ Move(ToOperand(to), ToHandle(constant_from));
- }
- }
- } else if (from == &marker_operand) {
- if (to->IsRegister()) {
- __ movq(ToRegister(to), cpu_scratch);
- } else if (to->IsStackSlot()) {
- __ movq(ToOperand(to), cpu_scratch);
- } else if (to->IsDoubleRegister()) {
- __ movsd(ToDoubleRegister(to), xmm_scratch);
- } else {
- ASSERT(to->IsDoubleStackSlot());
- __ movsd(ToOperand(to), xmm_scratch);
- }
- } else if (to == &marker_operand) {
- if (from->IsRegister()) {
- __ movq(cpu_scratch, ToRegister(from));
- } else if (from->IsStackSlot()) {
- __ movq(cpu_scratch, ToOperand(from));
- } else if (from->IsDoubleRegister()) {
- __ movsd(xmm_scratch, ToDoubleRegister(from));
- } else {
- ASSERT(from->IsDoubleStackSlot());
- __ movsd(xmm_scratch, ToOperand(from));
- }
- } else if (from->IsRegister()) {
- if (to->IsRegister()) {
- __ movq(ToRegister(to), ToRegister(from));
- } else {
- __ movq(ToOperand(to), ToRegister(from));
- }
- } else if (to->IsRegister()) {
- __ movq(ToRegister(to), ToOperand(from));
- } else if (from->IsStackSlot()) {
- ASSERT(to->IsStackSlot());
- __ push(rax);
- __ movq(rax, ToOperand(from));
- __ movq(ToOperand(to), rax);
- __ pop(rax);
- } else if (from->IsDoubleRegister()) {
- ASSERT(to->IsDoubleStackSlot());
- __ movsd(ToOperand(to), ToDoubleRegister(from));
- } else if (to->IsDoubleRegister()) {
- ASSERT(from->IsDoubleStackSlot());
- __ movsd(ToDoubleRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
- __ movsd(xmm_scratch, ToOperand(from));
- __ movsd(ToOperand(to), xmm_scratch);
- }
- }
+ resolver_.Resolve(move);
}
@@ -788,7 +593,56 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
- Abort("Unimplemented: %s", "DoCallStub");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ switch (instr->hydrogen()->major_key()) {
+ case CodeStub::RegExpConstructResult: {
+ RegExpConstructResultStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::RegExpExec: {
+ RegExpExecStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::SubString: {
+ SubStringStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCharAt: {
+ // TODO(1116): Add StringCharAt stub to x64.
+ Abort("Unimplemented: %s", "StringCharAt Stub");
+ break;
+ }
+ case CodeStub::MathPow: {
+ // TODO(1115): Add MathPow stub to x64.
+ Abort("Unimplemented: %s", "MathPow Stub");
+ break;
+ }
+ case CodeStub::NumberToString: {
+ NumberToStringStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringAdd: {
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::StringCompare: {
+ StringCompareStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ case CodeStub::TranscendentalCache: {
+ TranscendentalCacheStub stub(instr->transcendental_type());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
@@ -803,24 +657,224 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
- Abort("Unimplemented: %s", "DoDivI");}
+ LOperand* right = instr->InputAt(1);
+ ASSERT(ToRegister(instr->result()).is(rax));
+ ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
+
+ Register left_reg = rax;
+
+ // Check for x / 0.
+ Register right_reg = ToRegister(right);
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ NearLabel left_not_zero;
+ __ testl(left_reg, left_reg);
+ __ j(not_zero, &left_not_zero);
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(sign, instr->environment());
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (-kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ NearLabel left_not_min_int;
+ __ cmpl(left_reg, Immediate(kMinInt));
+ __ j(not_zero, &left_not_min_int);
+ __ cmpl(right_reg, Immediate(-1));
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&left_not_min_int);
+ }
+
+ // Sign extend to rdx.
+ __ cdq();
+ __ idivl(right_reg);
+
+ // Deoptimize if remainder is not 0.
+ __ testl(rdx, rdx);
+ DeoptimizeIf(not_zero, instr->environment());
+}
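
A rough integer model (an assumption, not from this patch) of the four DeoptimizeIf checks DoDivI emits around idivl; kMinInt corresponds to INT32_MIN and abort() stands in for deoptimization.

#include <cstdint>
#include <cstdlib>

int32_t div_or_deopt(int32_t left, int32_t right) {
  if (right == 0) std::abort();                        // x / 0
  if (left == 0 && right < 0) std::abort();            // 0 / -x is -0.0 in JS
  if (left == INT32_MIN && right == -1) std::abort();  // quotient overflows
  if (left % right != 0) std::abort();                 // must stay an integer
  return left / right;
}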
void LCodeGen::DoMulI(LMulI* instr) {
- Abort("Unimplemented: %s", "DoMultI");}
+ Register left = ToRegister(instr->InputAt(0));
+ LOperand* right = instr->InputAt(1);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ movl(kScratchRegister, left);
+ }
+
+ if (right->IsConstantOperand()) {
+ int right_value = ToInteger32(LConstantOperand::cast(right));
+ __ imull(left, left, Immediate(right_value));
+ } else if (right->IsStackSlot()) {
+ __ imull(left, ToOperand(right));
+ } else {
+ __ imull(left, ToRegister(right));
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ // Bail out if the result is supposed to be negative zero.
+ NearLabel done;
+ __ testl(left, left);
+ __ j(not_zero, &done);
+ if (right->IsConstantOperand()) {
+ if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else if (right->IsStackSlot()) {
+ __ or_(kScratchRegister, ToOperand(right));
+ DeoptimizeIf(sign, instr->environment());
+ } else {
+ // Test the non-zero operand for negative sign.
+ __ or_(kScratchRegister, ToRegister(right));
+ DeoptimizeIf(sign, instr->environment());
+ }
+ __ bind(&done);
+ }
+}
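
A hedged model of the minus-zero handling above: when the product is zero, JS requires -0.0 if exactly one operand was negative, so the original left value is saved (in kScratchRegister) before imull clobbers it and OR-ed with right; a set sign bit forces a deopt.

#include <cstdint>
#include <cstdlib>

int32_t mul_or_deopt(int32_t left, int32_t right) {
  int32_t saved_left = left;  // __ movl(kScratchRegister, left)
  int64_t product = static_cast<int64_t>(left) * right;
  if (product != static_cast<int32_t>(product)) std::abort();  // kCanOverflow
  if (product == 0 && (saved_left | right) < 0) std::abort();  // -0.0 case
  return static_cast<int32_t>(product);
}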
void LCodeGen::DoBitI(LBitI* instr) {
- Abort("Unimplemented: %s", "DoBitI");}
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+
+ if (right->IsConstantOperand()) {
+ int right_operand = ToInteger32(LConstantOperand::cast(right));
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andl(ToRegister(left), Immediate(right_operand));
+ break;
+ case Token::BIT_OR:
+ __ orl(ToRegister(left), Immediate(right_operand));
+ break;
+ case Token::BIT_XOR:
+ __ xorl(ToRegister(left), Immediate(right_operand));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (right->IsStackSlot()) {
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andl(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_OR:
+ __ orl(ToRegister(left), ToOperand(right));
+ break;
+ case Token::BIT_XOR:
+ __ xorl(ToRegister(left), ToOperand(right));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ ASSERT(right->IsRegister());
+ switch (instr->op()) {
+ case Token::BIT_AND:
+ __ andl(ToRegister(left), ToRegister(right));
+ break;
+ case Token::BIT_OR:
+ __ orl(ToRegister(left), ToRegister(right));
+ break;
+ case Token::BIT_XOR:
+ __ xorl(ToRegister(left), ToRegister(right));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
void LCodeGen::DoShiftI(LShiftI* instr) {
- Abort("Unimplemented: %s", "DoShiftI");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+ ASSERT(left->IsRegister());
+ if (right->IsRegister()) {
+ ASSERT(ToRegister(right).is(rcx));
+
+ switch (instr->op()) {
+ case Token::SAR:
+ __ sarl_cl(ToRegister(left));
+ break;
+ case Token::SHR:
+ __ shrl_cl(ToRegister(left));
+ if (instr->can_deopt()) {
+ __ testl(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case Token::SHL:
+ __ shll_cl(ToRegister(left));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ int value = ToInteger32(LConstantOperand::cast(right));
+ uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+ switch (instr->op()) {
+ case Token::SAR:
+ if (shift_count != 0) {
+ __ sarl(ToRegister(left), Immediate(shift_count));
+ }
+ break;
+ case Token::SHR:
+ if (shift_count == 0 && instr->can_deopt()) {
+ __ testl(ToRegister(left), ToRegister(left));
+ DeoptimizeIf(negative, instr->environment());
+ } else {
+ __ shrl(ToRegister(left), Immediate(shift_count));
+ }
+ break;
+ case Token::SHL:
+ if (shift_count != 0) {
+ __ shll(ToRegister(left), Immediate(shift_count));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
}
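
A sketch (types assumed) of the SHR corner case handled above: JS >>> produces a uint32, but this Lithium result is consumed as a signed int32, so a logical shift by zero can leave the sign bit set and must deoptimize rather than yield a negative value.

#include <cstdint>
#include <cstdlib>

int32_t shr_or_deopt(int32_t value, uint8_t shift_count, bool can_deopt) {
  uint32_t result = static_cast<uint32_t>(value) >> (shift_count & 0x1F);
  if (can_deopt && static_cast<int32_t>(result) < 0) {
    std::abort();  // DeoptimizeIf(negative, ...)
  }
  return static_cast<int32_t>(result);
}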
void LCodeGen::DoSubI(LSubI* instr) {
- Abort("Unimplemented: %s", "DoSubI");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+
+ if (right->IsConstantOperand()) {
+ __ subl(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ } else if (right->IsRegister()) {
+ __ subl(ToRegister(left), ToRegister(right));
+ } else {
+ __ subl(ToRegister(left), ToOperand(right));
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
@@ -854,18 +908,29 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
+ ASSERT(instr->result()->IsRegister());
__ Move(ToRegister(instr->result()), instr->value());
}
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Abort("Unimplemented: %s", "DoJSArrayLength");
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
- Abort("Unimplemented: %s", "DoFixedArrayLength");
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(array, FixedArray::kLengthOffset));
+}
+
+
+void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
+ Register result = ToRegister(instr->result());
+ Register array = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(array, PixelArray::kLengthOffset));
}
@@ -875,12 +940,20 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Abort("Unimplemented: %s", "DoBitNotI");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->Equals(instr->result()));
+ __ not_(ToRegister(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
- Abort("Unimplemented: %s", "DoThrow");
+ __ push(ToRegister(instr->InputAt(0)));
+ CallRuntime(Runtime::kThrow, 1, instr);
+
+ if (FLAG_debug_code) {
+ Comment("Unreachable code.");
+ __ int3();
+ }
}
@@ -914,8 +987,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->InputAt(1)).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
- GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, NO_GENERIC_BINARY_FLAGS);
- stub.SetArgsInRegisters();
+ TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -930,12 +1002,88 @@ int LCodeGen::GetNextEmittedBlock(int block) {
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- Abort("Unimplemented: %s", "EmitBranch");
+ int next_block = GetNextEmittedBlock(current_block_);
+ right_block = chunk_->LookupDestination(right_block);
+ left_block = chunk_->LookupDestination(left_block);
+
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ if (cc != always) {
+ __ jmp(chunk_->GetAssemblyLabel(right_block));
+ }
+ }
}
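
A small model of the jump threading above, with printf standing in for the assembler: when either target is the next block to be emitted, a single conditional jump suffices and the fall-through edge costs nothing.

#include <cstdio>

void emit_branch(int left_block, int right_block, int next_block) {
  if (left_block == right_block) {
    std::printf("jmp B%d\n", left_block);      // EmitGoto
  } else if (left_block == next_block) {
    std::printf("j(!cc) B%d\n", right_block);  // fall through into left
  } else if (right_block == next_block) {
    std::printf("j(cc) B%d\n", left_block);    // fall through into right
  } else {
    std::printf("j(cc) B%d\njmp B%d\n", left_block, right_block);
  }
}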
void LCodeGen::DoBranch(LBranch* instr) {
- Abort("Unimplemented: %s", "DoBranch");
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Representation r = instr->hydrogen()->representation();
+ if (r.IsInteger32()) {
+ Register reg = ToRegister(instr->InputAt(0));
+ __ testl(reg, reg);
+ EmitBranch(true_block, false_block, not_zero);
+ } else if (r.IsDouble()) {
+ XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(reg, xmm0);
+ EmitBranch(true_block, false_block, not_equal);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->InputAt(0));
+ HType type = instr->hydrogen()->type();
+ if (type.IsBoolean()) {
+ __ Cmp(reg, Factory::true_value());
+ EmitBranch(true_block, false_block, equal);
+ } else if (type.IsSmi()) {
+ __ SmiCompare(reg, Smi::FromInt(0));
+ EmitBranch(true_block, false_block, not_equal);
+ } else {
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ j(equal, false_label);
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label);
+ __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+ __ j(equal, false_label);
+ __ SmiCompare(reg, Smi::FromInt(0));
+ __ j(equal, false_label);
+ __ JumpIfSmi(reg, true_label);
+
+ // Test for double values. Plus/minus zero and NaN are false.
+ NearLabel call_stub;
+ __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_stub);
+
+ // HeapNumber => false iff +0, -0, or NaN. These three cases set the
+ // zero flag when compared to zero using ucomisd.
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ __ j(zero, false_label);
+ __ jmp(true_label);
+
+ // The conversion stub doesn't cause garbage collections so it's
+ // safe to not record a safepoint after the call.
+ __ bind(&call_stub);
+ ToBooleanStub stub;
+ __ Pushad();
+ __ push(reg);
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ Popad();
+ EmitBranch(true_block, false_block, not_zero);
+ }
+ }
}
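
A hedged restatement of the ToBoolean ordering implemented above; the tag dispatch is flattened into flags and the generic stub fallback is a stand-in function.

#include <cmath>

static bool to_boolean_stub_result() { return true; }  // stand-in for the stub

bool to_boolean(bool is_undefined, bool is_true, bool is_false, bool is_smi,
                int smi_value, bool is_heap_number, double number_value) {
  if (is_undefined || is_false) return false;
  if (is_true) return true;
  if (is_smi) return smi_value != 0;
  if (is_heap_number)  // +0, -0 and NaN set the zero flag under ucomisd
    return !(number_value == 0.0 || std::isnan(number_value));
  return to_boolean_stub_result();  // anything else goes to ToBooleanStub
}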
@@ -957,7 +1105,11 @@ void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- Abort("Unimplemented: %s", "DoDeferredStackCheck");
+ __ Pushad();
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ __ Popad();
}
@@ -979,7 +1131,7 @@ void LCodeGen::DoGoto(LGoto* instr) {
}
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
Condition cond = no_condition;
switch (op) {
case Token::EQ:
@@ -1008,67 +1160,282 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- Abort("Unimplemented: %s", "EmitCmpI");
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (left->IsRegister()) {
+ __ cmpl(ToRegister(left), Immediate(value));
+ } else {
+ __ cmpl(ToOperand(left), Immediate(value));
+ }
+ } else if (right->IsRegister()) {
+ __ cmpl(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmpl(ToRegister(left), ToOperand(right));
+ }
}
void LCodeGen::DoCmpID(LCmpID* instr) {
- Abort("Unimplemented: %s", "DoCmpID");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+
+ NearLabel unordered;
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the unordered case, which produces a false value.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, &unordered);
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ NearLabel done;
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
+ __ j(cc, &done);
+
+ __ bind(&unordered);
+ __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpIDAndBranch");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the false block.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ EmitBranch(true_block, false_block, cc);
}
void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Abort("Unimplemented: %s", "DoCmpJSObjectEq");
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ Register result = ToRegister(instr->result());
+
+ NearLabel different, done;
+ __ cmpq(left, right);
+ __ j(not_equal, &different);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&different);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpJSObjectAndBranch");
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ cmpq(left, right);
+ EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoIsNull(LIsNull* instr) {
- Abort("Unimplemented: %s", "DoIsNull");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ // If the expression is known to be a smi, then it's
+ // definitely not null. Materialize false.
+ // Consider adding other type and representation tests too.
+ if (instr->hydrogen()->value()->type().IsSmi()) {
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ return;
+ }
+
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
+ if (instr->is_strict()) {
+ __ movl(result, Immediate(Heap::kTrueValueRootIndex));
+ NearLabel load;
+ __ j(equal, &load);
+ __ movl(result, Immediate(Heap::kFalseValueRootIndex));
+ __ bind(&load);
+ __ movq(result, Operand(kRootRegister, result, times_pointer_size, 0));
+ } else {
+ NearLabel true_value, false_value, done;
+ __ j(equal, &true_value);
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &true_value);
+ __ JumpIfSmi(reg, &false_value);
+ // Check for undetectable objects by looking in the bit field in
+ // the map. The object has already been smi checked.
+ Register scratch = result;
+ __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &true_value);
+ __ bind(&false_value);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ bind(&done);
+ }
}
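
A rough model of the non-strict path above: `x == null` also accepts undefined and "undetectable" objects (the Map::kIsUndetectable bit), while smis are never null.

bool loosely_equals_null(bool is_null, bool is_undefined, bool is_smi,
                         bool is_undetectable) {
  if (is_null || is_undefined) return true;
  if (is_smi) return false;
  return is_undetectable;
}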
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
- Abort("Unimplemented: %s", "DoIsNullAndBranch");
+ Register reg = ToRegister(instr->InputAt(0));
+
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ if (instr->hydrogen()->representation().IsSpecialization() ||
+ instr->hydrogen()->type().IsSmi()) {
+ // If the expression is known to be untagged or a smi, then it's
+ // definitely not null, and it can't be an undetectable object.
+ // Jump directly to the false block.
+ EmitGoto(false_block);
+ return;
+ }
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ Cmp(reg, Factory::null_value());
+ if (instr->is_strict()) {
+ EmitBranch(true_block, false_block, equal);
+ } else {
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ __ j(equal, true_label);
+ __ Cmp(reg, Factory::undefined_value());
+ __ j(equal, true_label);
+ __ JumpIfSmi(reg, false_label);
+ // Check for undetectable objects by looking in the bit field in
+ // the map. The object has already been smi checked.
+ Register scratch = ToRegister(instr->TempAt(0));
+ __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, not_zero);
+ }
}
Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object) {
- Abort("Unimplemented: %s", "EmitIsObject");
+ ASSERT(!input.is(kScratchRegister));
+
+ __ JumpIfSmi(input, is_not_object);
+
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ j(equal, is_object);
+
+ __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, is_not_object);
+
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(below, is_not_object);
+ __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
return below_equal;
}
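
A hedged model of EmitIsObject: null counts as an object here, smis and undetectable objects do not, and everything else reduces to an instance-type range check.

#include <cstdint>

bool is_object(bool is_smi, bool is_null, bool is_undetectable, uint8_t type,
               uint8_t first_js_object_type, uint8_t last_js_object_type) {
  if (is_smi) return false;
  if (is_null) return true;
  if (is_undetectable) return false;
  return first_js_object_type <= type && type <= last_js_object_type;
}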
void LCodeGen::DoIsObject(LIsObject* instr) {
- Abort("Unimplemented: %s", "DoIsObject");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Label is_false, is_true, done;
+
+ Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
+ __ j(true_cond, &is_true);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Abort("Unimplemented: %s", "DoIsObjectAndBranch");
+ Register reg = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond = EmitIsObject(reg, false_label, true_label);
+
+ EmitBranch(true_block, false_block, true_cond);
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
- Abort("Unimplemented: %s", "DoIsSmi");
+ LOperand* input_operand = instr->InputAt(0);
+ Register result = ToRegister(instr->result());
+ if (input_operand->IsRegister()) {
+ Register input = ToRegister(input_operand);
+ __ CheckSmiToIndicator(result, input);
+ } else {
+ Operand input = ToOperand(instr->InputAt(0));
+ __ CheckSmiToIndicator(result, input);
+ }
+ // result is zero if input is a smi, and one otherwise.
+ ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
+ __ movq(result, Operand(kRootRegister, result, times_pointer_size,
+ Heap::kTrueValueRootIndex * kPointerSize));
}
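
A sketch of the branch-free trick above: CheckSmiToIndicator leaves 0 for a smi and 1 otherwise, and because the false root sits exactly one slot after the true root, the indicator indexes the root array directly. The root indices below are illustrative, not V8's real values.

#include <cassert>

const int kTrueValueRootIndex = 7;                         // assumed value
const int kFalseValueRootIndex = kTrueValueRootIndex + 1;  // required layout

int boolean_root_index(int indicator /* 0 if smi, 1 otherwise */) {
  assert(kFalseValueRootIndex == kTrueValueRootIndex + 1);
  return kTrueValueRootIndex + indicator;
}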
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Abort("Unimplemented: %s", "DoIsSmiAndBranch");
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Condition is_smi;
+ if (instr->InputAt(0)->IsRegister()) {
+ Register input = ToRegister(instr->InputAt(0));
+ is_smi = masm()->CheckSmi(input);
+ } else {
+ Operand input = ToOperand(instr->InputAt(0));
+ is_smi = masm()->CheckSmi(input);
+ }
+ EmitBranch(true_block, false_block, is_smi);
+}
+
+
+static InstanceType TestType(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT(from == to || to == LAST_TYPE);
+ return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return equal;
+ if (to == LAST_TYPE) return above_equal;
+ if (from == FIRST_TYPE) return below_equal;
+ UNREACHABLE();
+ return equal;
}
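
A model of how the two helpers above collapse a [from, to] instance-type interval into a single compare; only intervals pinned at FIRST_TYPE or LAST_TYPE (or a single type) are expected, as the ASSERT and UNREACHABLE enforce.

#include <cassert>

enum { FIRST_TYPE = 0, LAST_TYPE = 0xFF };  // stand-ins for v8::InstanceType

// Writes the condition name to *cc and returns the type to compare against.
int collapse_interval(int from, int to, const char** cc) {
  if (from == to) { *cc = "equal"; return from; }
  if (to == LAST_TYPE) { *cc = "above_equal"; return from; }
  assert(from == FIRST_TYPE);
  *cc = "below_equal";
  return to;
}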
@@ -1078,7 +1445,17 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Abort("Unimplemented: %s", "DoHasInstanceTypeAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ __ JumpIfSmi(input, false_label);
+
+ __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
+ EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
@@ -1089,34 +1466,118 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Abort("Unimplemented: %s", "DoHasCachedArrayIndexAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ testl(FieldOperand(input, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ EmitBranch(true_block, false_block, not_equal);
}
-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
+// Branches to a label or falls through with the answer in the z flag.
+// Trashes the temp register and possibly input (if it and temp are aliased).
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
- Handle<String>class_name,
+ Handle<String> class_name,
Register input,
- Register temp,
- Register temp2) {
- Abort("Unimplemented: %s", "EmitClassOfTest");
+ Register temp) {
+ __ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+
+ // Map is now in temp.
+ // Functions have class 'Function'.
+ __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+ if (class_name->IsEqualTo(CStrVector("Function"))) {
+ __ j(equal, is_true);
+ } else {
+ __ j(equal, is_false);
+ }
+
+ // Check if the constructor in the map is a function.
+ __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
+ if (class_name->IsEqualTo(CStrVector("Object"))) {
+ __ j(not_equal, is_true);
+ } else {
+ __ j(not_equal, is_false);
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(temp, FieldOperand(temp,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is a symbol because it's a literal.
+ // The name in the constructor is a symbol because of the way the context is
+ // booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are symbols it is sufficient to use an identity
+ // comparison.
+ ASSERT(class_name->IsSymbol());
+ __ Cmp(temp, class_name);
+ // End with the answer in the z flag.
}
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Abort("Unimplemented: %s", "DoClassOfTest");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ ASSERT(input.is(result));
+ Register temp = ToRegister(instr->TempAt(0));
+ Handle<String> class_name = instr->hydrogen()->class_name();
+ NearLabel done;
+ Label is_true, is_false;
+
+ EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
+
+ __ j(not_equal, &is_false);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Abort("Unimplemented: %s", "DoClassOfTestAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ EmitClassOfTest(true_label, false_label, class_name, input, temp);
+
+ EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpMapAndBranch");
+ Register reg = ToRegister(instr->InputAt(0));
+ int true_block = instr->true_block_id();
+ int false_block = instr->false_block_id();
+
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+ EmitBranch(true_block, false_block, equal);
}
@@ -1126,7 +1587,13 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- Abort("Unimplemented: %s", "DoInstanceOfAndBranch");
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ testq(rax, rax);
+ EmitBranch(true_block, false_block, zero);
}
@@ -1142,12 +1609,42 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
void LCodeGen::DoCmpT(LCmpT* instr) {
- Abort("Unimplemented: %s", "DoCmpT");
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = TokenToCondition(op, false);
+ if (op == Token::GT || op == Token::LTE) {
+ condition = ReverseCondition(condition);
+ }
+ NearLabel true_value, done;
+ __ testq(rax, rax);
+ __ j(condition, &true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpTAndBranch");
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ // The compare stub expects compare condition and the input operands
+ // reversed for GT and LTE.
+ Condition condition = TokenToCondition(op, false);
+ if (op == Token::GT || op == Token::LTE) {
+ condition = ReverseCondition(condition);
+ }
+ __ testq(rax, rax);
+ EmitBranch(true_block, false_block, condition);
}
@@ -1160,17 +1657,46 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ movq(rsp, rbp);
__ pop(rbp);
- __ ret((ParameterCount() + 1) * kPointerSize);
+ __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
}
void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
- Abort("Unimplemented: %s", "DoLoadGlobal");
+ Register result = ToRegister(instr->result());
+ if (result.is(rax)) {
+ __ load_rax(instr->hydrogen()->cell().location(),
+ RelocInfo::GLOBAL_PROPERTY_CELL);
+ } else {
+ __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(result, Operand(result, 0));
+ }
+ if (instr->hydrogen()->check_hole_value()) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
}
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
- Abort("Unimplemented: %s", "DoStoreGlobal");
+ Register value = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ ASSERT(!value.is(temp));
+ bool check_hole = instr->hydrogen()->check_hole_value();
+ if (!check_hole && value.is(rax)) {
+ __ store_rax(instr->hydrogen()->cell().location(),
+ RelocInfo::GLOBAL_PROPERTY_CELL);
+ return;
+ }
+ // If the cell we are storing to contains the hole it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted. We deoptimize in that case.
+ __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ if (check_hole) {
+ __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+ __ movq(Operand(temp, 0), value);
}
@@ -1180,22 +1706,93 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Abort("Unimplemented: %s", "DoLoadNamedField");
+ Register object = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ if (instr->hydrogen()->is_in_object()) {
+ __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
+ } else {
+ __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
+ }
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- Abort("Unimplemented: %s", "DoLoadNamedGeneric");
+ ASSERT(ToRegister(instr->object()).is(rax));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ __ Move(rcx, instr->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Abort("Unimplemented: %s", "DoLoadFunctionPrototype");
+ Register function = ToRegister(instr->function());
+ Register result = ToRegister(instr->result());
+
+ // Check that the function really is a function.
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ // Check whether the function has an instance prototype.
+ NearLabel non_instance;
+ __ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNonInstancePrototype));
+ __ j(not_zero, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ __ movq(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check that the function has a prototype or an initial map.
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+
+ // If the function does not have an initial map, we're done.
+ NearLabel done;
+ __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
+ __ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ __ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in the function's map.
+ __ bind(&non_instance);
+ __ movq(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ __ bind(&done);
}
void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Abort("Unimplemented: %s", "DoLoadElements");
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
+ if (FLAG_debug_code) {
+ NearLabel done;
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ j(equal, &done);
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Factory::pixel_array_map());
+ __ j(equal, &done);
+ __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Factory::fixed_cow_array_map());
+ __ Check(equal, "Check for fast elements failed.");
+ __ bind(&done);
+ }
+}
+
+
+void LCodeGen::DoLoadPixelArrayExternalPointer(
+ LLoadPixelArrayExternalPointer* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+ __ movq(result, FieldOperand(input, PixelArray::kExternalPointerOffset));
}
@@ -1205,7 +1802,31 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
+ Register elements = ToRegister(instr->elements());
+ Register key = ToRegister(instr->key());
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(elements));
+
+ // Load the result.
+ __ movq(result, FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Check for the hole value.
+ __ Cmp(result, Factory::the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
+ Register external_elements = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(external_elements));
+
+ // Load the result.
+ __ movzxbq(result, Operand(external_elements, key, times_1, 0));
}
@@ -1230,29 +1851,88 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- Abort("Unimplemented: %s", "DoPushArgument");
+ LOperand* argument = instr->InputAt(0);
+ if (argument->IsConstantOperand()) {
+ LConstantOperand* const_op = LConstantOperand::cast(argument);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ push(Immediate(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsDouble()) {
+ Abort("unsupported double immediate");
+ } else {
+ ASSERT(r.IsTagged());
+ __ Push(literal);
+ }
+ } else if (argument->IsRegister()) {
+ __ push(ToRegister(argument));
+ } else {
+ ASSERT(!argument->IsDoubleRegister());
+ __ push(ToOperand(argument));
+ }
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+ Register result = ToRegister(instr->result());
+ __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Abort("Unimplemented: %s", "DoGlobalObject");
+ Register result = ToRegister(instr->result());
+ __ movq(result, GlobalObjectOperand());
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Abort("Unimplemented: %s", "DoGlobalReceiver");
+ Register result = ToRegister(instr->result());
+ __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr) {
- Abort("Unimplemented: %s", "CallKnownFunction");
+ // Change context if needed.
+ bool change_context =
+ (graph()->info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ }
+
+ // Set rax to arguments count if adaption is not needed. Assumes that rax
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ Set(rax, arity);
+ }
+
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+
+ // Invoke function.
+ if (*function == *graph()->info()->closure()) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Setup deoptimization.
+ RegisterLazyDeoptimization(instr);
+
+ // Restore context.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- Abort("Unimplemented: %s", "DoCallConstantFunction");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ __ Move(rdi, instr->function());
+ CallKnownFunction(instr->function(), instr->arity(), instr);
}
@@ -1317,7 +1997,13 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
void LCodeGen::DoCallNamed(LCallNamed* instr) {
- Abort("Unimplemented: %s", "DoCallNamed");
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ int arity = instr->arity();
+ Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ __ Move(rcx, instr->name());
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -1327,17 +2013,29 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallGlobal");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ int arity = instr->arity();
+ Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+ __ Move(rcx, instr->name());
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallKnownGlobal");
+ ASSERT(ToRegister(instr->result()).is(rax));
+ __ Move(rdi, instr->target());
+ CallKnownFunction(instr->target(), instr->arity(), instr);
}
void LCodeGen::DoCallNew(LCallNew* instr) {
- Abort("Unimplemented: %s", "DoCallNew");
+ ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Set(rax, instr->arity());
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -1347,7 +2045,32 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Abort("Unimplemented: %s", "DoStoreNamedField");
+ Register object = ToRegister(instr->object());
+ Register value = ToRegister(instr->value());
+ int offset = instr->offset();
+
+ if (!instr->transition().is_null()) {
+ __ Move(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+ }
+
+ // Do the store.
+ if (instr->is_in_object()) {
+ __ movq(FieldOperand(object, offset), value);
+ if (instr->needs_write_barrier()) {
+ Register temp = ToRegister(instr->TempAt(0));
+ // Update the write barrier for the object for in-object properties.
+ __ RecordWrite(object, offset, value, temp);
+ }
+ } else {
+ Register temp = ToRegister(instr->TempAt(0));
+ __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movq(FieldOperand(temp, offset), value);
+ if (instr->needs_write_barrier()) {
+ // Update the write barrier for the properties array.
+ // object is used as a scratch register.
+ __ RecordWrite(temp, offset, value, object);
+ }
+ }
}
@@ -1357,12 +2080,43 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- Abort("Unimplemented: %s", "DoBoundsCheck");
+ if (instr->length()->IsRegister()) {
+ __ cmpq(ToRegister(instr->index()), ToRegister(instr->length()));
+ } else {
+ __ cmpq(ToRegister(instr->index()), ToOperand(instr->length()));
+ }
+ DeoptimizeIf(above_equal, instr->environment());
}
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->object());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ int offset =
+ ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(FieldOperand(elements, offset), value);
+ } else {
+ __ movq(FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ value);
+ }
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ // Compute address of modified element and store it into key register.
+ __ lea(key, FieldOperand(elements,
+ key,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ RecordWrite(elements, key, value);
+ }
}
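
A sketch of the slot arithmetic above, with assumed layout constants: the lea folds the base, the scaled key and the header offset into the key register so RecordWrite sees the exact slot, and FieldOperand also subtracts the heap-object tag.

#include <cstdint>

const int kPointerSize = 8;
const int kHeapObjectTag = 1;
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // assumed layout

uint8_t* element_slot(uint8_t* elements, intptr_t key) {
  return elements + key * kPointerSize + kFixedArrayHeaderSize - kHeapObjectTag;
}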
@@ -1372,54 +2126,186 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- Abort("Unimplemented: %s", "DoInteger32ToDouble");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- Abort("Unimplemented: %s", "DoNumberTagI");
-}
-
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
- Abort("Unimplemented: %s", "DoDeferredNumberTagI");
+ __ Integer32ToSmi(reg, reg);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- Abort("Unimplemented: %s", "DoNumberTagD");
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->result());
+ Register tmp = ToRegister(instr->TempAt(0));
+
+ DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(reg, tmp, deferred->entry());
+ } else {
+ __ jmp(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- Abort("Unimplemented: %s", "DoDeferredNumberTagD");
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ Move(reg, Smi::FromInt(0));
+
+ __ PushSafepointRegisters();
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Ensure that value in rax survives popping registers.
+ __ movq(kScratchRegister, rax);
+ __ PopSafepointRegisters();
+ __ movq(reg, kScratchRegister);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- Abort("Unimplemented: %s", "DoSmiTag");
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Register input = ToRegister(instr->InputAt(0));
+ ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+ __ Integer32ToSmi(input, input);
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Abort("Unimplemented: %s", "DoSmiUntag");
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Register input = ToRegister(instr->InputAt(0));
+ if (instr->needs_check()) {
+ Condition is_smi = __ CheckSmi(input);
+ DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+ }
+ __ SmiToInteger32(input, input);
}
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
LEnvironment* env) {
- Abort("Unimplemented: %s", "EmitNumberUntagD");
+ NearLabel load_smi, heap_number, done;
+
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi);
+
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &heap_number);
+
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
+
+ // Convert undefined to NaN. Compute NaN as 0/0.
+ __ xorpd(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done);
+
+ // Heap number to XMM conversion.
+ __ bind(&heap_number);
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ // Smi to XMM conversion
+ __ bind(&load_smi);
+ __ SmiToInteger32(kScratchRegister, input_reg); // Untag smi first.
+ __ cvtlsi2sd(result_reg, kScratchRegister);
+ __ bind(&done);
}
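
A flattened model of the three paths above: smi to integer convert, heap number to raw double load, undefined to NaN manufactured as 0/0 (the xorpd/divsd pair); anything else deoptimizes.

#include <cstdint>
#include <cstdlib>

double untag_number(bool is_smi, int32_t smi_value, bool is_heap_number,
                    double heap_value, bool is_undefined) {
  if (is_smi) return static_cast<double>(smi_value);  // cvtlsi2sd
  if (is_heap_number) return heap_value;              // movsd from the object
  if (is_undefined) { double zero = 0.0; return zero / zero; }  // NaN as 0/0
  std::abort();  // DeoptimizeIf(not_equal, env)
}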
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+ LTaggedToI* instr_;
+};
+
+
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Abort("Unimplemented: %s", "DoDeferredTaggedToI");
+ NearLabel done, heap_number;
+ Register input_reg = ToRegister(instr->InputAt(0));
+
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+
+ if (instr->truncating()) {
+ __ j(equal, &heap_number);
+ // Check for undefined. Undefined is converted to zero for truncating
+ // conversions.
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, instr->environment());
+ __ movl(input_reg, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&heap_number);
+
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ cvttsd2siq(input_reg, xmm0);
+ __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
+ __ cmpq(input_reg, kScratchRegister);
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ // Deoptimize if we don't have a heap number.
+ DeoptimizeIf(not_equal, instr->environment());
+
+ XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
+ __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ cvttsd2si(input_reg, xmm0);
+ __ cvtlsi2sd(xmm_temp, input_reg);
+ __ ucomisd(xmm0, xmm_temp);
+ DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ testl(input_reg, input_reg);
+ __ j(not_zero, &done);
+ __ movmskpd(input_reg, xmm0);
+ __ andl(input_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ }
+ __ bind(&done);
}
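
A note on the truncating path above: cvttsd2siq writes the sentinel 0x8000000000000000 when the double is NaN or out of int64 range, so a single 64-bit compare (hence cmpq against the full scratch value) catches both failure modes. A minimal sketch of the check:

#include <cstdint>
#include <cstdlib>

const uint64_t kConversionFailure = 0x8000000000000000ull;  // cvttsd2siq

int32_t check_truncation(int64_t raw_result) {
  if (static_cast<uint64_t>(raw_result) == kConversionFailure) std::abort();
  return static_cast<int32_t>(raw_result);  // low 32 bits carry the value
}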
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- Abort("Unimplemented: %s", "DoTaggedToI");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ ASSERT(input->Equals(instr->result()));
+
+ Register input_reg = ToRegister(input);
+ DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiToInteger32(input_reg, input_reg);
+ __ bind(deferred->exit());
}
@@ -1434,42 +2320,146 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- Abort("Unimplemented: %s", "DoCheckSmi");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ Condition cc = masm()->CheckSmi(ToRegister(input));
+ if (instr->condition() != equal) {
+ cc = NegateCondition(cc);
+ }
+ DeoptimizeIf(cc, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Abort("Unimplemented: %s", "DoCheckInstanceType");
+ Register input = ToRegister(instr->InputAt(0));
+ InstanceType first = instr->hydrogen()->first();
+ InstanceType last = instr->hydrogen()->last();
+
+ __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+
+ // If there is only one type in the interval check for equality.
+ if (first == last) {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(first)));
+ DeoptimizeIf(not_equal, instr->environment());
+ } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
+ // String has a dedicated bit in instance type.
+ __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(kIsNotStringMask));
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(first)));
+ DeoptimizeIf(below, instr->environment());
+ // Omit check for the last type.
+ if (last != LAST_TYPE) {
+ __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+ Immediate(static_cast<int8_t>(last)));
+ DeoptimizeIf(above, instr->environment());
+ }
+ }
}
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Abort("Unimplemented: %s", "DoCheckFunction");
+ ASSERT(instr->InputAt(0)->IsRegister());
+ Register reg = ToRegister(instr->InputAt(0));
+ __ Cmp(reg, instr->hydrogen()->target());
+ DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoCheckMap(LCheckMap* instr) {
- Abort("Unimplemented: %s", "DoCheckMap");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ Register reg = ToRegister(input);
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ instr->hydrogen()->map());
+ DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- Abort("Unimplemented: %s", "LoadHeapObject");
+ if (Heap::InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ Factory::NewJSGlobalPropertyCell(object);
+ __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(result, Operand(result, 0));
+ } else {
+ __ Move(result, object);
+ }
}
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
+ Register reg = ToRegister(instr->TempAt(0));
+
+ Handle<JSObject> holder = instr->holder();
+ Handle<JSObject> current_prototype = instr->prototype();
+
+ // Load prototype object.
+ LoadHeapObject(reg, current_prototype);
+
+ // Check prototype maps up to the holder.
+ while (!current_prototype.is_identical_to(holder)) {
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Handle<Map>(current_prototype->map()));
+ DeoptimizeIf(not_equal, instr->environment());
+ current_prototype =
+ Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+ // Load next prototype object.
+ LoadHeapObject(reg, current_prototype);
+ }
+
+ // Check the holder map.
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Handle<Map>(current_prototype->map()));
+ DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Abort("Unimplemented: %s", "DoArrayLiteral");
+ // Setup the parameters to the stub/runtime call.
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(instr->hydrogen()->constant_elements());
+
+ // Pick the right runtime function or stub to call.
+ int length = instr->hydrogen()->length();
+ if (instr->hydrogen()->IsCopyOnWrite()) {
+ ASSERT(instr->hydrogen()->depth() == 1);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+ } else {
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, length);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ }
}
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Abort("Unimplemented: %s", "DoObjectLiteral");
+ // Setup the parameters to the stub/runtime call.
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(instr->hydrogen()->constant_properties());
+ __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
+
+ // Pick the right runtime function to call.
+ if (instr->hydrogen()->depth() > 1) {
+ CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+ } else {
+ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ }
}
@@ -1479,7 +2469,20 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- Abort("Unimplemented: %s", "DoFunctionLiteral");
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+ bool pretenure = instr->hydrogen()->pretenure();
+ if (shared_info->num_literals() == 0 && !pretenure) {
+ FastNewClosureStub stub;
+ __ Push(shared_info);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ } else {
+ __ push(rsi);
+ __ Push(shared_info);
+ __ Push(pretenure ? Factory::true_value() : Factory::false_value());
+ CallRuntime(Runtime::kNewClosure, 3, instr);
+ }
}
@@ -1493,8 +2496,67 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
}
+void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
+ Register result = ToRegister(instr->result());
+ NearLabel true_label;
+ NearLabel false_label;
+ NearLabel done;
+
+ EmitIsConstructCall(result);
+ __ j(equal, &true_label);
+
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&true_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp = ToRegister(instr->TempAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitIsConstructCall(temp);
+ EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp) {
+ // Get the frame pointer for the calling frame.
+ __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
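+ // (An adaptor frame is present when the caller passed a different number
+ // of arguments than the callee declares, e.g. `new f(1)` for an f that
+ // declares two parameters.)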
+ NearLabel check_frame_marker;
+ __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker);
+ __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
+}
+
+
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Abort("Unimplemented: %s", "DoTypeofIsAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition final_branch_condition = EmitTypeofIs(true_label,
+ false_label,
+ input,
+ instr->type_literal());
+
+ EmitBranch(true_block, false_block, final_branch_condition);
}
@@ -1502,8 +2564,63 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name) {
- Abort("Unimplemented: %s", "EmitTypeofIs");
- return no_condition;
+ Condition final_branch_condition = no_condition;
+ if (type_name->Equals(Heap::number_symbol())) {
+ __ JumpIfSmi(input, true_label);
+ __ Cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(Heap::string_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, false_label);
+ __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
+ final_branch_condition = below;
+
+ } else if (type_name->Equals(Heap::boolean_symbol())) {
+ __ CompareRoot(input, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label);
+ __ CompareRoot(input, Heap::kFalseValueRootIndex);
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(Heap::undefined_symbol())) {
+ __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+ __ j(equal, true_label);
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ final_branch_condition = not_zero;
+
+ } else if (type_name->Equals(Heap::function_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
+ final_branch_condition = above_equal;
+
+ } else if (type_name->Equals(Heap::object_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ Cmp(input, Factory::null_value());
+ __ j(equal, true_label);
+ __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ // Check for undetectable objects => false.
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, false_label);
+ // Check for JS objects that are not RegExp or Function => true.
+ __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
+ __ j(below, false_label);
+ __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
+ final_branch_condition = below_equal;
+
+ } else {
+ final_branch_condition = never;
+ __ jmp(false_label);
+ }
+
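+ // A type literal not handled above can never match, so the else branch
+ // jumps straight to the false label and reports a condition that never
+ // holds.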
+ return final_branch_condition;
}
@@ -1526,7 +2643,6 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
// Perform stack overflow check.
NearLabel done;
- ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done);
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 8d1c5c4e..6f8f06e3 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -34,37 +34,15 @@
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
+#include "x64/lithium-gap-resolver-x64.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
-class LGapNode;
class SafepointGenerator;
-class LGapResolver BASE_EMBEDDED {
- public:
- LGapResolver();
- const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand);
-
- private:
- LGapNode* LookupNode(LOperand* operand);
- bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
- bool CanReach(LGapNode* a, LGapNode* b);
- void RegisterMove(LMoveOperands move);
- void AddResultMove(LOperand* from, LOperand* to);
- void AddResultMove(LGapNode* from, LGapNode* to);
- void ResolveCycle(LGapNode* start, LOperand* marker_operand);
-
- ZoneList<LGapNode*> nodes_;
- ZoneList<LGapNode*> identified_cycles_;
- ZoneList<LMoveOperands> result_;
- int next_visited_id_;
-};
-
-
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -80,10 +58,24 @@ class LCodeGen BASE_EMBEDDED {
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+
+ // Support for converting LOperands to assembler types.
+ Register ToRegister(LOperand* op) const;
+ XMMRegister ToDoubleRegister(LOperand* op) const;
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ int ToInteger32(LConstantOperand* op) const;
+ bool IsTaggedConstant(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op) const;
+
+
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -95,7 +87,6 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
@@ -129,7 +120,6 @@ class LCodeGen BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
- MacroAssembler* masm() const { return masm_; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -138,8 +128,7 @@ class LCodeGen BASE_EMBEDDED {
Label* if_false,
Handle<String> class_name,
Register input,
- Register temporary,
- Register temporary2);
+ Register temporary);
int StackSlotCount() const { return chunk()->spill_slot_count(); }
int ParameterCount() const { return scope()->num_parameters(); }
@@ -191,13 +180,6 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsInteger32Constant(LConstantOperand* op) const;
- int ToInteger32(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);
@@ -210,6 +192,10 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSin(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
@@ -232,11 +218,13 @@ class LCodeGen BASE_EMBEDDED {
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
- Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsConstructCall().
+ // Caller should branch on equal condition.
+ void EmitIsConstructCall(Register temp);
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
new file mode 100644
index 00000000..cedd0256
--- /dev/null
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -0,0 +1,320 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
+#include "x64/lithium-gap-resolver-x64.h"
+#include "x64/lithium-codegen-x64.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ PerformMove(i);
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph. We use operand swaps to resolve cycles,
+ // which means that a call to PerformMove could change any source operand
+ // in the move graph.
+
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack-allocated local. Recursion may allow
+ // multiple moves to be pending.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ // Though PerformMove can change any source operand in the move graph,
+ // this call cannot create a blocking move via a swap (this loop does
+ // not miss any). Assume there is a non-blocking move with source A
+ // and this move is blocked on source B and there is a swap of A and
+ // B. Then A and B must be involved in the same cycle (or they would
+ // not be swapped). Since this move's destination is B and there is
+ // only a single incoming edge to an operand, this move must also be
+ // involved in the same cycle. In that case, the blocking move will
+ // be created but will be "pending" when we return from PerformMove.
+ PerformMove(i);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // This move's source may have changed due to swaps to resolve cycles and
+ // so it may now be the last move in the cycle. If so remove it.
+ if (moves_[index].source()->Equals(destination)) {
+ moves_[index].Eliminate();
+ return;
+ }
+
+ // The move may be blocked on at most one pending move, in which case
+ // we have a cycle. Search for such a blocking move and perform a swap to
+ // resolve it.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ EmitSwap(index);
+ return;
+ }
+ }
+
+ // This move is not blocked.
+ EmitMove(index);
+}
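+
+// Example: the two-move cycle {rax -> rbx, rbx -> rax} is resolved with a
+// single xchg: PerformMove on the first move recurses into the second, the
+// second finds the first still pending and emits a swap, and the first move
+// then has source == destination and is simply eliminated.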
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ Register src = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ movq(dst, src);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movq(dst, src);
+ }
+
+ } else if (source->IsStackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ movq(dst, src);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
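+ // x64 has no memory-to-memory move, so the transfer bounces through
+ // kScratchRegister (r10, which the macro assembler reserves and the
+ // register allocator never hands out).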
+ __ movq(kScratchRegister, src);
+ __ movq(dst, kScratchRegister);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsInteger32Constant(constant_source)) {
+ __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ if (cgen_->IsInteger32Constant(constant_source)) {
+ // Allow top 32 bits of an untagged Integer32 to be arbitrary.
+ __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ movsd(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ movsd(cgen_->ToOperand(destination), src);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ movsd(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ movsd(xmm0, src);
+ __ movsd(cgen_->ToOperand(destination), xmm0);
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Swap two general-purpose registers.
+ Register src = cgen_->ToRegister(source);
+ Register dst = cgen_->ToRegister(destination);
+ __ xchg(dst, src);
+
+ } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+ (source->IsStackSlot() && destination->IsRegister())) {
+ // Swap a general-purpose register and a stack slot.
+ Register reg =
+ cgen_->ToRegister(source->IsRegister() ? source : destination);
+ Operand mem =
+ cgen_->ToOperand(source->IsRegister() ? destination : source);
+ __ movq(kScratchRegister, mem);
+ __ movq(mem, reg);
+ __ movq(reg, kScratchRegister);
+
+ } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
+ (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
+ // Swap two stack slots or two double stack slots.
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movq(kScratchRegister, dst);
+ __ movsd(dst, xmm0);
+ __ movq(src, kScratchRegister);
+
+ } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ // Swap two double registers.
+ XMMRegister source_reg = cgen_->ToDoubleRegister(source);
+ XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
+ __ movsd(xmm0, source_reg);
+ __ movsd(source_reg, destination_reg);
+ __ movsd(destination_reg, xmm0);
+
+ } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+ // Swap a double register and a double stack slot.
+ ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
+ (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
+ XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
+ ? source
+ : destination);
+ LOperand* other = source->IsDoubleRegister() ? destination : source;
+ ASSERT(other->IsDoubleStackSlot());
+ Operand other_operand = cgen_->ToOperand(other);
+ __ movsd(xmm0, other_operand);
+ __ movsd(other_operand, reg);
+ __ movsd(reg, xmm0);
+
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+
+ // The swap of source and destination has executed a move from source to
+ // destination.
+ moves_[index].Eliminate();
+
+ // Any unperformed (including pending) move with a source of either
+ // this move's source or destination needs to have its source
+ // changed to reflect the state of affairs after the swap.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(source)) {
+ moves_[i].set_source(destination);
+ } else if (other_move.Blocks(destination)) {
+ moves_[i].set_source(source);
+ }
+ }
+}
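+
+// Example: the three-move cycle {rax -> rbx, rbx -> rcx, rcx -> rax} resolves
+// to two xchg instructions; after each swap the loop above rewrites the
+// sources of the remaining moves, so no extra temporary is ever needed.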
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-gap-resolver-x64.h b/src/x64/lithium-gap-resolver-x64.h
new file mode 100644
index 00000000..d8284559
--- /dev/null
+++ b/src/x64/lithium-gap-resolver-x64.h
@@ -0,0 +1,74 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Execute a move by emitting a swap of two operands. The move from
+ // source to destination is removed from the move graph.
+ void EmitSwap(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 5ef6eb75..a6afbf72 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_X64)
+#include "lithium-allocator-inl.h"
#include "x64/lithium-x64.h"
#include "x64/lithium-codegen-x64.h"
@@ -68,11 +69,33 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
}
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+ // Call instructions can use only fixed registers as
+ // temporaries and outputs because all registers
+ // are blocked by the calling convention.
+ // Inputs must use a fixed register or a non-register policy.
+ ASSERT(Output() == NULL ||
+ LUnallocated::cast(Output())->HasFixedPolicy() ||
+ !LUnallocated::cast(Output())->HasRegisterPolicy());
+ for (UseIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+ for (TempIterator it(this); it.HasNext(); it.Advance()) {
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
+ }
+}
+#endif
+
+
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
- if (HasResult()) {
- PrintOutputOperandTo(stream);
- }
+
+ PrintOutputOperandTo(stream);
PrintDataTo(stream);
@@ -162,6 +185,12 @@ const char* LArithmeticT::Mnemonic() const {
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::SHL: return "sal-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
default:
UNREACHABLE();
return NULL;
@@ -262,7 +291,8 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- stream->Add("(%d, %d)", context_chain_length(), slot_index());
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d]", slot_index());
}
@@ -318,7 +348,7 @@ int LChunk::GetNextSpillIndex(bool is_double) {
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
// All stack slots are Double stack slots on x64.
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
@@ -386,7 +416,7 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
}
-int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LGap* gap = new LGap(block);
int index = -1;
if (instr->IsControl()) {
@@ -402,7 +432,6 @@ int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
pointer_maps_.Add(instr->pointer_map());
instr->pointer_map()->set_lithium_position(index);
}
- return index;
}
@@ -653,16 +682,16 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id) {
- ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+ ASSERT(instruction_pending_deoptimization_environment_ == NULL);
ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instructions_pending_deoptimization_environment_ = instr;
+ instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = ast_id;
return instr;
}
void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instructions_pending_deoptimization_environment_ = NULL;
+ instruction_pending_deoptimization_environment_ = NULL;
pending_deoptimization_ast_id_ = AstNode::kNoNumber;
}
@@ -670,7 +699,10 @@ void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
- allocator_->MarkAsCall();
+#ifdef DEBUG
+ instr->VerifyCall();
+#endif
+ instr->MarkAsCall();
instr = AssignPointerMap(instr);
if (hinstr->HasSideEffects()) {
@@ -695,7 +727,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
- allocator_->MarkAsSaveDoubles();
+ instr->MarkAsSaveDoubles();
return instr;
}
@@ -740,8 +772,72 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
- Abort("Unimplemented: %s", "DoBit");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(op, left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), rdx);
+ LOperand* right = UseFixed(instr->right(), rax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
+
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->OperandAt(0)->representation().IsInteger32());
+ ASSERT(instr->OperandAt(1)->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+
+ HValue* right_value = instr->OperandAt(1);
+ LOperand* right = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ } else {
+ right = UseFixed(right_value, rcx);
+ }
+
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
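+ // Example: with x == -1, `x >>> 0` yields 0xffffffff, which has no int32
+ // representation; the shift can stay in int32 only if every use truncates
+ // the result, otherwise it needs an environment to deoptimize with.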
+ bool can_deopt = (op == Token::SHR && constant_value == 0);
+ if (can_deopt) {
+ bool can_truncate = true;
+ for (int i = 0; i < instr->uses()->length(); i++) {
+ if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
+ can_truncate = false;
+ break;
+ }
+ }
+ can_deopt = !can_truncate;
+ }
+
+ LShiftI* result = new LShiftI(op, left, right, can_deopt);
+ return can_deopt
+ ? AssignEnvironment(DefineSameAsFirst(result))
+ : DefineSameAsFirst(result);
}
@@ -836,7 +932,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- allocator_->BeginInstruction();
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
@@ -847,26 +942,19 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsBranch() && !instr->IsGoto()) {
- // TODO(fschneider): Handle branch instructions uniformly like
- // other instructions. This requires us to generate the right
- // branch instruction already at the HIR level.
+ if (current->IsTest() && !instr->IsGoto()) {
ASSERT(instr->IsControl());
- HBranch* branch = HBranch::cast(current);
- instr->set_hydrogen_value(branch->value());
- HBasicBlock* first = branch->FirstSuccessor();
- HBasicBlock* second = branch->SecondSuccessor();
+ HTest* test = HTest::cast(current);
+ instr->set_hydrogen_value(test->value());
+ HBasicBlock* first = test->FirstSuccessor();
+ HBasicBlock* second = test->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
- int index = chunk_->AddInstruction(instr, current_block_);
- allocator_->SummarizeInstruction(index);
- } else {
- // This instruction should be omitted.
- allocator_->OmitInstruction();
+ chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
@@ -912,16 +1000,108 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- Abort("Unimplemented: %s", "DoBranch");
- return NULL;
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+ HValue* v = instr->value();
+ if (v->EmitAtUses()) {
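+ // The tested value is only used by this branch, so the compare fuses with
+ // it: e.g. the condition of `if (a < b)` becomes a single LCmpIDAndBranch
+ // instead of a materialized LCmpID followed by an LBranch.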
+ if (v->IsClassOfTest()) {
+ HClassOfTest* compare = HClassOfTest::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+ TempRegister());
+ } else if (v->IsCompare()) {
+ HCompare* compare = HCompare::cast(v);
+ Token::Value op = compare->token();
+ HValue* left = compare->left();
+ HValue* right = compare->right();
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseOrConstantAtStart(right));
+ } else if (r.IsDouble()) {
+ ASSERT(left->representation().IsDouble());
+ ASSERT(right->representation().IsDouble());
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
+ } else {
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
+ bool reversed = op == Token::GT || op == Token::LTE;
+ LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
+ LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
+ LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
+ right_operand);
+ return MarkAsCall(result, instr);
+ }
+ } else if (v->IsIsSmi()) {
+ HIsSmi* compare = HIsSmi::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsHasInstanceType()) {
+ HHasInstanceType* compare = HHasInstanceType::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LHasInstanceTypeAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsHasCachedArrayIndex()) {
+ HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsIsNull()) {
+ HIsNull* compare = HIsNull::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ // We only need a temp register for non-strict compare.
+ LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
+ temp);
+ } else if (v->IsIsObject()) {
+ HIsObject* compare = HIsObject::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
+ } else if (v->IsCompareJSObjectEq()) {
+ HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+ return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
+ } else if (v->IsInstanceOf()) {
+ HInstanceOf* instance_of = HInstanceOf::cast(v);
+ LInstanceOfAndBranch* result =
+ new LInstanceOfAndBranch(
+ UseFixed(instance_of->left(), InstanceofStub::left()),
+ UseFixed(instance_of->right(), InstanceofStub::right()));
+ return MarkAsCall(result, instr);
+ } else if (v->IsTypeofIs()) {
+ HTypeofIs* typeof_is = HTypeofIs::cast(v);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+ } else if (v->IsIsConstructCall()) {
+ return new LIsConstructCallAndBranch(TempRegister());
+ } else {
+ if (v->IsConstant()) {
+ if (HConstant::cast(v)->handle()->IsTrue()) {
+ return new LGoto(instr->FirstSuccessor()->block_id());
+ } else if (HConstant::cast(v)->handle()->IsFalse()) {
+ return new LGoto(instr->SecondSuccessor()->block_id());
+ }
+ }
+ Abort("Undefined compare before branch");
+ return NULL;
+ }
+ }
+ return new LBranch(UseRegisterAtStart(v));
}
-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
- HCompareMapAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCompareMapAndBranch");
- return NULL;
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new LCmpMapAndBranch(value);
}
@@ -957,27 +1137,37 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- Abort("Unimplemented: %s", "DoPushArgument");
+ ++argument_count_;
+ LOperand* argument = UseOrConstant(instr->argument());
+ return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+ return DefineAsRegister(new LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
+ Abort("Unimplemented: DoOuterContext");
return NULL;
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- Abort("Unimplemented: %s", "DoGlobalObject");
- return NULL;
+ return DefineAsRegister(new LGlobalObject);
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- Abort("Unimplemented: %s", "DoGlobalReceiver");
- return NULL;
+ return DefineAsRegister(new LGlobalReceiver);
}
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- Abort("Unimplemented: %s", "DoCallConstantFunction");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallConstantFunction, rax), instr);
}
@@ -994,26 +1184,28 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- Abort("Unimplemented: %s", "DoCallNamed");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallNamed, rax), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallGlobal");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoCallKnownGlobal");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallKnownGlobal, rax), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- Abort("Unimplemented: %s", "DoCallNew");
- return NULL;
+ LOperand* constructor = UseFixed(instr->constructor(), rdi);
+ argument_count_ -= instr->argument_count();
+ LCallNew* result = new LCallNew(constructor);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1024,56 +1216,65 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- Abort("Unimplemented: %s", "DoCallRuntime");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallRuntime, rax), instr);
}
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- Abort("Unimplemented: %s", "DoShr");
- return NULL;
+ return DoShift(Token::SHR, instr);
}
LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- Abort("Unimplemented: %s", "DoSar");
- return NULL;
+ return DoShift(Token::SAR, instr);
}
LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- Abort("Unimplemented: %s", "DoShl");
- return NULL;
+ return DoShift(Token::SHL, instr);
}
LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
- Abort("Unimplemented: %s", "DoBitAnd");
- return NULL;
+ return DoBit(Token::BIT_AND, instr);
}
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- Abort("Unimplemented: %s", "DoBitNot");
- return NULL;
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->representation().IsInteger32());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LBitNotI* result = new LBitNotI(input);
+ return DefineSameAsFirst(result);
}
LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
- Abort("Unimplemented: %s", "DoBitOr");
- return NULL;
+ return DoBit(Token::BIT_OR, instr);
}
LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
- Abort("Unimplemented: %s", "DoBitXor");
- return NULL;
+ return DoBit(Token::BIT_XOR, instr);
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- Abort("Unimplemented: %s", "DoDiv");
- return NULL;
+ if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
+ } else if (instr->representation().IsInteger32()) {
+ // The temporary operand is necessary to ensure that right is not allocated
+ // into rdx.
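+ // (The fixed assignments follow the x64 idiv instruction, which takes its
+ // dividend in rdx:rax and leaves the quotient in rax and the remainder
+ // in rdx.)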
+ LOperand* temp = FixedTemp(rdx);
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* result = new LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineFixed(result, rax));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::DIV, instr);
+ }
}
@@ -1084,14 +1285,40 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- Abort("Unimplemented: %s", "DoMul");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LMulI* mul = new LMulI(left, right);
+ return AssignEnvironment(DefineSameAsFirst(mul));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MUL, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::MUL, instr);
+ }
}
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- Abort("Unimplemented: %s", "DoSub");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new LSubI(left, right);
+ LInstruction* result = DefineSameAsFirst(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::SUB, instr);
+ }
}
@@ -1124,33 +1351,62 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
- Abort("Unimplemented: %s", "DoCompare");
- return NULL;
+ Token::Value op = instr->token();
+ Representation r = instr->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return DefineAsRegister(new LCmpID(left, right));
+ } else if (r.IsDouble()) {
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return DefineAsRegister(new LCmpID(left, right));
+ } else {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
+ LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
}
LInstruction* LChunkBuilder::DoCompareJSObjectEq(
HCompareJSObjectEq* instr) {
- Abort("Unimplemented: %s", "DoCompareJSObjectEq");
- return NULL;
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
+ return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
- Abort("Unimplemented: %s", "DoIsNull");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsNull(value));
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
- Abort("Unimplemented: %s", "DoIsObject");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+
+ return DefineAsRegister(new LIsObject(value));
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
- Abort("Unimplemented: %s", "DoIsSmi");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseAtStart(instr->value());
+
+ return DefineAsRegister(new LIsSmi(value));
}
@@ -1174,14 +1430,20 @@ LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- Abort("Unimplemented: %s", "DoJSArrayLength");
- return NULL;
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LJSArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
- Abort("Unimplemented: %s", "DoFixedArrayLength");
- return NULL;
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LFixedArrayLength(array));
+}
+
+
+LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
+ LOperand* array = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LPixelArrayLength(array));
}
@@ -1192,56 +1454,121 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- Abort("Unimplemented: %s", "DoBoundsCheck");
+ return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+ Use(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
return NULL;
}
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- Abort("Unimplemented: %s", "DoThrow");
- return NULL;
+ LOperand* value = UseFixed(instr->value(), rax);
+ return MarkAsCall(new LThrow(value), instr);
}
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Abort("Unimplemented: %s", "DoChange");
+ Representation from = instr->from();
+ Representation to = instr->to();
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(instr->value());
+ LNumberUntagD* res = new LNumberUntagD(value);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else {
+ ASSERT(to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+ bool needs_check = !instr->value()->type().IsSmi();
+ if (needs_check) {
+ LOperand* xmm_temp =
+ (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+ ? NULL
+ : FixedTemp(xmm1);
+ LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+ return AssignEnvironment(DefineSameAsFirst(res));
+ } else {
+ return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+
+ // Make sure that temp and result_temp are different registers.
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new LNumberTagD(value, temp);
+ return AssignPointerMap(Define(result, result_temp));
+ } else {
+ ASSERT(to.IsInteger32());
+ bool needs_temp = instr->CanTruncateToInt32() &&
+ !CpuFeatures::IsSupported(SSE3);
+ LOperand* value = needs_temp ?
+ UseTempRegister(instr->value()) : UseRegister(instr->value());
+ LOperand* temp = needs_temp ? TempRegister() : NULL;
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+ }
+ } else if (from.IsInteger32()) {
+ if (to.IsTagged()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return DefineSameAsFirst(new LSmiTag(value));
+ } else {
+ LNumberTagI* result = new LNumberTagI(value);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ }
+ } else {
+ ASSERT(to.IsDouble());
+ return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+ }
+ }
+ UNREACHABLE();
return NULL;
}
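+
+// Summary of the conversions above: tagged->int32 needs a smi check unless
+// the input is statically a smi, double->tagged may allocate and so carries
+// a pointer map, and int32->tagged allocates only when the value can leave
+// the smi range.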
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- Abort("Unimplemented: %s", "DoCheckNonSmi");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckSmi(value, zero));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- Abort("Unimplemented: %s", "DoCheckInstanceType");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LCheckInstanceType* result = new LCheckInstanceType(value);
+ return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
- return NULL;
+ LOperand* temp = TempRegister();
+ LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+ return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- Abort("Unimplemented: %s", "DoCheckSmi");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckSmi(value, not_zero));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- Abort("Unimplemented: %s", "DoCheckFunction");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new LCheckFunction(value));
}
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
- Abort("Unimplemented: %s", "DoCheckMap");
- return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ LCheckMap* result = new LCheckMap(value);
+ return AssignEnvironment(result);
}
@@ -1253,14 +1580,12 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
- int32_t value = instr->Integer32Value();
- return DefineAsRegister(new LConstantI(value));
+ return DefineAsRegister(new LConstantI);
} else if (r.IsDouble()) {
- double value = instr->DoubleValue();
LOperand* temp = TempRegister();
- return DefineAsRegister(new LConstantD(value, temp));
+ return DefineAsRegister(new LConstantD(temp));
} else if (r.IsTagged()) {
- return DefineAsRegister(new LConstantT(instr->handle()));
+ return DefineAsRegister(new LConstantT);
} else {
UNREACHABLE();
return NULL;
@@ -1269,14 +1594,17 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- Abort("Unimplemented: %s", "DoLoadGlobal");
- return NULL;
+ LLoadGlobal* result = new LLoadGlobal;
+ return instr->check_hole_value()
+ ? AssignEnvironment(DefineAsRegister(result))
+ : DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- Abort("Unimplemented: %s", "DoStoreGlobal");
- return NULL;
+ LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
+ TempRegister());
+ return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
@@ -1286,35 +1614,67 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- Abort("Unimplemented: %s", "DoLoadNamedField");
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+ Abort("Unimplemented: DoStoreContextSlot");
return NULL;
}
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ ASSERT(instr->representation().IsTagged());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new LLoadNamedField(obj));
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- Abort("Unimplemented: %s", "DoLoadNamedGeneric");
- return NULL;
+ LOperand* object = UseFixed(instr->object(), rax);
+ LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
- Abort("Unimplemented: %s", "DoLoadFunctionPrototype");
- return NULL;
+ return AssignEnvironment(DefineAsRegister(
+ new LLoadFunctionPrototype(UseRegister(instr->function()))));
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- Abort("Unimplemented: %s", "DoLoadElements");
- return NULL;
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
+ HLoadPixelArrayExternalPointer* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
}
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
- return NULL;
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
+ HLoadPixelArrayElement* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* external_pointer =
+ UseRegisterAtStart(instr->external_pointer());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LLoadPixelArrayElement* result =
+ new LLoadPixelArrayElement(external_pointer, key);
+ return DefineSameAsFirst(result);
}
@@ -1326,8 +1686,20 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
HStoreKeyedFastElement* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
- return NULL;
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
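+ // The record-write helper invoked by the write barrier clobbers its
+ // register arguments, so object, key, and value are all forced into temp
+ // registers that may be clobbered.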
+ LOperand* obj = UseTempRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+
+ return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
}
@@ -1338,8 +1710,22 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- Abort("Unimplemented: %s", "DoStoreNamedField");
- return NULL;
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+ LOperand* obj = needs_write_barrier
+ ? UseTempRegister(instr->object())
+ : UseRegisterAtStart(instr->object());
+
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+
+ // We only need a scratch register if we have a write barrier or we
+ // have a store into the properties array (i.e., not an in-object property).
+ LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
+ ? TempRegister() : NULL;
+
+ return new LStoreNamedField(obj, val, temp);
}
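
The temp-register policy above follows from the write barrier: RecordWrite clobbers the registers handed to it, so operands feeding a barriered store must be owned by the instruction (UseTempRegister), while an unbarriered store can read its inputs at-start. A minimal standalone sketch of that decision, using stand-in types rather than the real LChunkBuilder API:

    #include <cassert>

    enum class OperandPolicy { kTempRegister, kRegisterAtStart };

    // A store needing a write barrier may trash its inputs, so request
    // instruction-owned registers; otherwise an at-start register suffices.
    OperandPolicy PolicyForStoredValue(bool needs_write_barrier) {
      return needs_write_barrier ? OperandPolicy::kTempRegister
                                 : OperandPolicy::kRegisterAtStart;
    }

    int main() {
      assert(PolicyForStoredValue(true) == OperandPolicy::kTempRegister);
      assert(PolicyForStoredValue(false) == OperandPolicy::kRegisterAtStart);
    }
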
@@ -1349,18 +1735,28 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- Abort("Unimplemented: %s", "DoArrayLiteral");
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ Abort("Unimplemented: %s", "DoStringCharCodeAt");
return NULL;
}
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- Abort("Unimplemented: %s", "DoObjectLiteral");
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+ Abort("Unimplemented: %s", "DoStringLength");
return NULL;
}
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
Abort("Unimplemented: %s", "DoRegExpLiteral");
return NULL;
@@ -1368,8 +1764,7 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- Abort("Unimplemented: %s", "DoFunctionLiteral");
- return NULL;
+ return MarkAsCall(DefineFixed(new LFunctionLiteral, rax), instr);
}
@@ -1398,8 +1793,8 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- Abort("Unimplemented: %s", "DoCallStub");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ return MarkAsCall(DefineFixed(new LCallStub, rax), instr);
}
@@ -1426,6 +1821,12 @@ LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
return NULL;
}
+
+LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
+ return DefineAsRegister(new LIsConstructCall);
+}
+
+
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
HEnvironment* env = current_block_->last_environment();
ASSERT(env != NULL);
@@ -1448,7 +1849,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
LLazyBailout* lazy_bailout = new LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
- instructions_pending_deoptimization_environment_->
+ instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
ClearInstructionPendingDeoptimizationEnvironment();
return result;
@@ -1464,13 +1865,21 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- Abort("Unimplemented: %s", "DoEnterInlined");
+ HEnvironment* outer = current_block_->last_environment();
+ HConstant* undefined = graph()->GetConstantUndefined();
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->function(),
+ false,
+ undefined);
+ current_block_->UpdateEnvironment(inner);
+ chunk_->AddInlinedClosure(instr->closure());
return NULL;
}
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- Abort("Unimplemented: %s", "DoLeaveInlined");
+ HEnvironment* outer = current_block_->last_environment()->outer();
+ current_block_->UpdateEnvironment(outer);
return NULL;
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 17d9dda1..0cb5cc7a 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -39,119 +39,8 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-
-// Type hierarchy:
-//
-// LInstruction
-// LTemplateInstruction
-// LControlInstruction
-// LBranch
-// LClassOfTestAndBranch
-// LCmpJSObjectEqAndBranch
-// LCmpIDAndBranch
-// LHasCachedArrayIndexAndBranch
-// LHasInstanceTypeAndBranch
-// LInstanceOfAndBranch
-// LIsNullAndBranch
-// LIsObjectAndBranch
-// LIsSmiAndBranch
-// LTypeofIsAndBranch
-// LAccessArgumentsAt
-// LArgumentsElements
-// LArgumentsLength
-// LAddI
-// LApplyArguments
-// LArithmeticD
-// LArithmeticT
-// LBitI
-// LBoundsCheck
-// LCmpID
-// LCmpJSObjectEq
-// LCmpT
-// LDivI
-// LInstanceOf
-// LInstanceOfKnownGlobal
-// LLoadKeyedFastElement
-// LLoadKeyedGeneric
-// LModI
-// LMulI
-// LPower
-// LShiftI
-// LSubI
-// LCallConstantFunction
-// LCallFunction
-// LCallGlobal
-// LCallKeyed
-// LCallKnownGlobal
-// LCallNamed
-// LCallRuntime
-// LCallStub
-// LConstant
-// LConstantD
-// LConstantI
-// LConstantT
-// LDeoptimize
-// LFunctionLiteral
-// LGap
-// LLabel
-// LGlobalObject
-// LGlobalReceiver
-// LGoto
-// LLazyBailout
-// LLoadGlobal
-// LCheckPrototypeMaps
-// LLoadContextSlot
-// LArrayLiteral
-// LObjectLiteral
-// LRegExpLiteral
-// LOsrEntry
-// LParameter
-// LRegExpConstructResult
-// LStackCheck
-// LStoreKeyed
-// LStoreKeyedFastElement
-// LStoreKeyedGeneric
-// LStoreNamed
-// LStoreNamedField
-// LStoreNamedGeneric
-// LBitNotI
-// LCallNew
-// LCheckFunction
-// LCheckPrototypeMaps
-// LCheckInstanceType
-// LCheckMap
-// LCheckSmi
-// LClassOfTest
-// LDeleteProperty
-// LDoubleToI
-// LFixedArrayLength
-// LHasCachedArrayIndex
-// LHasInstanceType
-// LInteger32ToDouble
-// LIsNull
-// LIsObject
-// LIsSmi
-// LJSArrayLength
-// LLoadNamedField
-// LLoadNamedGeneric
-// LLoadFunctionPrototype
-// LNumberTagD
-// LNumberTagI
-// LPushArgument
-// LReturn
-// LSmiTag
-// LStoreGlobal
-// LTaggedToI
-// LThrow
-// LTypeof
-// LTypeofIs
-// LUnaryMathOperation
-// LValueOf
-// LUnknownOSRValue
-
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
- V(Constant) \
V(Call) \
V(StoreKeyed) \
V(StoreNamed) \
@@ -195,6 +84,7 @@ class LCodeGen;
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
+ V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
@@ -232,6 +122,8 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadFunctionPrototype) \
+ V(LoadPixelArrayElement) \
+ V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -240,6 +132,7 @@ class LCodeGen;
V(ObjectLiteral) \
V(OsrEntry) \
V(Parameter) \
+ V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@@ -259,6 +152,8 @@ class LCodeGen;
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
@@ -287,7 +182,11 @@ class LCodeGen;
class LInstruction: public ZoneObject {
public:
LInstruction()
- : hydrogen_value_(NULL) { }
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false),
+ is_save_doubles_(false) { }
+
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -304,16 +203,14 @@ class LInstruction: public ZoneObject {
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
- void set_environment(LEnvironment* env) { environment_.set(env); }
- LEnvironment* environment() const { return environment_.get(); }
- bool HasEnvironment() const { return environment_.is_set(); }
+ void set_environment(LEnvironment* env) { environment_ = env; }
+ LEnvironment* environment() const { return environment_; }
+ bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- virtual bool HasResult() const = 0;
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -327,41 +224,73 @@ class LInstruction: public ZoneObject {
return deoptimization_environment_.is_set();
}
+ void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+
+ virtual bool HasResult() const = 0;
+ virtual LOperand* result() = 0;
+
+ virtual int InputCount() = 0;
+ virtual LOperand* InputAt(int i) = 0;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
+ LOperand* FirstInput() { return InputAt(0); }
+ LOperand* Output() { return HasResult() ? result() : NULL; }
+
+#ifdef DEBUG
+ void VerifyCall();
+#endif
+
private:
- SetOncePointer<LEnvironment> environment_;
+ LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
+ bool is_call_;
+ bool is_save_doubles_;
};
-template<typename T, int N>
+template<typename ElementType, int NumElements>
class OperandContainer {
public:
OperandContainer() {
- for (int i = 0; i < N; i++) elems_[i] = NULL;
+ for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
}
- int length() { return N; }
- T& operator[](int i) {
+ int length() { return NumElements; }
+ ElementType& operator[](int i) {
ASSERT(i < length());
return elems_[i];
}
void PrintOperandsTo(StringStream* stream);
private:
- T elems_[N];
+ ElementType elems_[NumElements];
};
-template<typename T>
-class OperandContainer<T, 0> {
+template<typename ElementType>
+class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
};
-template<int R, int I, int T = 0>
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
class LTemplateInstruction: public LInstruction {
public:
// Allow 0 or 1 output operands.
@@ -512,7 +441,7 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
};
-template<int I, int T = 0>
+template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
DECLARE_INSTRUCTION(ControlInstruction)
@@ -570,7 +499,7 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
};
-class LArgumentsLength: public LTemplateInstruction<1, 1> {
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
@@ -614,12 +543,11 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
};
-class LMulI: public LTemplateInstruction<1, 2, 1> {
+class LMulI: public LTemplateInstruction<1, 2, 0> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ LMulI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
@@ -627,7 +555,7 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
-class LCmpID: public LTemplateInstruction<1, 2> {
+class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
LCmpID(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -644,7 +572,7 @@ class LCmpID: public LTemplateInstruction<1, 2> {
};
-class LCmpIDAndBranch: public LControlInstruction<2> {
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -663,7 +591,7 @@ class LCmpIDAndBranch: public LControlInstruction<2> {
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
public:
explicit LUnaryMathOperation(LOperand* value) {
inputs_[0] = value;
@@ -677,7 +605,7 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
LCmpJSObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -688,7 +616,7 @@ class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -700,7 +628,7 @@ class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
};
-class LIsNull: public LTemplateInstruction<1, 1> {
+class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
inputs_[0] = value;
@@ -729,23 +657,20 @@ class LIsNullAndBranch: public LControlInstruction<1, 1> {
};
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
+class LIsObject: public LTemplateInstruction<1, 1, 0> {
public:
- LIsObject(LOperand* value, LOperand* temp) {
+ explicit LIsObject(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
};
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
+class LIsObjectAndBranch: public LControlInstruction<1, 0> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LIsObjectAndBranch(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
@@ -754,7 +679,7 @@ class LIsObjectAndBranch: public LControlInstruction<1, 2> {
};
-class LIsSmi: public LTemplateInstruction<1, 1> {
+class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsSmi(LOperand* value) {
inputs_[0] = value;
@@ -765,7 +690,7 @@ class LIsSmi: public LTemplateInstruction<1, 1> {
};
-class LIsSmiAndBranch: public LControlInstruction<1> {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -777,7 +702,7 @@ class LIsSmiAndBranch: public LControlInstruction<1> {
};
-class LHasInstanceType: public LTemplateInstruction<1, 1> {
+class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
inputs_[0] = value;
@@ -788,11 +713,10 @@ class LHasInstanceType: public LTemplateInstruction<1, 1> {
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
@@ -803,7 +727,7 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
@@ -814,7 +738,7 @@ class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -840,12 +764,11 @@ class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -856,7 +779,7 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
};
-class LCmpT: public LTemplateInstruction<1, 2> {
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -870,7 +793,7 @@ class LCmpT: public LTemplateInstruction<1, 2> {
};
-class LCmpTAndBranch: public LControlInstruction<2> {
+class LCmpTAndBranch: public LControlInstruction<2, 0> {
public:
LCmpTAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -884,7 +807,7 @@ class LCmpTAndBranch: public LControlInstruction<2> {
};
-class LInstanceOf: public LTemplateInstruction<1, 2> {
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -895,7 +818,7 @@ class LInstanceOf: public LTemplateInstruction<1, 2> {
};
-class LInstanceOfAndBranch: public LControlInstruction<2> {
+class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
public:
LInstanceOfAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -935,7 +858,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
};
-class LBitI: public LTemplateInstruction<1, 2> {
+class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -952,7 +875,7 @@ class LBitI: public LTemplateInstruction<1, 2> {
};
-class LShiftI: public LTemplateInstruction<1, 2> {
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
@@ -972,7 +895,7 @@ class LShiftI: public LTemplateInstruction<1, 2> {
};
-class LSubI: public LTemplateInstruction<1, 2> {
+class LSubI: public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -984,51 +907,37 @@ class LSubI: public LTemplateInstruction<1, 2> {
};
-template <int temp_count>
-class LConstant: public LTemplateInstruction<1, 0, temp_count> {
- DECLARE_INSTRUCTION(Constant)
-};
-
-
-class LConstantI: public LConstant<0> {
+class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantI(int32_t value) : value_(value) { }
- int32_t value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- int32_t value_;
+ int32_t value() const { return hydrogen()->Integer32Value(); }
};
-class LConstantD: public LConstant<1> {
+class LConstantD: public LTemplateInstruction<1, 0, 1> {
public:
- explicit LConstantD(double value, LOperand* temp) : value_(value) {
+ explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
}
- double value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- double value_;
+ double value() const { return hydrogen()->DoubleValue(); }
};
-class LConstantT: public LConstant<0> {
+class LConstantT: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LConstantT(Handle<Object> value) : value_(value) { }
- Handle<Object> value() const { return value_; }
-
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+ DECLARE_HYDROGEN_ACCESSOR(Constant)
- private:
- Handle<Object> value_;
+ Handle<Object> value() const { return hydrogen()->handle(); }
};
-class LBranch: public LControlInstruction<1> {
+class LBranch: public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
@@ -1041,28 +950,28 @@ class LBranch: public LControlInstruction<1> {
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1> {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
virtual bool IsControl() const { return true; }
Handle<Map> map() const { return hydrogen()->map(); }
int true_block_id() const {
- return hydrogen()->true_destination()->block_id();
+ return hydrogen()->FirstSuccessor()->block_id();
}
int false_block_id() const {
- return hydrogen()->false_destination()->block_id();
+ return hydrogen()->SecondSuccessor()->block_id();
}
};
-class LJSArrayLength: public LTemplateInstruction<1, 1> {
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LJSArrayLength(LOperand* value) {
inputs_[0] = value;
@@ -1073,7 +982,18 @@ class LJSArrayLength: public LTemplateInstruction<1, 1> {
};
-class LFixedArrayLength: public LTemplateInstruction<1, 1> {
+class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LPixelArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
+};
+
+
+class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayLength(LOperand* value) {
inputs_[0] = value;
@@ -1096,7 +1016,7 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
-class LThrow: public LTemplateInstruction<0, 1> {
+class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
@@ -1106,7 +1026,7 @@ class LThrow: public LTemplateInstruction<0, 1> {
};
-class LBitNotI: public LTemplateInstruction<1, 1> {
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LBitNotI(LOperand* value) {
inputs_[0] = value;
@@ -1116,7 +1036,7 @@ class LBitNotI: public LTemplateInstruction<1, 1> {
};
-class LAddI: public LTemplateInstruction<1, 2> {
+class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1128,7 +1048,7 @@ class LAddI: public LTemplateInstruction<1, 2> {
};
-class LPower: public LTemplateInstruction<1, 2> {
+class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
@@ -1140,7 +1060,7 @@ class LPower: public LTemplateInstruction<1, 2> {
};
-class LArithmeticD: public LTemplateInstruction<1, 2> {
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1158,7 +1078,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2> {
};
-class LArithmeticT: public LTemplateInstruction<1, 2> {
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
@@ -1176,7 +1096,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2> {
};
-class LReturn: public LTemplateInstruction<0, 1> {
+class LReturn: public LTemplateInstruction<0, 1, 0> {
public:
explicit LReturn(LOperand* value) {
inputs_[0] = value;
@@ -1186,7 +1106,7 @@ class LReturn: public LTemplateInstruction<0, 1> {
};
-class LLoadNamedField: public LTemplateInstruction<1, 1> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
@@ -1197,7 +1117,7 @@ class LLoadNamedField: public LTemplateInstruction<1, 1> {
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
@@ -1211,11 +1131,10 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
};
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
@@ -1225,7 +1144,7 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
};
-class LLoadElements: public LTemplateInstruction<1, 1> {
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadElements(LOperand* object) {
inputs_[0] = object;
@@ -1235,7 +1154,18 @@ class LLoadElements: public LTemplateInstruction<1, 1> {
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
+class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadPixelArrayExternalPointer(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
+ "load-pixel-array-external-pointer")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
@@ -1250,7 +1180,23 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> {
+class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
+ "load-pixel-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
@@ -1271,10 +1217,11 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
};
-class LStoreGlobal: public LTemplateInstruction<0, 1> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
public:
- explicit LStoreGlobal(LOperand* value) {
+ explicit LStoreGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
@@ -1282,19 +1229,23 @@ class LStoreGlobal: public LTemplateInstruction<0, 1> {
};
-class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LLoadContextSlot(LOperand* context) {
+ inputs_[0] = context;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- int context_chain_length() { return hydrogen()->context_chain_length(); }
+ LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
-class LPushArgument: public LTemplateInstruction<0, 1> {
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
@@ -1304,6 +1255,12 @@ class LPushArgument: public LTemplateInstruction<0, 1> {
};
+class LContext: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+};
+
+
class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
@@ -1328,10 +1285,10 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
-class LCallKeyed: public LTemplateInstruction<1, 0, 1> {
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCallKeyed(LOperand* temp) {
- temps_[0] = temp;
+ explicit LCallKeyed(LOperand* key) {
+ inputs_[0] = key;
}
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
@@ -1388,7 +1345,7 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
};
-class LCallNew: public LTemplateInstruction<1, 1> {
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
@@ -1413,7 +1370,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
@@ -1423,7 +1380,7 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
};
-class LNumberTagI: public LTemplateInstruction<1, 1> {
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
@@ -1474,7 +1431,7 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
};
-class LSmiTag: public LTemplateInstruction<1, 1> {
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
@@ -1484,7 +1441,7 @@ class LSmiTag: public LTemplateInstruction<1, 1> {
};
-class LNumberUntagD: public LTemplateInstruction<1, 1> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
@@ -1494,7 +1451,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1> {
};
-class LSmiUntag: public LTemplateInstruction<1, 1> {
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
@@ -1593,7 +1550,7 @@ class LStoreKeyedGeneric: public LStoreKeyed {
};
-class LCheckFunction: public LTemplateInstruction<0, 1> {
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckFunction(LOperand* value) {
inputs_[0] = value;
@@ -1604,11 +1561,10 @@ class LCheckFunction: public LTemplateInstruction<0, 1> {
};
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckInstanceType(LOperand* value, LOperand* temp) {
+ explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
@@ -1616,7 +1572,7 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
};
-class LCheckMap: public LTemplateInstruction<0, 1> {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMap(LOperand* value) {
inputs_[0] = value;
@@ -1641,7 +1597,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
};
-class LCheckSmi: public LTemplateInstruction<0, 1> {
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
LCheckSmi(LOperand* value, Condition condition)
: condition_(condition) {
@@ -1690,7 +1646,7 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
};
-class LTypeof: public LTemplateInstruction<1, 1> {
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
@@ -1700,7 +1656,7 @@ class LTypeof: public LTemplateInstruction<1, 1> {
};
-class LTypeofIs: public LTemplateInstruction<1, 1> {
+class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeofIs(LOperand* value) {
inputs_[0] = value;
@@ -1715,7 +1671,7 @@ class LTypeofIs: public LTemplateInstruction<1, 1> {
};
-class LTypeofIsAndBranch: public LControlInstruction<1> {
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
@@ -1730,7 +1686,25 @@ class LTypeofIsAndBranch: public LControlInstruction<1> {
};
-class LDeleteProperty: public LTemplateInstruction<1, 2> {
+class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
+ DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
+};
+
+
+class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
+ public:
+ explicit LIsConstructCallAndBranch(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+ "is-construct-call-and-branch")
+};
+
+
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
LDeleteProperty(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
@@ -1783,7 +1757,7 @@ class LChunk: public ZoneObject {
pointer_maps_(8),
inlined_closures_(1) { }
- int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
@@ -1849,7 +1823,7 @@ class LChunkBuilder BASE_EMBEDDED {
argument_count_(0),
allocator_(allocator),
position_(RelocInfo::kNoPosition),
- instructions_pending_deoptimization_environment_(NULL),
+ instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
// Build the sequence for the graph.
@@ -1900,30 +1874,30 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseRegister(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
- // A value in a register that may be trashed.
+ // An input operand in a register that may be trashed.
MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
- // An operand value in a register or stack slot.
+ // An input operand in a register or stack slot.
MUST_USE_RESULT LOperand* Use(HValue* value);
MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
- // An operand value in a register, stack slot or a constant operand.
+ // An input operand in a register, stack slot or a constant operand.
MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
- // An operand value in a register or a constant operand.
+ // An input operand in a register or a constant operand.
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
MUST_USE_RESULT LOperand* FixedTemp(Register reg);
MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
- // An operand value in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- LOperand* UseAny(HValue* value);
-
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
template<int I, int T>
@@ -1983,7 +1957,7 @@ class LChunkBuilder BASE_EMBEDDED {
int argument_count_;
LAllocator* allocator_;
int position_;
- LInstruction* instructions_pending_deoptimization_environment_;
+ LInstruction* instruction_pending_deoptimization_environment_;
int pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index f95755db..56a2d6f9 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -68,7 +68,9 @@ void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
}
-void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+ ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
cmpq(with, kScratchRegister);
}
@@ -375,6 +377,16 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
}
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ Runtime::Function* function = Runtime::FunctionForId(id);
+ Set(rax, function->nargs);
+ movq(rbx, ExternalReference(function));
+ CEntryStub ces(1);
+ ces.SaveDoubles();
+ CallStub(&ces);
+}
+
+
MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
int num_arguments) {
return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
@@ -885,6 +897,13 @@ Condition MacroAssembler::CheckSmi(Register src) {
}
+Condition MacroAssembler::CheckSmi(const Operand& src) {
+ ASSERT_EQ(0, kSmiTag);
+ testb(src, Immediate(kSmiTagMask));
+ return zero;
+}
+
+
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
// Make mask 0x8000000000000001 and test that both bits are zero.
@@ -960,6 +979,27 @@ Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
}
+void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
+ if (dst.is(src)) {
+ andl(dst, Immediate(kSmiTagMask));
+ } else {
+ movl(dst, Immediate(kSmiTagMask));
+ andl(dst, src);
+ }
+}
+
+
+void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
+ if (!src.AddressUsesRegister(dst)) {
+ movl(dst, Immediate(kSmiTagMask));
+ andl(dst, src);
+ } else {
+ movl(dst, src);
+ andl(dst, Immediate(kSmiTagMask));
+ }
+}
+
+
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1386,6 +1426,68 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
}
+void MacroAssembler::Pushad() {
+ push(rax);
+ push(rcx);
+ push(rdx);
+ push(rbx);
+ // Not pushing rsp or rbp.
+ push(rsi);
+ push(rdi);
+ push(r8);
+ push(r9);
+ // r10 is kScratchRegister.
+ push(r11);
+ push(r12);
+ // r13 is kRootRegister.
+ push(r14);
+ // r15 is kSmiConstantRegister.
+}
+
+
+void MacroAssembler::Popad() {
+ pop(r14);
+ pop(r12);
+ pop(r11);
+ pop(r9);
+ pop(r8);
+ pop(rdi);
+ pop(rsi);
+ pop(rbx);
+ pop(rdx);
+ pop(rcx);
+ pop(rax);
+}
+
+
+void MacroAssembler::Dropad() {
+ const int kRegistersPushedByPushad = 11;
+ addq(rsp, Immediate(kRegistersPushedByPushad * kPointerSize));
+}
+
+
+// Order in which the general registers are pushed by Pushad:
+// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
+ 0,
+ 1,
+ 2,
+ 3,
+ -1,
+ -1,
+ 4,
+ 5,
+ 6,
+ 7,
+ -1,
+ 8,
+ 9,
+ -1,
+ 10,
+ -1
+};
+
+
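
The table above maps an x64 register code (rax = 0 through r15 = 15) to that register's slot in the Pushad push sequence, with -1 for registers Pushad skips (rsp, rbp, r10/kScratchRegister, r13/kRootRegister, r15/kSmiConstantRegister). A standalone sketch of the lookup, duplicating the table for illustration:

    #include <cassert>

    int main() {
      // Register code -> slot in Pushad's push order; -1 means "not pushed".
      const int kSafepointIndices[16] =
          {0, 1, 2, 3, -1, -1, 4, 5, 6, 7, -1, 8, 9, -1, 10, -1};
      assert(kSafepointIndices[3] == 3);    // rbx: fourth register pushed
      assert(kSafepointIndices[10] == -1);  // r10 is kScratchRegister
      assert(kSafepointIndices[14] == 10);  // r14: last register pushed
    }
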
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
// Adjust this code if not the case.
@@ -1439,6 +1541,18 @@ void MacroAssembler::Ret() {
}
+void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
+ if (is_uint16(bytes_dropped)) {
+ ret(bytes_dropped);
+ } else {
+ pop(scratch);
+ addq(rsp, Immediate(bytes_dropped));
+ push(scratch);
+ ret(0);
+ }
+}
+
+
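
The two-way split in Ret(bytes_dropped, scratch) exists because the x64 ret instruction encodes its stack adjustment as a 16-bit immediate, so more than 0xffff bytes cannot be dropped directly; the slow path pops the return address, adjusts rsp by an arbitrary amount, and pushes the address back before returning. A sketch of the guard condition (illustrative, standard C++ only):

    #include <cstdint>

    // ret imm16 can drop at most 0xffff bytes of arguments.
    bool FitsRetImmediate(int64_t bytes_dropped) {
      return bytes_dropped >= 0 && bytes_dropped <= 0xffff;
    }
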
void MacroAssembler::FCmp() {
fucomip();
fstp(0);
@@ -1670,10 +1784,18 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
Move(rdi, Handle<JSFunction>(function));
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Invoke the cached code.
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+ if (V8::UseCrankshaft()) {
+ // Since Crankshaft can recompile a function, we need to load
+ // the Code object every time we call the function.
+ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(rdx, expected, actual, flag);
+ } else {
+ // Invoke the cached code.
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+ }
}
@@ -1734,12 +1856,24 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
}
-void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space) {
+void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
+ bool save_doubles) {
#ifdef _WIN64
- const int kShaddowSpace = 4;
- arg_stack_space += kShaddowSpace;
+ const int kShadowSpace = 4;
+ arg_stack_space += kShadowSpace;
#endif
- if (arg_stack_space > 0) {
+ // Optionally save all XMM registers.
+ if (save_doubles) {
+ CpuFeatures::Scope scope(SSE2);
+ int space = XMMRegister::kNumRegisters * kDoubleSize +
+ arg_stack_space * kPointerSize;
+ subq(rsp, Immediate(space));
+ int offset = -2 * kPointerSize;
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ XMMRegister reg = XMMRegister::FromAllocationIndex(i);
+ movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
+ }
+ } else if (arg_stack_space > 0) {
subq(rsp, Immediate(arg_stack_space * kPointerSize));
}
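
The loop above spills allocatable XMM register i to rbp - 2 * kPointerSize - (i + 1) * kDoubleSize, and LeaveExitFrame restores from the same slots with the mirrored loop. A standalone sketch of the slot arithmetic, assuming the x64 sizes of 8 bytes each:

    #include <cassert>

    // Save slot for allocatable XMM register i, relative to rbp.
    int XmmSaveSlotOffset(int i) {
      const int kPointerSize = 8;
      const int kDoubleSize = 8;
      return -2 * kPointerSize - (i + 1) * kDoubleSize;
    }

    int main() {
      assert(XmmSaveSlotOffset(0) == -24);  // first register: rbp - 24
      assert(XmmSaveSlotOffset(1) == -32);
    }
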
@@ -1756,7 +1890,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space) {
}
-void MacroAssembler::EnterExitFrame(int arg_stack_space) {
+void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
EnterExitFramePrologue(true);
// Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
@@ -1764,25 +1898,31 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space) {
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r12, Operand(rbp, r14, times_pointer_size, offset));
- EnterExitFrameEpilogue(arg_stack_space);
+ EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
EnterExitFramePrologue(false);
- EnterExitFrameEpilogue(arg_stack_space);
+ EnterExitFrameEpilogue(arg_stack_space, false);
}
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Registers:
// r12 : argv
-
+ if (save_doubles) {
+ int offset = -2 * kPointerSize;
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ XMMRegister reg = XMMRegister::FromAllocationIndex(i);
+ movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
+ }
+ }
// Get the return address from the stack and restore the frame pointer.
movq(rcx, Operand(rbp, 1 * kPointerSize));
movq(rbp, Operand(rbp, 0 * kPointerSize));
- // Pop everything up to and including the arguments and the receiver
+ // Drop everything up to and including the arguments and the receiver
// from the caller stack.
lea(rsp, Operand(r12, 1 * kPointerSize));
@@ -1970,11 +2110,11 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
Register top_reg = result_end.is_valid() ? result_end : result;
- if (top_reg.is(result)) {
- addq(top_reg, Immediate(object_size));
- } else {
- lea(top_reg, Operand(result, object_size));
+ if (!top_reg.is(result)) {
+ movq(top_reg, result);
}
+ addq(top_reg, Immediate(object_size));
+ j(carry, gc_required);
movq(kScratchRegister, new_space_allocation_limit);
cmpq(top_reg, Operand(kScratchRegister, 0));
j(above, gc_required);
@@ -2024,7 +2164,12 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
- lea(result_end, Operand(result, element_count, element_size, header_size));
+
+ // We assume that element_count*element_size + header_size does not
+ // overflow.
+ lea(result_end, Operand(element_count, element_size, header_size));
+ addq(result_end, result);
+ j(carry, gc_required);
movq(kScratchRegister, new_space_allocation_limit);
cmpq(result_end, Operand(kScratchRegister, 0));
j(above, gc_required);
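
Replacing the single lea with addq plus j(carry, ...) is what makes this overflow check possible: lea computes an address without touching the flags, so a wrapped new-top value would sail past the limit comparison, whereas addq sets the carry flag on unsigned overflow. The same check in portable terms (a sketch, not V8 code):

    #include <cstdint>

    // Mirrors addq + j(carry, gc_required): detect unsigned wrap-around.
    bool ComputeNewTop(uint64_t top, uint64_t size, uint64_t* new_top) {
      uint64_t sum = top + size;     // well-defined modular arithmetic
      if (sum < top) return false;   // wrapped: hardware would set carry
      *new_top = sum;
      return true;
    }
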
@@ -2070,6 +2215,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
movq(result_end, object_size);
}
addq(result_end, result);
+ j(carry, gc_required);
movq(kScratchRegister, new_space_allocation_limit);
cmpq(result_end, Operand(kScratchRegister, 0));
j(above, gc_required);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 30b9ba51..10026359 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -74,7 +74,7 @@ class MacroAssembler: public Assembler {
void LoadRoot(Register destination, Heap::RootListIndex index);
void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(Operand with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
void StoreRoot(Register source, Heap::RootListIndex index);
@@ -152,7 +152,7 @@ class MacroAssembler: public Assembler {
//
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
// accessible via StackSpaceOperand.
- void EnterExitFrame(int arg_stack_space = 0);
+ void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
// Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
// memory (not GCed) on the stack accessible via StackSpaceOperand.
@@ -161,20 +161,20 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
// argument in register rsi.
- void LeaveExitFrame();
+ void LeaveExitFrame(bool save_doubles = false);
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
void LeaveApiExitFrame();
// Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { UNIMPLEMENTED(); }
- void PopSafepointRegisters() { UNIMPLEMENTED(); }
+ void PushSafepointRegisters() { Pushad(); }
+ void PopSafepointRegisters() { Popad(); }
static int SafepointRegisterStackIndex(int reg_code) {
- UNIMPLEMENTED();
- return 0;
+ return kSafepointPushRegisterIndices[reg_code];
}
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -272,6 +272,7 @@ class MacroAssembler: public Assembler {
// Is the value a tagged smi.
Condition CheckSmi(Register src);
+ Condition CheckSmi(const Operand& src);
// Is the value a non-negative tagged smi.
Condition CheckNonNegativeSmi(Register src);
@@ -300,6 +301,11 @@ class MacroAssembler: public Assembler {
// conversion to a smi.
Condition CheckUInteger32ValidSmiValue(Register src);
+ // Check whether src is a smi, and set dst to zero if it is a smi,
+ // and to one if it isn't.
+ void CheckSmiToIndicator(Register dst, Register src);
+ void CheckSmiToIndicator(Register dst, const Operand& src);
+
// Test-and-jump functions. Typically combines a check function
// above with a conditional jump.
@@ -534,6 +540,14 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String macros.
+
+ // If object is a string, its map is loaded into object_map.
+ template <typename LabelType>
+ void JumpIfNotString(Register object,
+ Register object_map,
+ LabelType* not_string);
+
+
template <typename LabelType>
void JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
@@ -590,6 +604,22 @@ class MacroAssembler: public Assembler {
void Call(ExternalReference ext);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+ // Emit call to the code we are currently generating.
+ void CallSelf() {
+ Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+ Call(self, RelocInfo::CODE_TARGET);
+ }
+
+ // Non-x64 instructions.
+ // Push/pop all general purpose registers.
+ // Does not push rsp/rbp nor any of the assembler's special purpose registers
+ // (kScratchRegister, kSmiConstantRegister, kRootRegister).
+ void Pushad();
+ void Popad();
+ // Sets the stack as after performing Popad, without actually loading the
+ // registers.
+ void Dropad();
+
// Compare object type for heap object.
// Always use unsigned comparisons: above and below, not less and greater.
// Incoming register is heap_object and outgoing register is map.
@@ -804,6 +834,9 @@ class MacroAssembler: public Assembler {
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);
+ // Call a runtime function and save the value of XMM registers.
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+
// Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
@@ -887,6 +920,10 @@ class MacroAssembler: public Assembler {
void Ret();
+ // Return and drop arguments from stack, where the number of arguments
+ // may be bigger than 2^16 - 1. Requires a scratch register.
+ void Ret(int bytes_dropped, Register scratch);
+
Handle<Object> CodeObject() { return code_object_; }
@@ -923,6 +960,9 @@ class MacroAssembler: public Assembler {
bool allow_stub_calls() { return allow_stub_calls_; }
private:
+ // Order in which the general registers are pushed by Pushad:
+ // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+ static int kSafepointPushRegisterIndices[Register::kNumRegisters];
bool generating_stub_;
bool allow_stub_calls_;
@@ -953,7 +993,7 @@ class MacroAssembler: public Assembler {
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
// accessible via StackSpaceOperand.
- void EnterExitFrameEpilogue(int arg_stack_space);
+ void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
void LeaveExitFrameEpilogue();
@@ -1436,6 +1476,8 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
+ // dst and src1 can be the same, because the one case that bails out
+ // is a shift by 0, which leaves dst, and therefore src1, unchanged.
NearLabel result_ok;
if (src1.is(rcx) || src2.is(rcx)) {
movq(kScratchRegister, rcx);
@@ -1570,6 +1612,17 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
template <typename LabelType>
+void MacroAssembler::JumpIfNotString(Register object,
+ Register object_map,
+ LabelType* not_string) {
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, not_string);
+ CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+ j(above_equal, not_string);
+}
+
+
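
JumpIfNotString checks in two steps because smis are tagged immediates with no map to inspect: the smi check must come first, and CmpObjectType then loads the map into object_map and compares the instance type against FIRST_NONSTRING_TYPE, below which all string types are ordered. A standalone sketch of the predicate, with a stand-in constant rather than the real type code:

    // String instance types sort below the first non-string type.
    bool IsStringObject(bool is_smi, int instance_type) {
      const int kFirstNonstringType = 0x80;  // stand-in for FIRST_NONSTRING_TYPE
      if (is_smi) return false;              // smis carry no map at all
      return instance_type < kFirstNonstringType;
    }
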
+template <typename LabelType>
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
Register scratch1,
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 8c1b5794..9cb88f36 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -307,28 +307,32 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss) {
+ Label* miss,
+ bool support_wrappers) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+ GenerateStringCheck(masm, receiver, scratch1, miss,
+ support_wrappers ? &check_wrapper : miss);
// Load length directly from the string.
__ movq(rax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
+ if (support_wrappers) {
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
+ }
}
@@ -437,10 +441,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
-static bool GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc,
- Failure** failure) {
+static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : object passing the type check
@@ -504,13 +507,8 @@ static bool GenerateFastApiCall(MacroAssembler* masm,
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- MaybeObject* result =
- masm->TryCallApiFunctionAndReturn(&fun, argc + kFastApiCallArguments + 1);
- if (result->IsFailure()) {
- *failure = Failure::cast(result);
- return false;
- }
- return true;
+ return masm->TryCallApiFunctionAndReturn(&fun,
+ argc + kFastApiCallArguments + 1);
}
@@ -523,17 +521,16 @@ class CallInterceptorCompiler BASE_EMBEDDED {
arguments_(arguments),
name_(name) {}
- bool Compile(MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss,
- Failure** failure) {
+ MaybeObject* Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -553,8 +550,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
lookup,
name,
optimization,
- miss,
- failure);
+ miss);
} else {
CompileRegular(masm,
object,
@@ -565,23 +561,22 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name,
holder,
miss);
- return true;
+ return Heap::undefined_value(); // Success.
}
}
private:
- bool CompileCacheable(MacroAssembler* masm,
- JSObject* object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- const CallOptimization& optimization,
- Label* miss_label,
- Failure** failure) {
+ MaybeObject* CompileCacheable(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ JSObject* interceptor_holder,
+ LookupResult* lookup,
+ String* name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@@ -643,13 +638,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- bool success = GenerateFastApiCall(masm,
- optimization,
- arguments_.immediate(),
- failure);
- if (!success) {
- return false;
- }
+ MaybeObject* result = GenerateFastApiCall(masm,
+ optimization,
+ arguments_.immediate());
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(optimization.constant_function(), arguments_,
JUMP_FUNCTION);
@@ -668,7 +660,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
FreeSpaceForFastApiCall(masm, scratch1);
}
- return true;
+ return Heap::undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
@@ -1021,17 +1013,16 @@ void StubCompiler::GenerateLoadField(JSObject* object,
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss,
- Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
@@ -1095,12 +1086,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
- MaybeObject* result = masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
- if (result->IsFailure()) {
- *failure = Failure::cast(result);
- return false;
- }
- return true;
+ return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
}
@@ -2135,17 +2121,14 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
if (depth != kInvalidProtoDepth) {
- Failure* failure;
// Move the return address on top of the stack.
__ movq(rax, Operand(rsp, 3 * kPointerSize));
__ movq(Operand(rsp, 0 * kPointerSize), rax);
// rsp[2 * kPointerSize] is uninitialized, rsp[3 * kPointerSize] contains
// duplicate of return address and will be overwritten.
- bool success = GenerateFastApiCall(masm(), optimization, argc, &failure);
- if (!success) {
- return failure;
- }
+ MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
} else {
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
}
@@ -2194,21 +2177,17 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), rcx);
- Failure* failure;
- bool success = compiler.Compile(masm(),
- object,
- holder,
- name,
- &lookup,
- rdx,
- rbx,
- rdi,
- rax,
- &miss,
- &failure);
- if (!success) {
- return failure;
- }
+ MaybeObject* result = compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ rdx,
+ rbx,
+ rdi,
+ rax,
+ &miss);
+ if (result->IsFailure()) return result;
// Restore receiver.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2459,9 +2438,17 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
Handle<Map>(object->map()));
__ j(not_equal, &miss);
+ // Check that the value in the cell is not the hole. If it is, this
+ // cell could have been deleted, and reintroducing the global would
+ // require updating the property details in the global object's
+ // property dictionary. We bail out to the runtime system to do that.
+ __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ Heap::kTheHoleValueRootIndex);
+ __ j(equal, &miss);
+
// Store the value in the cell.
- __ Move(rcx, Handle<JSGlobalPropertyCell>(cell));
- __ movq(FieldOperand(rcx, JSGlobalPropertyCell::kValueOffset), rax);
+ __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
// Return the value (register rax).
__ IncrementCounter(&Counters::named_store_global_inline, 1);
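
A C++ analogy for the control flow the hole check adds, with a hypothetical PropertyCell type standing in for V8's JSGlobalPropertyCell:

    #include <cstdio>

    // Hypothetical cell model; illustrative only.
    struct PropertyCell { const void* value; };
    static const char kTheHole = 0;  // unique sentinel address

    // Mirrors the stub: the fast path may store through the cell only if
    // the cell does not hold the hole; a deleted-then-reintroduced global
    // must have its property details rewritten by the runtime instead.
    bool TryInlineGlobalStore(PropertyCell* cell, const void* new_value) {
      if (cell->value == &kTheHole) return false;  // miss: call the runtime
      cell->value = new_value;                     // fast path: store in cell
      return true;
    }

    int main() {
      PropertyCell live = { nullptr }, deleted = { &kTheHole };
      int v = 7;
      printf("%d %d\n", TryInlineGlobalStore(&live, &v),
                        TryInlineGlobalStore(&deleted, &v));  // 1 0
    }
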
@@ -2648,12 +2635,11 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
// -----------------------------------
Label miss;
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
+ rdi, callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -2812,12 +2798,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
+ rcx, rdi, callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -2933,7 +2918,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss);
+ GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3013,6 +2998,35 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
}
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
+
+ GenerateFastPixelArrayLoad(masm(),
+ rdx,
+ rax,
+ rbx,
+ rcx,
+ rax,
+ &miss,
+ &miss,
+ &miss);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
// Specialized stub for constructing objects from functions which have only
// simple assignments of the form this.x = ...; in their body.
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
@@ -3144,6 +3158,306 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
}
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(rdx, &slow);
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rax, &slow);
+
+ // Check that the object is a JS object.
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
+ __ j(not_equal, &slow);
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks. CmpObjectType above loaded the map into rcx.
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // rax: index (as a smi)
+ // rdx: JSObject
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::RootIndexForExternalArrayType(array_type));
+ __ j(not_equal, &slow);
+
+ // Check that the index is in range.
+ __ SmiToInteger32(rcx, rax);
+ __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // rax: index (as a smi)
+ // rdx: receiver (JSObject)
+ // rcx: untagged index
+ // rbx: elements array
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rbx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
+ break;
+ case kExternalUnsignedByteArray:
+ __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
+ break;
+ case kExternalShortArray:
+ __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
+ break;
+ case kExternalIntArray:
+ __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
+ break;
+ case kExternalUnsignedIntArray:
+ __ movl(rcx, Operand(rbx, rcx, times_4, 0));
+ break;
+ case kExternalFloatArray:
+ __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // rax: index
+ // rdx: receiver
+ // For integer array types:
+ // rcx: value
+ // For floating-point array type:
+ // xmm0: value as double.
+
+ ASSERT(kSmiValueSize == 32);
+ if (array_type == kExternalUnsignedIntArray) {
+ // For the UnsignedInt array type, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ NearLabel box_int;
+
+ __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
+
+ __ Integer32ToSmi(rax, rcx);
+ __ ret(0);
+
+ __ bind(&box_int);
+
+ // Allocate a HeapNumber for the int and perform int-to-double
+ // conversion.
+ // The value is zero-extended since we loaded the value from memory
+ // with movl.
+ __ cvtqsi2sd(xmm0, rcx);
+
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // Set the value.
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // Set the value.
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ } else {
+ __ Integer32ToSmi(rax, rcx);
+ __ ret(0);
+ }
+
+ // Slow case: Jump to runtime.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rax); // name
+ __ push(rbx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ // Return the generated code.
+ return GetCode(flags);
+}
+
+
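
Two details of the load stub above deserve spelling out: the single unsigned comparison that rejects both negative and out-of-range indices, and the smi-range test that decides when a uint32 element must be boxed as a HeapNumber. A standalone sketch under the stub's kSmiValueSize == 32 assumption (helper names are illustrative, not V8 API):

    #include <cstdint>
    #include <cstdio>

    bool InRange(int32_t untagged_index, uint32_t length) {
      // One unsigned comparison rejects both negative and too-large
      // indices, matching the stub's cmpl + j(above_equal, &slow).
      return static_cast<uint32_t>(untagged_index) < length;
    }

    bool FitsInSmi(uint32_t element) {
      // With a 32-bit smi payload the value must fit in a signed int32;
      // anything with the top bit set is boxed as a HeapNumber instead.
      return element <= 0x7FFFFFFFu;
    }

    int main() {
      printf("%d %d\n", InRange(-1, 10), InRange(3, 10));         // 0 1
      printf("%d %d\n", FitsInSmi(42u), FitsInSmi(0x80000000u));  // 1 0
    }
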
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(rdx, &slow);
+ // Get the map from the receiver.
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rcx, &slow);
+
+ // Check that the object is a JS object.
+ __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::RootIndexForExternalArrayType(array_type));
+ __ j(not_equal, &slow);
+
+ // Check that the index is in range.
+ __ SmiToInteger32(rdi, rcx); // Untag the index.
+ __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
+ NearLabel check_heap_number;
+ __ JumpIfNotSmi(rax, &check_heap_number);
+ // No more branches to slow case on this path. Key and receiver not needed.
+ __ SmiToInteger32(rdx, rax);
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rbx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+ break;
+ case kExternalFloatArray:
+ // Need to perform int-to-float conversion.
+ __ cvtlsi2ss(xmm0, rdx);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ ret(0);
+
+ __ bind(&check_heap_number);
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
+ __ j(not_equal, &slow);
+ // No more branches to slow case on this path.
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rdi: untagged index
+ // rbx: base pointer of external storage
+ // xmm0: value as double
+ if (array_type == kExternalFloatArray) {
+ __ cvtsd2ss(xmm0, xmm0);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+ __ ret(0);
+ } else {
+ // Perform float-to-int conversion with truncation (round-to-zero)
+ // behavior.
+
+ // Convert to int32 and store the low byte/word.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
+ // rdx: value (converted to an untagged integer)
+ // rdi: untagged index
+ // rbx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ cvttsd2si(rdx, xmm0);
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ cvttsd2si(rdx, xmm0);
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray: {
+ // Convert to int64, so that NaN and infinities become
+ // 0x8000000000000000, which is zero mod 2^32.
+ __ cvttsd2siq(rdx, xmm0);
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ ret(0);
+ }
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
+ __ push(rax); // value
+ __ push(rbx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+
+ return GetCode(flags);
+}
+
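
The NaN/infinity behavior the store stub relies on is the hardware's, and can be checked on any x86-64 host with SSE2 intrinsics. A minimal sketch (not part of the patch):

    #include <emmintrin.h>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      // cvttsd2si: NaN and out-of-range values produce the 32-bit
      // "integer indefinite" 0x80000000.
      int32_t narrow = _mm_cvttsd_si32(_mm_set_sd(nan));
      // cvttsd2siq: the 64-bit indefinite 0x8000000000000000, whose low
      // 32 bits (the part the stub stores with movl) are zero.
      int64_t wide = _mm_cvttsd_si64(_mm_set_sd(nan));
      printf("%08x\n", static_cast<uint32_t>(narrow));  // 80000000
      printf("%08x\n", static_cast<uint32_t>(wide));    // 00000000
      return 0;
    }
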
#undef __
} } // namespace v8::internal
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 3f7b1db7..31f9527a 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -1119,23 +1119,30 @@ Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
}
-Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
+Result VirtualFrame::CallStoreIC(Handle<String> name,
+ bool is_contextual,
+ StrictModeFlag strict_mode) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in rcx, value in rax, and receiver in rdx.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
+ ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
Result value = Pop();
+ RelocInfo::Mode mode;
if (is_contextual) {
PrepareForCall(0, 0);
value.ToRegister(rax);
__ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
value.Unuse();
+ mode = RelocInfo::CODE_TARGET_CONTEXT;
} else {
Result receiver = Pop();
PrepareForCall(0, 0);
MoveResultsToRegisters(&value, &receiver, rax, rdx);
+ mode = RelocInfo::CODE_TARGET;
}
__ Move(rcx, name);
- return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+ return RawCallCodeObject(ic, mode);
}
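
The hunk above makes two independent choices: which store IC stub to call (strict vs. non-strict) and how to tag the call site. A sketch of that dispatch with stand-in enums, not V8's declarations:

    #include <cstdio>

    enum StrictModeFlag { kNonStrictMode, kStrictMode };
    enum Mode { CODE_TARGET, CODE_TARGET_CONTEXT };
    enum StoreStub { StoreIC_Initialize, StoreIC_Initialize_Strict };

    StoreStub ChooseStub(StrictModeFlag f) {
      // Strict-mode code must call the strict IC so failed stores throw.
      return f == kStrictMode ? StoreIC_Initialize_Strict
                              : StoreIC_Initialize;
    }

    Mode ChooseMode(bool is_contextual) {
      // Contextual stores (e.g. `x = 1` at top level) are tagged
      // CODE_TARGET_CONTEXT so the IC uses the global object as receiver.
      return is_contextual ? CODE_TARGET_CONTEXT : CODE_TARGET;
    }

    int main() {
      printf("%d %d\n", ChooseStub(kStrictMode), ChooseMode(true));  // 1 1
    }
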
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 0479ff0c..4a9c7203 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -338,7 +338,8 @@ class VirtualFrame : public ZoneObject {
// Call store IC. If the store is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are dropped.
- Result CallStoreIC(Handle<String> name, bool is_contextual);
+ Result CallStoreIC(Handle<String> name, bool is_contextual,
+ StrictModeFlag strict_mode);
// Call keyed store IC. Value, key, and receiver are found on top
// of the frame. All three are dropped.