author    Ben Murdoch <benm@google.com>  2011-05-16 14:20:40 +0100
committer Ben Murdoch <benm@google.com>  2011-05-18 16:35:26 +0100
commit    b8e0da25ee8efac3bb05cd6b2730aafbd96119f4 (patch)
tree      c5ef7652343a7e4b55601fa0a4c94cf46ba09585 /src
parent    086aeeaae12517475c22695a200be45495516549 (diff)
Update V8 to r6387 as required by WebKit r76408
Change-Id: Icfc5385b0996bd592f8b1ac8cbb44767ee09f1f6
Diffstat (limited to 'src')
-rwxr-xr-x  src/SConscript | 4
-rw-r--r--  src/api.cc | 56
-rw-r--r--  src/arm/assembler-arm.cc | 22
-rw-r--r--  src/arm/assembler-arm.h | 24
-rw-r--r--  src/arm/builtins-arm.cc | 4
-rw-r--r--  src/arm/code-stubs-arm.cc | 14
-rw-r--r--  src/arm/codegen-arm.cc | 3
-rw-r--r--  src/arm/deoptimizer-arm.cc | 5
-rw-r--r--  src/arm/ic-arm.cc | 10
-rw-r--r--  src/arm/lithium-arm.cc | 241
-rw-r--r--  src/arm/lithium-arm.h | 297
-rw-r--r--  src/arm/lithium-codegen-arm.cc | 741
-rw-r--r--  src/arm/lithium-codegen-arm.h | 37
-rw-r--r--  src/arm/macro-assembler-arm.cc | 83
-rw-r--r--  src/arm/macro-assembler-arm.h | 54
-rw-r--r--  src/arm/regexp-macro-assembler-arm.cc | 10
-rw-r--r--  src/arm/regexp-macro-assembler-arm.h | 12
-rw-r--r--  src/arm/stub-cache-arm.cc | 60
-rw-r--r--  src/array.js | 32
-rw-r--r--  src/assembler.cc | 17
-rw-r--r--  src/assembler.h | 30
-rw-r--r--  src/ast.cc | 31
-rw-r--r--  src/ast.h | 4
-rw-r--r--  src/builtins.cc | 28
-rw-r--r--  src/code-stubs.cc | 2
-rw-r--r--  src/codegen.cc | 11
-rwxr-xr-x  src/compiler.cc | 35
-rw-r--r--  src/compiler.h | 16
-rw-r--r--  src/debug-debugger.js | 31
-rw-r--r--  src/extensions/experimental/experimental.gyp | 50
-rw-r--r--  src/flag-definitions.h | 9
-rw-r--r--  src/frames.cc | 51
-rw-r--r--  src/frames.h | 6
-rw-r--r--  src/full-codegen.cc | 11
-rw-r--r--  src/gdb-jit.cc | 1170
-rw-r--r--  src/gdb-jit.h | 136
-rw-r--r--  src/heap.cc | 5
-rw-r--r--  src/heap.h | 2
-rw-r--r--  src/hydrogen-instructions.cc | 8
-rw-r--r--  src/hydrogen-instructions.h | 83
-rw-r--r--  src/hydrogen.cc | 74
-rw-r--r--  src/hydrogen.h | 18
-rw-r--r--  src/ia32/assembler-ia32.cc | 45
-rw-r--r--  src/ia32/assembler-ia32.h | 4
-rw-r--r--  src/ia32/code-stubs-ia32.cc | 3
-rw-r--r--  src/ia32/codegen-ia32.cc | 335
-rw-r--r--  src/ia32/deoptimizer-ia32.cc | 9
-rw-r--r--  src/ia32/disasm-ia32.cc | 28
-rw-r--r--  src/ia32/full-codegen-ia32.cc | 334
-rw-r--r--  src/ia32/ic-ia32.cc | 12
-rw-r--r--  src/ia32/lithium-codegen-ia32.cc | 477
-rw-r--r--  src/ia32/lithium-codegen-ia32.h | 34
-rw-r--r--  src/ia32/lithium-gap-resolver-ia32.cc | 461
-rw-r--r--  src/ia32/lithium-gap-resolver-ia32.h | 110
-rw-r--r--  src/ia32/lithium-ia32.cc | 561
-rw-r--r--  src/ia32/lithium-ia32.h | 1400
-rw-r--r--  src/ia32/macro-assembler-ia32.cc | 96
-rw-r--r--  src/ia32/macro-assembler-ia32.h | 23
-rw-r--r--  src/ia32/stub-cache-ia32.cc | 49
-rw-r--r--  src/ic.cc | 309
-rw-r--r--  src/ic.h | 13
-rw-r--r--  src/inspector.cc | 63
-rw-r--r--  src/inspector.h | 62
-rw-r--r--  src/json.js | 78
-rw-r--r--  src/jsregexp.cc | 6
-rw-r--r--  src/jsregexp.h | 2
-rw-r--r--  src/lithium-allocator.cc | 17
-rw-r--r--  src/lithium-allocator.h | 51
-rw-r--r--  src/lithium.cc | 167
-rw-r--r--  src/lithium.h | 139
-rw-r--r--  src/liveedit-debugger.js | 8
-rw-r--r--  src/liveedit.cc | 123
-rw-r--r--  src/liveedit.h | 9
-rw-r--r--  src/mark-compact.cc | 12
-rw-r--r--  src/messages.js | 34
-rw-r--r--  src/objects-debug.cc | 2
-rw-r--r--  src/objects-inl.h | 65
-rw-r--r--  src/objects-visiting.h | 2
-rw-r--r--  src/objects.cc | 21
-rw-r--r--  src/objects.h | 170
-rw-r--r--  src/parser.cc | 73
-rw-r--r--  src/parser.h | 8
-rw-r--r--  src/platform-solaris.cc | 49
-rw-r--r--  src/platform-win32.cc | 2
-rw-r--r--  src/preparse-data.h | 2
-rw-r--r--  src/preparser-api.cc | 9
-rw-r--r--  src/preparser.cc | 8
-rw-r--r--  src/preparser.h | 4
-rw-r--r--  src/profile-generator.cc | 2
-rw-r--r--  src/regexp-macro-assembler-irregexp.h | 12
-rw-r--r--  src/regexp-macro-assembler-tracer.cc | 12
-rw-r--r--  src/regexp-macro-assembler-tracer.h | 12
-rw-r--r--  src/regexp-macro-assembler.h | 12
-rw-r--r--  src/rewriter.cc | 2
-rw-r--r--  src/runtime-profiler.cc | 7
-rw-r--r--  src/runtime.cc | 171
-rw-r--r--  src/runtime.h | 3
-rw-r--r--  src/safepoint-table.cc | 95
-rw-r--r--  src/safepoint-table.h | 127
-rw-r--r--  src/scanner-base.h | 13
-rwxr-xr-x  src/scanner.cc | 10
-rw-r--r--  src/scanner.h | 5
-rw-r--r--  src/scopeinfo.cc | 5
-rw-r--r--  src/scopes.cc | 204
-rw-r--r--  src/scopes.h | 39
-rw-r--r--  src/serialize.cc | 10
-rw-r--r--  src/stub-cache.cc | 171
-rw-r--r--  src/stub-cache.h | 18
-rw-r--r--  src/third_party/strongtalk/LICENSE | 29
-rw-r--r--  src/third_party/strongtalk/README.chromium | 18
-rw-r--r--  src/token.h | 6
-rw-r--r--  src/type-info.cc | 86
-rw-r--r--  src/type-info.h | 15
-rw-r--r--  src/v8globals.h | 3
-rw-r--r--  src/v8utils.h | 2
-rw-r--r--  src/variables.cc | 6
-rw-r--r--  src/variables.h | 7
-rw-r--r--  src/version.cc | 4
-rw-r--r--  src/x64/assembler-x64.cc | 20
-rw-r--r--  src/x64/assembler-x64.h | 30
-rw-r--r--  src/x64/code-stubs-x64.h | 1
-rw-r--r--  src/x64/deoptimizer-x64.cc | 10
-rw-r--r--  src/x64/frames-x64.h | 2
-rw-r--r--  src/x64/ic-x64.cc | 8
-rw-r--r--  src/x64/lithium-codegen-x64.cc | 1547
-rw-r--r--  src/x64/lithium-codegen-x64.h | 249
-rw-r--r--  src/x64/lithium-x64.cc | 1410
-rw-r--r--  src/x64/lithium-x64.h | 1922
-rw-r--r--  src/x64/macro-assembler-x64.cc | 2
-rw-r--r--  src/x64/stub-cache-x64.cc | 63
-rw-r--r--  src/zone-inl.h | 3
-rw-r--r--  src/zone.cc | 3
-rw-r--r--  src/zone.h | 4
133 files changed, 12083 insertions, 3323 deletions
diff --git a/src/SConscript b/src/SConscript
index 0c8e140d..708edeff 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -71,6 +71,7 @@ SOURCES = {
frames.cc
full-codegen.cc
func-name-inferrer.cc
+ gdb-jit.cc
global-handles.cc
fast-dtoa.cc
fixed-dtoa.cc
@@ -81,6 +82,7 @@ SOURCES = {
hydrogen.cc
hydrogen-instructions.cc
ic.cc
+ inspector.cc
interpreter-irregexp.cc
jsregexp.cc
jump-target.cc
@@ -190,6 +192,7 @@ SOURCES = {
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/lithium-codegen-ia32.cc
+ ia32/lithium-gap-resolver-ia32.cc
ia32/lithium-ia32.cc
ia32/macro-assembler-ia32.cc
ia32/regexp-macro-assembler-ia32.cc
@@ -213,6 +216,7 @@ SOURCES = {
x64/ic-x64.cc
x64/jump-target-x64.cc
x64/lithium-x64.cc
+ x64/lithium-codegen-x64.cc
x64/macro-assembler-x64.cc
x64/regexp-macro-assembler-x64.cc
x64/register-allocator-x64.cc
diff --git a/src/api.cc b/src/api.cc
index 110468e2..073306f0 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3266,18 +3266,35 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
}
+static bool CanBeEncodedAsSmi(void* ptr) {
+ const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+ return ((address & i::kEncodablePointerMask) == 0);
+}
+
+
+static i::Smi* EncodeAsSmi(void* ptr) {
+ ASSERT(CanBeEncodedAsSmi(ptr));
+ const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+ i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
+ ASSERT(i::Internals::HasSmiTag(result));
+ ASSERT_EQ(result, i::Smi::FromInt(result->value()));
+ ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
+ return result;
+}
+
+
void v8::Object::SetPointerInInternalField(int index, void* value) {
ENTER_V8;
- i::Object* as_object = reinterpret_cast<i::Object*>(value);
- if (as_object->IsSmi()) {
- Utils::OpenHandle(this)->SetInternalField(index, as_object);
- return;
+ if (CanBeEncodedAsSmi(value)) {
+ Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
+ } else {
+ HandleScope scope;
+ i::Handle<i::Proxy> proxy =
+ i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
+ if (!proxy.is_null())
+ Utils::OpenHandle(this)->SetInternalField(index, *proxy);
}
- HandleScope scope;
- i::Handle<i::Proxy> proxy =
- i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
- if (!proxy.is_null())
- Utils::OpenHandle(this)->SetInternalField(index, *proxy);
+ ASSERT_EQ(value, GetPointerFromInternalField(index));
}
@@ -3299,7 +3316,8 @@ bool v8::V8::Dispose() {
HeapStatistics::HeapStatistics(): total_heap_size_(0),
total_heap_size_executable_(0),
- used_heap_size_(0) { }
+ used_heap_size_(0),
+ heap_size_limit_(0) { }
void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
@@ -3307,6 +3325,7 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->set_total_heap_size_executable(
i::Heap::CommittedMemoryExecutable());
heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects());
+ heap_statistics->set_heap_size_limit(i::Heap::MaxReserved());
}
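
Note: the new heap_size_limit statistic follows the same getter/setter pattern as the existing fields, so an embedder can read it right after the call above. A minimal embedder-side sketch, assuming the matching HeapStatistics::heap_size_limit() accessor that v8.h pairs with set_heap_size_limit():

#include <v8.h>
#include <cstdio>

int main() {
  v8::HeapStatistics stats;
  v8::V8::GetHeapStatistics(&stats);  // fills the fields set above
  std::printf("used %lu of %lu bytes (hard limit %lu)\n",
              static_cast<unsigned long>(stats.used_heap_size()),
              static_cast<unsigned long>(stats.total_heap_size()),
              static_cast<unsigned long>(stats.heap_size_limit()));
  return 0;
}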
@@ -3560,11 +3579,13 @@ Local<Value> v8::External::Wrap(void* data) {
LOG_API("External::Wrap");
EnsureInitialized("v8::External::Wrap()");
ENTER_V8;
- i::Object* as_object = reinterpret_cast<i::Object*>(data);
- if (as_object->IsSmi()) {
- return Utils::ToLocal(i::Handle<i::Object>(as_object));
- }
- return ExternalNewImpl(data);
+
+ v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
+ ? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
+ : v8::Local<v8::Value>(ExternalNewImpl(data));
+
+ ASSERT_EQ(data, Unwrap(result));
+ return result;
}
@@ -3572,7 +3593,7 @@ void* v8::Object::SlowGetPointerFromInternalField(int index) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Object* value = obj->GetInternalField(index);
if (value->IsSmi()) {
- return value;
+ return i::Internals::GetExternalPointerFromSmi(value);
} else if (value->IsProxy()) {
return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
} else {
@@ -3586,8 +3607,7 @@ void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
void* result;
if (obj->IsSmi()) {
- // The external value was an aligned pointer.
- result = *obj;
+ result = i::Internals::GetExternalPointerFromSmi(*obj);
} else if (obj->IsProxy()) {
result = ExternalValueImpl(obj);
} else {
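
The aligned-pointer encoding above avoids allocating a Proxy whenever the raw address survives the Smi shift round trip. A standalone sketch of that invariant; the concrete mask and shift values below are assumptions chosen for illustration, not V8's actual kEncodablePointerMask / kPointerToSmiShift configuration:

#include <cassert>
#include <cstdint>

// Illustrative configuration: one Smi tag bit (tag value 0) at the bottom,
// pointers encoded by a left shift of 1. The mask covers exactly the bits
// destroyed by that shift, so a pointer is encodable iff they are zero.
const int kPointerToSmiShift = 1;                            // assumption
const uintptr_t kEncodablePointerMask =
    ~(~uintptr_t(0) >> kPointerToSmiShift);                  // assumption

bool CanBeEncodedAsSmi(void* ptr) {
  return (reinterpret_cast<uintptr_t>(ptr) & kEncodablePointerMask) == 0;
}

uintptr_t EncodeAsSmi(void* ptr) {
  uintptr_t smi = reinterpret_cast<uintptr_t>(ptr) << kPointerToSmiShift;
  assert((smi & 1) == 0);  // result carries the Smi tag (0) in the low bit
  return smi;
}

void* GetExternalPointerFromSmi(uintptr_t smi) {
  return reinterpret_cast<void*>(smi >> kPointerToSmiShift);
}

int main() {
  int object = 42;
  void* p = &object;
  if (CanBeEncodedAsSmi(p)) {
    // The round trip the ASSERTs in EncodeAsSmi above are checking.
    assert(GetExternalPointerFromSmi(EncodeAsSmi(p)) == p);
  }
  return 0;
}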
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index fbe97ad9..11a9c393 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -2337,34 +2337,28 @@ void Assembler::vdiv(const DwVfpRegister dst,
void Assembler::vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
- const SBit s,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
- // We set bit E, as we want any NaN to set the cumulative exception flag
- // in the FPSCR.
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=1 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
- src1.code()*B12 | 0x5*B9 | B8 | B7 | B6 | src2.code());
+ src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
void Assembler::vcmp(const DwVfpRegister src1,
const double src2,
- const SBit s,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
// Instruction details available in ARM DDI 0406A, A8-570.
- // We set bit E, as we want any NaN to set the cumulative exception flag
- // in the FPSCR.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=1 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(src2 == 0.0);
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
- src1.code()*B12 | 0x5*B9 | B8 | B7 | B6);
+ src1.code()*B12 | 0x5*B9 | B8 | B6);
}
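
The behavioral change in both hunks is bit 7, the E bit: with E set the instruction is vcmpe, which raises an Invalid Operation exception for any NaN operand, while with E clear the compare is quiet and only a signaling NaN traps. A standalone sketch of the encoding, following the bit-layout comments above and the Bn == 1 << n convention from V8's ARM assembler:

#include <cstdint>
#include <cstdio>

// Encodes vcmp{e}.f64 d<vd>, d<vm> with condition al, per the layout:
// cond | 11101 | D | 11 | 0100 | Vd | 101 | sz=1 | E | 1 | M | 0 | Vm
uint32_t EncodeVcmp(int vd, int vm, bool e_bit) {
  auto B = [](int n) { return uint32_t(1) << n; };  // Bn in the V8 source
  uint32_t insn = (0xEu << 28)           // cond = al
                | 0xE * B(24) | B(23)    // 11101, D = 0
                | 0x3 * B(20) | B(18)    // 11, opcode group
                | uint32_t(vd) * B(12)   // Vd
                | 0x5 * B(9) | B(8)      // 101, sz = 1 (double precision)
                | B(6)                   // fixed 1 bit
                | uint32_t(vm);          // Vm
  if (e_bit) insn |= B(7);               // E: trap on any NaN (vcmpe)
  return insn;
}

int main() {
  std::printf("vcmpe.f64 d7, d6 : 0x%08x\n", EncodeVcmp(7, 6, true));
  std::printf("vcmp.f64  d7, d6 : 0x%08x\n", EncodeVcmp(7, 6, false));
  // The two words (0xEEB47BC6 vs 0xEEB47B46) differ only in bit 7,
  // the E bit this patch clears.
  return 0;
}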
@@ -2501,6 +2495,10 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
+ // No relocation info should be pending while using db. db is used
+ // to write pure data with no pointers and the constant pool should
+ // be emitted before using db.
+ ASSERT(num_prinfo_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -2508,6 +2506,10 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data) {
+ // No relocation info should be pending while using dd. dd is used
+ // to write pure data with no pointers and the constant pool should
+ // be emitted before using dd.
+ ASSERT(num_prinfo_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 7e8c084c..ad1bdabd 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -167,6 +167,9 @@ struct SwVfpRegister {
struct DwVfpRegister {
// d0 has been excluded from allocation. This is following ia32
// where xmm0 is excluded. This should be revisited.
+ // Currently d0 is used as a scratch register.
+ // d1 has also been excluded from allocation to be used as a scratch
+ // register as well.
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 15;
@@ -298,12 +301,17 @@ const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// VFP FPSCR constants.
-static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
+static const uint32_t kVFPNConditionFlagBit = 1 << 31;
+static const uint32_t kVFPZConditionFlagBit = 1 << 30;
+static const uint32_t kVFPCConditionFlagBit = 1 << 29;
+static const uint32_t kVFPVConditionFlagBit = 1 << 28;
+
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPInvalidExceptionBit = 1;
+
+static const uint32_t kVFPExceptionMask = 0xf;
// Coprocessor register
struct CRegister {
@@ -1147,11 +1155,9 @@ class Assembler : public Malloced {
const Condition cond = al);
void vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
- const SBit s = LeaveCC,
const Condition cond = al);
void vcmp(const DwVfpRegister src1,
const double src2,
- const SBit s = LeaveCC,
const Condition cond = al);
void vmrs(const Register dst,
const Condition cond = al);
@@ -1234,8 +1240,10 @@ class Assembler : public Malloced {
// Use --code-comments to enable.
void RecordComment(const char* msg);
- // Writes a single byte or word of data in the code stream. Used for
- // inline tables, e.g., jump-tables.
+ // Writes a single byte or word of data in the code stream. Used
+ // for inline tables, e.g., jump-tables. The constant pool should be
+ // emitted before any use of db and dd to ensure that constant pools
+ // are not emitted as part of the tables generated.
void db(uint8_t data);
void dd(uint32_t data);
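
Taken together with the new ASSERT(num_prinfo_ == 0) checks in assembler-arm.cc, this documents an ordering contract between constant pool emission and raw data. A hedged sketch of the intended usage when emitting an inline jump table; CheckConstPool(true, false) appears elsewhere in this patch, while the table variables here are illustrative:

// Flush any pending constant pool first, so no pool entry can land
// inside the raw data that follows (which would corrupt the table).
masm->CheckConstPool(true,    // force_emit
                     false);  // require_jump: none needed here
for (int i = 0; i < kTableSize; i++) {
  masm->dd(jump_table[i]);    // raw 32-bit words; num_prinfo_ is now zero
}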
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 6480a916..0210b1b9 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -502,7 +502,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the first arguments in r0 and get rid of the rest.
Label no_arguments;
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(eq, &no_arguments);
// First args = sp[(argc - 1) * 4].
__ sub(r0, r0, Operand(1));
@@ -546,7 +546,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
__ Assert(eq, "Unexpected string wrapper instance size");
__ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmp(r4, Operand(0));
+ __ cmp(r4, Operand(0, RelocInfo::NONE));
__ Assert(eq, "Unexpected unused properties of string wrapper");
}
__ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index e72c5d30..8589cf0e 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -866,8 +866,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ vldr(d0, scratch2, HeapNumber::kValueOffset);
__ sub(probe, probe, Operand(kHeapObjectTag));
__ vldr(d1, probe, HeapNumber::kValueOffset);
- __ vcmp(d0, d1);
- __ vmrs(pc);
+ __ VFPCompareAndSetFlags(d0, d1);
__ b(ne, not_found); // The cache did not contain this value.
__ b(&load_result_from_cache);
} else {
@@ -975,8 +974,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope scope(VFP3);
Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
- __ vcmp(d7, d6);
- __ vmrs(pc); // Move vector status bits to normal status bits.
+ __ VFPCompareAndSetFlags(d7, d6);
Label nan;
__ b(vs, &nan);
__ mov(r0, Operand(EQUAL), LeaveCC, eq);
@@ -1096,8 +1094,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ sub(ip, tos_, Operand(kHeapObjectTag));
__ vldr(d1, ip, HeapNumber::kValueOffset);
- __ vcmp(d1, 0.0);
- __ vmrs(pc);
+ __ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non zero value by default.
// Hence we only need to overwrite "tos_" with zero to return false for
// FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
@@ -2519,7 +2516,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ mov(r0, Operand(false));
+ __ mov(r0, Operand(false, RelocInfo::NONE));
__ mov(r2, Operand(external_caught));
__ str(r0, MemOperand(r2));
@@ -4915,8 +4912,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ vldr(d1, r2, HeapNumber::kValueOffset);
// Compare operands
- __ vcmp(d0, d1);
- __ vmrs(pc); // Move vector status bits to normal status bits.
+ __ VFPCompareAndSetFlags(d0, d1);
// Don't base result on status bits when a NaN is involved.
__ b(vs, &unordered);
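
Each of the hunks above folds the recurring vcmp/vmrs pair into one macro-assembler helper. A sketch of what the new MacroAssembler::VFPCompareAndSetFlags presumably expands to, inferred from the call sites it replaces rather than copied from the patch:

// Compares two double registers and copies the FPSCR condition flags
// (N, Z, C, V) into the APSR so ordinary conditional branches work.
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  vcmp(src1, src2, cond);  // sets the FPSCR flags
  vmrs(pc, cond);          // vmrs APSR_nzcv, FPSCR
}

// Overload for the compare-against-0.0 case in ToBooleanStub::Generate.
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  vcmp(src1, src2, cond);
  vmrs(pc, cond);
}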
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index d41c1d23..4a982f6e 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -4667,8 +4667,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
__ mov(scratch2, Operand(0x7FF00000));
__ mov(scratch1, Operand(0, RelocInfo::NONE));
__ vmov(d1, scratch1, scratch2); // Load infinity into d1.
- __ vcmp(d0, d1);
- __ vmrs(pc);
+ __ VFPCompareAndSetFlags(d0, d1);
runtime.Branch(eq); // d0 reached infinity.
__ vdiv(d0, d2, d0);
__ b(&allocate_return);
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 3917d6df..8a53d1cb 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -55,8 +55,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
- int deoptimization_index = table.GetDeoptimizationIndex(i);
- int gap_code_size = table.GetGapCodeSize(i);
+ SafepointEntry safepoint_entry = table.GetEntry(i);
+ int deoptimization_index = safepoint_entry.deoptimization_index();
+ int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past next safepoint.
// TODO(srdjan): How do we guarantee that safepoint code does not
// overlap other safepoint patching code?
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index e5a1bae9..6120bba4 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -542,8 +542,12 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Label number, non_number, non_string, boolean, probe, miss;
// Probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ NOT_IN_LOOP,
+ MONOMORPHIC,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
// If the stub cache probing failed, the receiver might be a value.
@@ -1591,7 +1595,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
- __ teq(r1, Operand(0x00));
+ __ teq(r1, Operand(0x00, RelocInfo::NONE));
__ b(eq, &exponent_rebiased);
__ teq(r1, Operand(0xff));
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index e53e96da..b51633e7 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -93,32 +93,6 @@ void LLabel::PrintDataTo(StringStream* stream) const {
}
-bool LParallelMove::IsRedundant() const {
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsRedundant()) return false;
- }
- return true;
-}
-
-
-void LParallelMove::PrintDataTo(StringStream* stream) const {
- for (int i = move_operands_.length() - 1; i >= 0; --i) {
- if (!move_operands_[i].IsEliminated()) {
- LOperand* from = move_operands_[i].from();
- LOperand* to = move_operands_[i].to();
- if (from->Equals(to)) {
- to->PrintTo(stream);
- } else {
- to->PrintTo(stream);
- stream->Add(" = ");
- from->PrintTo(stream);
- }
- stream->Add("; ");
- }
- }
-}
-
-
bool LGap::IsRedundant() const {
for (int i = 0; i < 4; i++) {
if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
@@ -270,6 +244,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
}
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) const {
stream->Add("[r2] #%d / ", arity());
}
@@ -597,6 +576,13 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new LUnallocated(LUnallocated::ANY));
+}
+
+
LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
if (value->EmitAtUses()) {
HInstruction* instr = HInstruction::cast(value);
@@ -702,13 +688,6 @@ LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
}
-LOperand* LChunkBuilder::Temp() {
- LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
allocator_->RecordTemporary(operand);
@@ -913,59 +892,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-void LEnvironment::WriteTranslation(LCodeGen* cgen,
- Translation* translation) const {
- if (this == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - parameter_count();
-
- outer()->WriteTranslation(cgen, translation);
- int closure_id = cgen->DefineDeoptimizationLiteral(closure());
- translation->BeginFrame(ast_id(), closure_id, height);
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (spilled_registers_ != NULL && value != NULL) {
- if (value->IsRegister() &&
- spilled_registers_[value->index()] != NULL) {
- translation->MarkDuplicate();
- cgen->AddToTranslation(translation,
- spilled_registers_[value->index()],
- HasTaggedValueAt(i));
- } else if (value->IsDoubleRegister() &&
- spilled_double_registers_[value->index()] != NULL) {
- translation->MarkDuplicate();
- cgen->AddToTranslation(translation,
- spilled_double_registers_[value->index()],
- false);
- }
- }
-
- cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
- }
-}
-
-
-void LEnvironment::PrintTo(StringStream* stream) const {
- stream->Add("[id=%d|", ast_id());
- stream->Add("[parameters=%d|", parameter_count());
- stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
- for (int i = 0; i < values_.length(); ++i) {
- if (i != 0) stream->Add(";");
- if (values_[i] == NULL) {
- stream->Add("[hole]");
- } else {
- values_[i]->PrintTo(stream);
- }
- }
- stream->Add("]");
-}
-
-
LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (hydrogen_env == NULL) return NULL;
@@ -988,11 +914,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
} else if (value->IsPushArgument()) {
op = new LArgument(argument_index++);
} else {
- op = UseOrConstant(value);
- if (op->IsUnallocated()) {
- LUnallocated* unalloc = LUnallocated::cast(op);
- unalloc->set_policy(LUnallocated::ANY);
- }
+ op = UseAny(value);
}
result->AddValue(op, value->representation());
}
@@ -1024,7 +946,6 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
TempRegister(),
- TempRegister(),
first_id,
second_id);
} else if (v->IsCompare()) {
@@ -1032,22 +953,21 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
Token::Value op = compare->token();
HValue* left = compare->left();
HValue* right = compare->right();
- if (left->representation().IsInteger32()) {
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(op,
- UseRegisterAtStart(left),
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseOrConstantAtStart(right),
first_id,
- second_id,
- false);
- } else if (left->representation().IsDouble()) {
+ second_id);
+ } else if (r.IsDouble()) {
+ ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(op,
- UseRegisterAtStart(left),
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseRegisterAtStart(right),
first_id,
- second_id,
- true);
+ second_id);
} else {
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
@@ -1085,7 +1005,6 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
ASSERT(compare->value()->representation().IsTagged());
return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- compare->is_strict(),
first_id,
second_id);
} else if (v->IsIsObject()) {
@@ -1209,7 +1128,8 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
LOperand* input = UseRegisterAtStart(instr->value());
- LInstruction* result = new LUnaryMathOperation(input);
+ LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+ LInstruction* result = new LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1326,12 +1246,15 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- FixedTemp(r1);
+ // TODO(1042) The fixed register allocation
+ // is needed because we call GenericBinaryOpStub from
+ // the generated code, which requires registers r0
+ // and r1 to be used. We should remove that
+ // when we provide a native implementation.
LOperand* value = UseFixed(instr->left(), r0);
- LOperand* divisor = UseRegister(instr->right());
- return AssignEnvironment(DefineFixed(new LDivI(value, divisor), r0));
+ LOperand* divisor = UseFixed(instr->right(), r1);
+ return AssignEnvironment(AssignPointerMap(
+ DefineFixed(new LDivI(value, divisor), r0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1340,18 +1263,17 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
+ // TODO(1042) The fixed register allocation
+ // is needed because we call GenericBinaryOpStub from
+ // the generated code, which requires registers r0
+ // and r1 to be used. We should remove that
+ // when we provide a native implementation.
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- FixedTemp(r1);
LOperand* value = UseFixed(instr->left(), r0);
- LOperand* divisor = UseRegister(instr->right());
- LInstruction* result = DefineFixed(new LModI(value, divisor), r1);
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
- result = AssignEnvironment(result);
- }
+ LOperand* divisor = UseFixed(instr->right(), r1);
+ LInstruction* result = DefineFixed(new LModI(value, divisor), r0);
+ result = AssignEnvironment(AssignPointerMap(result));
return result;
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
@@ -1437,17 +1359,22 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
Token::Value op = instr->token();
- if (instr->left()->representation().IsInteger32()) {
+ Representation r = instr->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(op, left, right, false));
- } else if (instr->left()->representation().IsDouble()) {
+ return DefineAsRegister(new LCmpID(left, right));
+ } else if (r.IsDouble()) {
+ ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(op, left, right, true));
+ return DefineAsRegister(new LCmpID(left, right));
} else {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
@@ -1470,8 +1397,7 @@ LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LIsNull(value,
- instr->is_strict()));
+ return DefineAsRegister(new LIsNull(value));
}
@@ -1511,8 +1437,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseTempRegister(instr->value());
-
- return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+ return DefineSameAsFirst(new LClassOfTest(value));
}
@@ -1625,11 +1550,7 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LInstruction* result =
- new LCheckPrototypeMaps(temp1,
- temp2,
- instr->holder(),
- instr->receiver_map());
+ LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
return AssignEnvironment(result);
}
@@ -1669,7 +1590,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT(instr->handle()));
} else {
- Abort("unsupported constant of type double");
+ UNREACHABLE();
return NULL;
}
}
@@ -1688,6 +1609,11 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
}
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ return DefineAsRegister(new LLoadContextSlot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
return DefineAsRegister(
new LLoadNamedField(UseRegisterAtStart(instr->object())));
@@ -1716,23 +1642,12 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
- Representation r = instr->representation();
- LOperand* obj = UseRegisterAtStart(instr->object());
+ ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* load_result = NULL;
- // Double needs an extra temp, because the result is converted from heap
- // number to a double register.
- if (r.IsDouble()) load_result = TempRegister();
- LInstruction* result = new LLoadKeyedFastElement(obj,
- key,
- load_result);
- if (r.IsDouble()) {
- result = DefineAsRegister(result);
- } else {
- result = DefineSameAsFirst(result);
- }
- return AssignEnvironment(result);
+ LInstruction* result = new LLoadKeyedFastElement(obj, key);
+ return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1789,13 +1704,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
? UseTempRegister(instr->value())
: UseRegister(instr->value());
- return new LStoreNamedField(obj,
- instr->name(),
- val,
- instr->is_in_object(),
- instr->offset(),
- needs_write_barrier,
- instr->transition());
+ return new LStoreNamedField(obj, val);
}
@@ -1803,7 +1712,7 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), r1);
LOperand* val = UseFixed(instr->value(), r0);
- LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+ LInstruction* result = new LStoreNamedGeneric(obj, val);
return MarkAsCall(result, instr);
}
@@ -1829,8 +1738,9 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LInstruction* result = new LDeleteProperty(Use(instr->object()),
- UseOrConstant(instr->key()));
+ LOperand* object = UseRegisterAtStart(instr->object());
+ LOperand* key = UseRegisterAtStart(instr->key());
+ LInstruction* result = new LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1944,21 +1854,4 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-void LPointerMap::RecordPointer(LOperand* op) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- pointer_operands_.Add(op);
-}
-
-
-void LPointerMap::PrintTo(StringStream* stream) const {
- stream->Add("{");
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (i != 0) stream->Add(";");
- pointer_operands_[i]->PrintTo(stream);
- }
- stream->Add("} @%d", position());
-}
-
} } // namespace v8::internal
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 4ddb281b..aab40811 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -38,8 +38,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-class LEnvironment;
-class Translation;
// Type hierarchy:
@@ -78,6 +76,7 @@ class Translation;
// LCallNamed
// LCallRuntime
// LCallStub
+// LCheckPrototypeMaps
// LConstant
// LConstantD
// LConstantI
@@ -87,7 +86,8 @@ class Translation;
// LGlobalObject
// LGlobalReceiver
// LLabel
-// LLayzBailout
+// LLazyBailout
+// LLoadContextSlot
// LLoadGlobal
// LMaterializedLiteral
// LArrayLiteral
@@ -111,7 +111,6 @@ class Translation;
// LCheckFunction
// LCheckInstanceType
// LCheckMap
-// LCheckPrototypeMaps
// LCheckSmi
// LClassOfTest
// LClassOfTestAndBranch
@@ -223,6 +222,7 @@ class Translation;
V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
@@ -332,27 +332,6 @@ class LInstruction: public ZoneObject {
};
-class LParallelMove : public ZoneObject {
- public:
- LParallelMove() : move_operands_(4) { }
-
- void AddMove(LOperand* from, LOperand* to) {
- move_operands_.Add(LMoveOperands(from, to));
- }
-
- bool IsRedundant() const;
-
- const ZoneList<LMoveOperands>* move_operands() const {
- return &move_operands_;
- }
-
- void PrintDataTo(StringStream* stream) const;
-
- private:
- ZoneList<LMoveOperands> move_operands_;
-};
-
-
class LGap: public LInstruction {
public:
explicit LGap(HBasicBlock* block)
@@ -466,6 +445,10 @@ class LCallStub: public LInstruction {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+ TranscendentalCache::Type transcendental_type() {
+ return hydrogen()->transcendental_type();
+ }
};
@@ -602,29 +585,26 @@ class LMulI: public LBinaryOperation {
class LCmpID: public LBinaryOperation {
public:
- LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
- : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
+ LCmpID(LOperand* left, LOperand* right)
+ : LBinaryOperation(left, right) { }
- Token::Value op() const { return op_; }
- bool is_double() const { return is_double_; }
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
-
- private:
- Token::Value op_;
- bool is_double_;
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
};
class LCmpIDAndBranch: public LCmpID {
public:
- LCmpIDAndBranch(Token::Value op,
- LOperand* left,
+ LCmpIDAndBranch(LOperand* left,
LOperand* right,
int true_block_id,
- int false_block_id,
- bool is_double)
- : LCmpID(op, left, right, is_double),
+ int false_block_id)
+ : LCmpID(left, right),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
@@ -643,14 +623,18 @@ class LCmpIDAndBranch: public LCmpID {
class LUnaryMathOperation: public LUnaryOperation {
public:
- explicit LUnaryMathOperation(LOperand* value)
- : LUnaryOperation(value) { }
+ explicit LUnaryMathOperation(LOperand* value, LOperand* temp)
+ : LUnaryOperation(value), temp_(temp) { }
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
virtual void PrintDataTo(StringStream* stream) const;
BuiltinFunctionId op() const { return hydrogen()->op(); }
+ LOperand* temp() const { return temp_; }
+
+ private:
+ LOperand* temp_;
};
@@ -687,25 +671,21 @@ class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
class LIsNull: public LUnaryOperation {
public:
- LIsNull(LOperand* value, bool is_strict)
- : LUnaryOperation(value), is_strict_(is_strict) {}
+ explicit LIsNull(LOperand* value) : LUnaryOperation(value) {}
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+ DECLARE_HYDROGEN_ACCESSOR(IsNull);
- bool is_strict() const { return is_strict_; }
-
- private:
- bool is_strict_;
+ bool is_strict() const { return hydrogen()->is_strict(); }
};
class LIsNullAndBranch: public LIsNull {
public:
LIsNullAndBranch(LOperand* value,
- bool is_strict,
int true_block_id,
int false_block_id)
- : LIsNull(value, is_strict),
+ : LIsNull(value),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
@@ -865,18 +845,12 @@ class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
class LClassOfTest: public LUnaryOperation {
public:
- LClassOfTest(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temporary_(temp) {}
+ explicit LClassOfTest(LOperand* value) : LUnaryOperation(value) {}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
virtual void PrintDataTo(StringStream* stream) const;
-
- LOperand* temporary() { return temporary_; }
-
- private:
- LOperand *temporary_;
};
@@ -884,11 +858,10 @@ class LClassOfTestAndBranch: public LClassOfTest {
public:
LClassOfTestAndBranch(LOperand* value,
LOperand* temporary,
- LOperand* temporary2,
int true_block_id,
int false_block_id)
- : LClassOfTest(value, temporary),
- temporary2_(temporary2),
+ : LClassOfTest(value),
+ temporary_(temporary),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
@@ -899,10 +872,10 @@ class LClassOfTestAndBranch: public LClassOfTest {
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
- LOperand* temporary2() { return temporary2_; }
+ LOperand* temporary() { return temporary_; }
private:
- LOperand* temporary2_;
+ LOperand* temporary_;
int true_block_id_;
int false_block_id_;
};
@@ -1263,21 +1236,14 @@ class LLoadElements: public LUnaryOperation {
class LLoadKeyedFastElement: public LBinaryOperation {
public:
- LLoadKeyedFastElement(LOperand* elements,
- LOperand* key,
- LOperand* load_result)
- : LBinaryOperation(elements, key),
- load_result_(load_result) { }
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key)
+ : LBinaryOperation(elements, key) { }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
LOperand* elements() const { return left(); }
LOperand* key() const { return right(); }
- LOperand* load_result() const { return load_result_; }
-
- private:
- LOperand* load_result_;
};
@@ -1309,6 +1275,20 @@ class LStoreGlobal: public LUnaryOperation {
};
+class LLoadContextSlot: public LInstruction {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int context_chain_length() const {
+ return hydrogen()->context_chain_length();
+ }
+ int slot_index() const { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LPushArgument: public LUnaryOperation {
public:
explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
@@ -1513,63 +1493,46 @@ class LSmiUntag: public LUnaryOperation {
class LStoreNamed: public LInstruction {
public:
- LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
- : object_(obj), name_(name), value_(val) { }
+ LStoreNamed(LOperand* obj, LOperand* val)
+ : object_(obj), value_(val) { }
DECLARE_INSTRUCTION(StoreNamed)
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
virtual void PrintDataTo(StringStream* stream) const;
LOperand* object() const { return object_; }
- Handle<Object> name() const { return name_; }
+ Handle<Object> name() const { return hydrogen()->name(); }
LOperand* value() const { return value_; }
private:
LOperand* object_;
- Handle<Object> name_;
LOperand* value_;
};
class LStoreNamedField: public LStoreNamed {
public:
- LStoreNamedField(LOperand* obj,
- Handle<Object> name,
- LOperand* val,
- bool in_object,
- int offset,
- bool needs_write_barrier,
- Handle<Map> transition)
- : LStoreNamed(obj, name, val),
- is_in_object_(in_object),
- offset_(offset),
- needs_write_barrier_(needs_write_barrier),
- transition_(transition) { }
+ LStoreNamedField(LOperand* obj, LOperand* val)
+ : LStoreNamed(obj, val) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- bool is_in_object() { return is_in_object_; }
- int offset() { return offset_; }
- bool needs_write_barrier() { return needs_write_barrier_; }
- Handle<Map> transition() const { return transition_; }
- void set_transition(Handle<Map> map) { transition_ = map; }
-
- private:
- bool is_in_object_;
- int offset_;
- bool needs_write_barrier_;
- Handle<Map> transition_;
+ bool is_in_object() { return hydrogen()->is_in_object(); }
+ int offset() { return hydrogen()->offset(); }
+ bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+ Handle<Map> transition() { return hydrogen()->transition(); }
};
class LStoreNamedGeneric: public LStoreNamed {
public:
- LStoreNamedGeneric(LOperand* obj,
- Handle<Object> name,
- LOperand* val)
- : LStoreNamed(obj, name, val) { }
+ LStoreNamedGeneric(LOperand* obj, LOperand* val)
+ : LStoreNamed(obj, val) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
};
@@ -1647,27 +1610,21 @@ class LCheckMap: public LUnaryOperation {
class LCheckPrototypeMaps: public LInstruction {
public:
- LCheckPrototypeMaps(LOperand* temp1,
- LOperand* temp2,
- Handle<JSObject> holder,
- Handle<Map> receiver_map)
- : temp1_(temp1),
- temp2_(temp2),
- holder_(holder),
- receiver_map_(receiver_map) { }
+ LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2)
+ : temp1_(temp1), temp2_(temp2) { }
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+ Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+ Handle<JSObject> holder() const { return hydrogen()->holder(); }
LOperand* temp1() const { return temp1_; }
LOperand* temp2() const { return temp2_; }
- Handle<JSObject> holder() const { return holder_; }
- Handle<Map> receiver_map() const { return receiver_map_; }
private:
LOperand* temp1_;
LOperand* temp2_;
- Handle<JSObject> holder_;
- Handle<Map> receiver_map_;
};
@@ -1807,108 +1764,6 @@ class LStackCheck: public LInstruction {
};
-class LPointerMap: public ZoneObject {
- public:
- explicit LPointerMap(int position)
- : pointer_operands_(8), position_(position), lithium_position_(-1) { }
-
- const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
- int position() const { return position_; }
- int lithium_position() const { return lithium_position_; }
-
- void set_lithium_position(int pos) {
- ASSERT(lithium_position_ == -1);
- lithium_position_ = pos;
- }
-
- void RecordPointer(LOperand* op);
- void PrintTo(StringStream* stream) const;
-
- private:
- ZoneList<LOperand*> pointer_operands_;
- int position_;
- int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
- LEnvironment(Handle<JSFunction> closure,
- int ast_id,
- int parameter_count,
- int argument_count,
- int value_count,
- LEnvironment* outer)
- : closure_(closure),
- arguments_stack_height_(argument_count),
- deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
- translation_index_(-1),
- ast_id_(ast_id),
- parameter_count_(parameter_count),
- values_(value_count),
- representations_(value_count),
- spilled_registers_(NULL),
- spilled_double_registers_(NULL),
- outer_(outer) {
- }
-
- Handle<JSFunction> closure() const { return closure_; }
- int arguments_stack_height() const { return arguments_stack_height_; }
- int deoptimization_index() const { return deoptimization_index_; }
- int translation_index() const { return translation_index_; }
- int ast_id() const { return ast_id_; }
- int parameter_count() const { return parameter_count_; }
- const ZoneList<LOperand*>* values() const { return &values_; }
- LEnvironment* outer() const { return outer_; }
-
- void AddValue(LOperand* operand, Representation representation) {
- values_.Add(operand);
- representations_.Add(representation);
- }
-
- bool HasTaggedValueAt(int index) const {
- return representations_[index].IsTagged();
- }
-
- void Register(int deoptimization_index, int translation_index) {
- ASSERT(!HasBeenRegistered());
- deoptimization_index_ = deoptimization_index;
- translation_index_ = translation_index;
- }
- bool HasBeenRegistered() const {
- return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
- }
-
- void SetSpilledRegisters(LOperand** registers,
- LOperand** double_registers) {
- spilled_registers_ = registers;
- spilled_double_registers_ = double_registers;
- }
-
- // Emit frame translation commands for this environment.
- void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
-
- void PrintTo(StringStream* stream) const;
-
- private:
- Handle<JSFunction> closure_;
- int arguments_stack_height_;
- int deoptimization_index_;
- int translation_index_;
- int ast_id_;
- int parameter_count_;
- ZoneList<LOperand*> values_;
- ZoneList<Representation> representations_;
-
- // Allocation index indexed arrays of spill slot operands for registers
- // that are also in spill slots at an OSR entry. NULL for environments
- // that do not correspond to an OSR entry.
- LOperand** spilled_registers_;
- LOperand** spilled_double_registers_;
-
- LEnvironment* outer_;
-};
-
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
@@ -2032,15 +1887,25 @@ class LChunkBuilder BASE_EMBEDDED {
LOperand* UseRegister(HValue* value);
LOperand* UseRegisterAtStart(HValue* value);
- // A value in a register that may be trashed.
+ // An input operand in a register that may be trashed.
LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
LOperand* Use(HValue* value);
LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
LOperand* UseOrConstant(HValue* value);
LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
LOperand* UseRegisterOrConstant(HValue* value);
LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ LOperand* UseAny(HValue* value);
+
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
LInstruction* Define(LInstruction* instr, LUnallocated* result);
@@ -2069,8 +1934,6 @@ class LChunkBuilder BASE_EMBEDDED {
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
- // Temporary operand that may be a memory location.
- LOperand* Temp();
// Temporary operand that must be in a register.
LUnallocated* TempRegister();
LOperand* FixedTemp(Register reg);
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index f53cebbb..55df8b4c 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -54,6 +54,157 @@ class SafepointGenerator : public PostCallGenerator {
};
+class LGapNode: public ZoneObject {
+ public:
+ explicit LGapNode(LOperand* operand)
+ : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+ LOperand* operand() const { return operand_; }
+ bool IsResolved() const { return !IsAssigned() || resolved_; }
+ void MarkResolved() {
+ ASSERT(!IsResolved());
+ resolved_ = true;
+ }
+ int visited_id() const { return visited_id_; }
+ void set_visited_id(int id) {
+ ASSERT(id > visited_id_);
+ visited_id_ = id;
+ }
+
+ bool IsAssigned() const { return assigned_from_.is_set(); }
+ LGapNode* assigned_from() const { return assigned_from_.get(); }
+ void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+ LOperand* operand_;
+ SetOncePointer<LGapNode> assigned_from_;
+ bool resolved_;
+ int visited_id_;
+};
+
+
+LGapResolver::LGapResolver()
+ : nodes_(32),
+ identified_cycles_(4),
+ result_(16),
+ next_visited_id_(0) {
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::Resolve(
+ const ZoneList<LMoveOperands>* moves,
+ LOperand* marker_operand) {
+ nodes_.Rewind(0);
+ identified_cycles_.Rewind(0);
+ result_.Rewind(0);
+ next_visited_id_ = 0;
+
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) RegisterMove(move);
+ }
+
+ for (int i = 0; i < identified_cycles_.length(); ++i) {
+ ResolveCycle(identified_cycles_[i], marker_operand);
+ }
+
+ int unresolved_nodes;
+ do {
+ unresolved_nodes = 0;
+ for (int j = 0; j < nodes_.length(); j++) {
+ LGapNode* node = nodes_[j];
+ if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+ AddResultMove(node->assigned_from(), node);
+ node->MarkResolved();
+ }
+ if (!node->IsResolved()) ++unresolved_nodes;
+ }
+ } while (unresolved_nodes > 0);
+ return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+ AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+ result_.Add(LMoveOperands(from, to));
+}
+
+
+void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
+ ZoneList<LOperand*> cycle_operands(8);
+ cycle_operands.Add(marker_operand);
+ LGapNode* cur = start;
+ do {
+ cur->MarkResolved();
+ cycle_operands.Add(cur->operand());
+ cur = cur->assigned_from();
+ } while (cur != start);
+ cycle_operands.Add(marker_operand);
+
+ for (int i = cycle_operands.length() - 1; i > 0; --i) {
+ LOperand* from = cycle_operands[i];
+ LOperand* to = cycle_operands[i - 1];
+ AddResultMove(from, to);
+ }
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+ ASSERT(a != b);
+ LGapNode* cur = a;
+ while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+ cur->set_visited_id(visited_id);
+ cur = cur->assigned_from();
+ }
+
+ return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+ ASSERT(a != b);
+ return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+ if (move.source()->IsConstantOperand()) {
+ // Constant moves should be last in the machine code. Therefore add them
+ // first to the result set.
+ AddResultMove(move.source(), move.destination());
+ } else {
+ LGapNode* from = LookupNode(move.source());
+ LGapNode* to = LookupNode(move.destination());
+ if (to->IsAssigned() && to->assigned_from() == from) {
+ move.Eliminate();
+ return;
+ }
+ ASSERT(!to->IsAssigned());
+ if (CanReach(from, to)) {
+ // This introduces a cycle. Save.
+ identified_cycles_.Add(from);
+ }
+ to->set_assigned_from(from);
+ }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+ for (int i = 0; i < nodes_.length(); ++i) {
+ if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+ }
+
+ // No node found => create a new one.
+ LGapNode* result = new LGapNode(operand);
+ nodes_.Add(result);
+ return result;
+}
+
+
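
Worked example of the resolver on the simplest cycle, the swap {r1 <- r0, r0 <- r1}: RegisterMove detects the cycle via CanReach, ResolveCycle brackets it with the marker operand, and because DoParallelMove (below) walks the result list in reverse, the moves execute as ip = r1; r1 = r0; r0 = ip. A standalone simulation of that executed order (plain C++, not V8 source):

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main() {
  std::map<std::string, int> regs = {{"r0", 10}, {"r1", 20}, {"ip", 0}};
  // Executed order after the reverse walk: save one cycle member to the
  // marker, shift the rest along the cycle, then restore from the marker.
  std::vector<std::pair<std::string, std::string> > moves = {
      {"ip", "r1"}, {"r1", "r0"}, {"r0", "ip"}};  // dst <- src
  for (size_t i = 0; i < moves.size(); ++i)
    regs[moves[i].first] = regs[moves[i].second];
  std::printf("r0=%d r1=%d\n", regs["r0"], regs["r1"]);  // r0=20 r1=10
  return 0;
}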
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -190,6 +341,11 @@ bool LCodeGen::GenerateDeferredCode() {
__ jmp(code->exit());
}
+ // Force constant pool emission at the end of deferred code to make
+ // sure that no constant pools are emitted after the official end of
+ // the instruction sequence.
+ masm()->CheckConstPool(true, false);
+
// Deferred code is the last part of the instruction sequence. Mark
// the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
@@ -324,6 +480,45 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
}
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->values()->length();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->BeginFrame(environment->ast_id(), closure_id, height);
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ // spilled_registers_ and spilled_double_registers_ are either
+ // both NULL or both set.
+ if (environment->spilled_registers() != NULL && value != NULL) {
+ if (value->IsRegister() &&
+ environment->spilled_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(translation,
+ environment->spilled_registers()[value->index()],
+ environment->HasTaggedValueAt(i));
+ } else if (
+ value->IsDoubleRegister() &&
+ environment->spilled_double_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(
+ translation,
+ environment->spilled_double_registers()[value->index()],
+ false);
+ }
+ }
+
+ AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ }
+}
+
+
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged) {
@@ -439,7 +634,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
++frame_count;
}
Translation translation(&translations_, frame_count);
- environment->WriteTranslation(this, &translation);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
environment->Register(deoptimization_index, translation.index());
deoptimizations_.Add(environment);
@@ -575,6 +770,27 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
+void LCodeGen::RecordSafepointWithRegistersAndDoubles(
+ LPointerMap* pointers,
+ int arguments,
+ int deoptimization_index) {
+ const ZoneList<LOperand*>* operands = pointers->operands();
+ Safepoint safepoint =
+ safepoints_.DefineSafepointWithRegistersAndDoubles(
+ masm(), arguments, deoptimization_index);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index());
+ } else if (pointer->IsRegister()) {
+ safepoint.DefinePointerRegister(ToRegister(pointer));
+ }
+ }
+ // Register cp always contains a pointer to the context.
+ safepoint.DefinePointerRegister(cp);
+}
+
+
void LCodeGen::RecordPosition(int position) {
if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
@@ -601,12 +817,12 @@ void LCodeGen::DoParallelMove(LParallelMove* move) {
Register core_scratch = scratch0();
bool destroys_core_scratch = false;
- LGapResolver resolver(move->move_operands(), &marker_operand);
- const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
+ const ZoneList<LMoveOperands>* moves =
+ resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
- LOperand* from = move.from();
- LOperand* to = move.to();
+ LOperand* from = move.source();
+ LOperand* to = move.destination();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(dbl_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
@@ -771,7 +987,9 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
break;
}
case CodeStub::TranscendentalCache: {
- Abort("TranscendentalCache unimplemented.");
+ __ ldr(r0, MemOperand(sp, 0));
+ TranscendentalCacheStub stub(instr->transcendental_type());
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -786,12 +1004,155 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- Abort("DoModI unimplemented.");
+ class DeferredModI: public LDeferredCode {
+ public:
+ DeferredModI(LCodeGen* codegen, LModI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD);
+ }
+ private:
+ LModI* instr_;
+ };
+  // These registers hold untagged 32-bit values.
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ Label deoptimize, done;
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ tst(right, Operand(right));
+ __ b(eq, &deoptimize);
+ }
+
+ // Check for (0 % -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label ok;
+ __ tst(left, Operand(left));
+ __ b(ne, &ok);
+ __ tst(right, Operand(right));
+ __ b(pl, &ok);
+ __ b(al, &deoptimize);
+ __ bind(&ok);
+ }
+
+  // Call the generic stub. The numbers in r0 and r1 have
+  // to be converted to Smis. If that is not possible, deoptimize.
+ DeferredModI* deferred = new DeferredModI(this, instr);
+ __ TrySmiTag(left, &deoptimize, scratch);
+ __ TrySmiTag(right, &deoptimize, scratch);
+
+ __ b(al, deferred->entry());
+ __ bind(deferred->exit());
+
+ // If the result in r0 is a Smi, untag it, else deoptimize.
+ __ BranchOnNotSmi(result, &deoptimize);
+ __ mov(result, Operand(result, ASR, 1));
+
+ __ b(al, &done);
+ __ bind(&deoptimize);
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&done);
}
void LCodeGen::DoDivI(LDivI* instr) {
- Abort("DoDivI unimplemented.");
+ class DeferredDivI: public LDeferredCode {
+ public:
+ DeferredDivI(LCodeGen* codegen, LDivI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV);
+ }
+ private:
+ LDivI* instr_;
+ };
+
+ const Register left = ToRegister(instr->left());
+ const Register right = ToRegister(instr->right());
+ const Register scratch = scratch0();
+ const Register result = ToRegister(instr->result());
+
+ // Check for x / 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ tst(right, right);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label left_not_zero;
+ __ tst(left, Operand(left));
+ __ b(ne, &left_not_zero);
+ __ tst(right, Operand(right));
+ DeoptimizeIf(mi, instr->environment());
+ __ bind(&left_not_zero);
+ }
+
+ // Check for (-kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ cmp(left, Operand(kMinInt));
+ __ b(ne, &left_not_min_int);
+ __ cmp(right, Operand(-1));
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&left_not_min_int);
+ }
+
+ Label done, deoptimize;
+ // Test for a few common cases first.
+ __ cmp(right, Operand(1));
+ __ mov(result, left, LeaveCC, eq);
+ __ b(eq, &done);
+
+ __ cmp(right, Operand(2));
+ __ tst(left, Operand(1), eq);
+ __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
+ __ b(eq, &done);
+
+ __ cmp(right, Operand(4));
+ __ tst(left, Operand(3), eq);
+ __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
+ __ b(eq, &done);
+
+  // Call the generic stub. The numbers in r0 and r1 have
+  // to be converted to Smis. If that is not possible, deoptimize.
+ DeferredDivI* deferred = new DeferredDivI(this, instr);
+
+ __ TrySmiTag(left, &deoptimize, scratch);
+ __ TrySmiTag(right, &deoptimize, scratch);
+
+ __ b(al, deferred->entry());
+ __ bind(deferred->exit());
+
+ // If the result in r0 is a Smi, untag it, else deoptimize.
+ __ BranchOnNotSmi(result, &deoptimize);
+ __ SmiUntag(result);
+ __ b(&done);
+
+ __ bind(&deoptimize);
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&done);
+}
+
+
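A sketch of what the power-of-two fast paths in DoDivI above compute, written as plain C++ (illustrative only; the emitted code uses predicated instructions and never branches between the checks, and the shift is an arithmetic ASR). The tst predicated on eq re-tests the condition, so the shift is only taken when the division is exact:

    // right == 1: result is left unchanged.
    if (right == 1) return left;
    // right == 2: exact only when the low bit is clear.
    if (right == 2 && (left & 1) == 0) return left >> 1;  // ASR #1
    // right == 4: exact only when the low two bits are clear.
    if (right == 4 && (left & 3) == 0) return left >> 2;  // ASR #2
    // Otherwise fall through to the generic binary op stub.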
+void LCodeGen::DoDeferredGenericBinaryStub(LBinaryOperation* instr,
+ Token::Value op) {
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+
+ __ PushSafepointRegistersAndDoubles();
+ GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right);
+ __ CallStub(&stub);
+ RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
+ 0,
+ Safepoint::kNoDeoptimizationIndex);
+ // Overwrite the stored value of r0 with the result of the stub.
+ __ str(r0, MemOperand(sp, DwVfpRegister::kNumAllocatableRegisters *
+ kDoubleSize));
+ __ PopSafepointRegistersAndDoubles();
}
@@ -935,7 +1296,10 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
- Abort("DoConstantD unimplemented.");
+ ASSERT(instr->result()->IsDoubleRegister());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ double v = instr->value();
+ __ vmov(result, v);
}
@@ -956,12 +1320,26 @@ void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->input());
__ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset));
- Abort("DoFixedArrayLength untested.");
}
void LCodeGen::DoValueOf(LValueOf* instr) {
- Abort("DoValueOf unimplemented.");
+ Register input = ToRegister(instr->input());
+ Register result = ToRegister(instr->result());
+ Register map = ToRegister(instr->temporary());
+ ASSERT(input.is(result));
+ Label done;
+
+  // If the object is a smi, return the object.
+ __ tst(input, Operand(kSmiTagMask));
+ __ b(eq, &done);
+
+ // If the object is not a value type, return the object.
+ __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
+ __ b(ne, &done);
+ __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
+
+ __ bind(&done);
}
@@ -969,7 +1347,6 @@ void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->input();
ASSERT(input->Equals(instr->result()));
__ mvn(ToRegister(input), Operand(ToRegister(input)));
- Abort("DoBitNotI untested.");
}
@@ -1078,14 +1455,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
DoubleRegister reg = ToDoubleRegister(instr->input());
Register scratch = scratch0();
- // Test for the double value. Zero and NaN are false.
- // Clear the Invalid cumulative exception flags.
- __ ClearFPSCRBits(kVFPInvalidExceptionBit, scratch);
- __ vcmp(reg, 0.0);
- // Retrieve the exception and status flags and
- // check for zero or an invalid exception.
- __ vmrs(scratch);
- __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPInvalidExceptionBit));
+ // Test the double value. Zero and NaN are false.
+ __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
+ __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
EmitBranch(true_block, false_block, ne);
} else {
ASSERT(r.IsTagged());
@@ -1112,7 +1484,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ tst(reg, Operand(kSmiTagMask));
__ b(eq, true_label);
- // Test for double values. Zero and NaN are false.
+ // Test double values. Zero and NaN are false.
Label call_stub;
DoubleRegister dbl_scratch = d0;
Register scratch = scratch0();
@@ -1122,13 +1494,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ b(ne, &call_stub);
__ sub(ip, reg, Operand(kHeapObjectTag));
__ vldr(dbl_scratch, ip, HeapNumber::kValueOffset);
- // Clear the Invalid cumulative exception flags.
- __ ClearFPSCRBits(kVFPInvalidExceptionBit, scratch);
- __ vcmp(dbl_scratch, 0.0);
- // Retrieve the exception and status flags and
- // check for zero or an invalid exception.
- __ vmrs(scratch);
- __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPInvalidExceptionBit));
+ __ VFPCompareAndLoadFlags(dbl_scratch, 0.0, scratch);
+ __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
__ b(ne, false_label);
__ b(true_label);
@@ -1148,24 +1515,47 @@ void LCodeGen::DoBranch(LBranch* instr) {
void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
- // TODO(srdjan): Perform stack overflow check if this goto needs it
- // before jumping.
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
+ // Perform stack overflow check if this goto needs it before jumping.
+ if (deferred_stack_check != NULL) {
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, chunk_->GetAssemblyLabel(block));
+ __ jmp(deferred_stack_check->entry());
+ deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
+ } else {
+ __ jmp(chunk_->GetAssemblyLabel(block));
+ }
}
}
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- UNIMPLEMENTED();
+ __ PushSafepointRegisters();
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ __ PopSafepointRegisters();
}
void LCodeGen::DoGoto(LGoto* instr) {
- // TODO(srdjan): Implement deferred stack check.
- EmitGoto(instr->block_id(), NULL);
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LGoto* instr_;
+ };
+
+ DeferredStackCheck* deferred = NULL;
+ if (instr->include_stack_check()) {
+ deferred = new DeferredStackCheck(this, instr);
+ }
+ EmitGoto(instr->block_id(), deferred);
}
@@ -1393,7 +1783,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
}
-// Branches to a label or falls through with the answer in the z flag. Trashes
+// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input. Only input and temp2 may alias.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
@@ -1401,17 +1791,91 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
Register input,
Register temp,
Register temp2) {
- Abort("EmitClassOfTest unimplemented.");
+ ASSERT(!input.is(temp));
+ ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
+ __ tst(input, Operand(kSmiTagMask));
+ __ b(eq, is_false);
+ __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, is_false);
+
+ // Map is now in temp.
+ // Functions have class 'Function'.
+ __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE);
+ if (class_name->IsEqualTo(CStrVector("Function"))) {
+ __ b(eq, is_true);
+ } else {
+ __ b(eq, is_false);
+ }
+
+ // Check if the constructor in the map is a function.
+ __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
+ if (class_name->IsEqualTo(CStrVector("Object"))) {
+ __ b(ne, is_true);
+ } else {
+ __ b(ne, is_false);
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(temp, FieldMemOperand(temp,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is a symbol because it's a literal.
+ // The name in the constructor is a symbol because of the way the context is
+ // booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are symbols it is sufficient to use an identity
+ // comparison.
+ __ cmp(temp, Operand(class_name));
+ // End with the answer in flags.
}
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Abort("DoClassOfTest unimplemented.");
+ Register input = ToRegister(instr->input());
+ Register result = ToRegister(instr->result());
+ ASSERT(input.is(result));
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ Label done, is_true, is_false;
+
+ EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
+ __ b(ne, &is_false);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Abort("DoClassOfTestAndBranch unimplemented.");
+ Register input = ToRegister(instr->input());
+ Register temp = scratch0();
+ Register temp2 = ToRegister(instr->temporary());
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+ EmitBranch(true_block, false_block, eq);
}
@@ -1482,10 +1946,12 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
condition = ReverseCondition(condition);
}
__ cmp(r0, Operand(0));
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex,
- condition);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex,
- NegateCondition(condition));
+ __ LoadRoot(ToRegister(instr->result()),
+ Heap::kTrueValueRootIndex,
+ condition);
+ __ LoadRoot(ToRegister(instr->result()),
+ Heap::kFalseValueRootIndex,
+ NegateCondition(condition));
}
@@ -1528,6 +1994,14 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
}
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ // TODO(antonm): load a context with a separate instruction.
+ Register result = ToRegister(instr->result());
+ __ LoadContext(result, instr->context_chain_length());
+ __ ldr(result, ContextOperand(result, instr->slot_index()));
+}
+
+
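Conceptually, the two instructions in DoLoadContextSlot do the following walk (a sketch of intent, not the emitted code; LoadContext follows the previous-context links):

    ctx = cp;
    for (int i = 0; i < context_chain_length; i++) ctx = ctx->previous();
    result = ctx->slot(slot_index);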
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->input());
Register result = ToRegister(instr->result());
@@ -1636,36 +2110,18 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register elements = ToRegister(instr->elements());
Register key = EmitLoadRegister(instr->key(), scratch0());
- Register result;
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
-
- if (instr->load_result() != NULL) {
- result = ToRegister(instr->load_result());
- } else {
- result = ToRegister(instr->result());
- ASSERT(result.is(elements));
- }
+ ASSERT(result.is(elements));
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
__ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- Representation r = instr->hydrogen()->representation();
- if (r.IsInteger32()) {
- // Untag and check for smi.
- __ SmiUntag(result);
- DeoptimizeIf(cs, instr->environment());
- } else if (r.IsDouble()) {
- EmitNumberUntagD(result,
- ToDoubleRegister(instr->result()),
- instr->environment());
- } else {
- // Check for the hole value.
- ASSERT(r.IsTagged());
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
- }
+ // Check for the hole value.
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, scratch);
+ DeoptimizeIf(eq, instr->environment());
}
@@ -1718,7 +2174,65 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Abort("DoApplyArguments unimplemented.");
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register scratch = scratch0();
+
+ ASSERT(receiver.is(r0));
+ ASSERT(function.is(r1));
+ ASSERT(ToRegister(instr->result()).is(r0));
+
+  // If the receiver is null or undefined, we have to pass the
+  // global object as the receiver.
+ Label global_receiver, receiver_ok;
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ cmp(receiver, scratch);
+ __ b(eq, &global_receiver);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ cmp(receiver, scratch);
+ __ b(ne, &receiver_ok);
+ __ bind(&global_receiver);
+ __ ldr(receiver, GlobalObjectOperand());
+ __ bind(&receiver_ok);
+
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+
+ Label invoke;
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ cmp(length, Operand(kArgumentsLimit));
+ DeoptimizeIf(hi, instr->environment());
+
+ // Push the receiver and use the register to keep the original
+ // number of arguments.
+ __ push(receiver);
+ __ mov(receiver, length);
+  // The arguments are located one pointer size past elements.
+ __ add(elements, elements, Operand(1 * kPointerSize));
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ Label loop;
+ // length is a small non-negative integer, due to the test above.
+ __ tst(length, Operand(length));
+ __ b(eq, &invoke);
+ __ bind(&loop);
+ __ ldr(scratch, MemOperand(elements, length, LSL, 2));
+ __ push(scratch);
+ __ sub(length, length, Operand(1), SetCC);
+ __ b(ne, &loop);
+
+ __ bind(&invoke);
+ // Invoke the function. The number of arguments is stored in receiver
+ // which is r0, as expected by InvokeFunction.
+ v8::internal::ParameterCount actual(receiver);
+ SafepointGenerator safepoint_generator(this,
+ instr->pointer_map(),
+ Safepoint::kNoDeoptimizationIndex);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
}
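A worked trace of the copy loop above for length == 2, with kPointerSize == 4 (purely illustrative). elements has already been advanced by one pointer size, and sub with SetCC both decrements the counter and produces the ne condition that keeps the loop running:

    length == 2: push [elements + 8]   // higher-addressed slot pushed first
    length == 1: push [elements + 4]
    length == 0: flags are eq, fall through to &invoke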
@@ -1797,12 +2311,53 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- Abort("DoMathFloor unimplemented.");
+ DoubleRegister input = ToDoubleRegister(instr->input());
+ Register result = ToRegister(instr->result());
+ Register prev_fpscr = ToRegister(instr->temp());
+ SwVfpRegister single_scratch = double_scratch0().low();
+ Register scratch = scratch0();
+
+  // Set custom FPSCR:
+  // - Set rounding mode to "Round towards Minus Infinity".
+  // - Clear VFP cumulative exception flags.
+  // - Make sure Flush-to-zero mode control bit is unset.
+ __ vmrs(prev_fpscr);
+ __ bic(scratch, prev_fpscr,
+ Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
+ __ orr(scratch, scratch, Operand(kVFPRoundToMinusInfinityBits));
+ __ vmsr(scratch);
+
+ // Convert the argument to an integer.
+ __ vcvt_s32_f64(single_scratch,
+ input,
+ Assembler::FPSCRRounding,
+ al);
+
+  // Retrieve FPSCR and check for VFP exceptions.
+ __ vmrs(scratch);
+ // Restore FPSCR
+ __ vmsr(prev_fpscr);
+ __ tst(scratch, Operand(kVFPExceptionMask));
+ DeoptimizeIf(ne, instr->environment());
+
+  // Move the result back to a general-purpose register.
+ __ vmov(result, single_scratch);
+
+ // Test for -0.
+ Label done;
+ __ cmp(result, Operand(0));
+ __ b(ne, &done);
+ __ vmov(scratch, input.high());
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
}
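The FPSCR save/modify/restore pattern above is the VFP analogue of temporarily switching the host floating-point rounding mode. A minimal host-side C++ sketch of the same round-towards-minus-infinity conversion (assumes the compiler honours the dynamic rounding mode, e.g. under #pragma STDC FENV_ACCESS ON):

    #include <cfenv>
    #include <cmath>

    int FloorToInt32(double x) {
      const int old_mode = std::fegetround();
      std::fesetround(FE_DOWNWARD);                      // like the vmsr above
      int result = static_cast<int>(std::nearbyint(x));  // like vcvt_s32_f64
      std::fesetround(old_mode);                         // like vmsr(prev_fpscr)
      return result;
    }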
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- Abort("DoMathSqrt unimplemented.");
+ DoubleRegister input = ToDoubleRegister(instr->input());
+ ASSERT(ToDoubleRegister(instr->result()).is(input));
+ __ vsqrt(input, input);
}
@@ -1976,7 +2531,19 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- Abort("DoInteger32ToDouble unimplemented.");
+ LOperand* input = instr->input();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ SwVfpRegister single_scratch = double_scratch0().low();
+ if (input->IsStackSlot()) {
+ Register scratch = scratch0();
+ __ ldr(scratch, ToMemOperand(input));
+ __ vmov(single_scratch, scratch);
+ } else {
+ __ vmov(single_scratch, ToRegister(input));
+ }
+ __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
}
@@ -2197,7 +2764,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&heap_number);
__ sub(ip, input_reg, Operand(kHeapObjectTag));
__ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
- __ vcmp(dbl_tmp, 0.0); // Sets overflow bit if NaN.
+ __ vcmp(dbl_tmp, 0.0); // Sets overflow bit in FPSCR flags if NaN.
__ vcvt_s32_f64(flt_scratch, dbl_tmp);
__ vmov(input_reg, flt_scratch); // 32-bit result of conversion.
__ vmrs(pc); // Move vector status bits to normal status bits.
@@ -2218,8 +2785,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// back to check; note that using non-overlapping s and d regs would be
// slightly faster.
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
- __ vcmp(dbl_scratch, dbl_tmp);
- __ vmrs(pc); // Move vector status bits to normal status bits.
+ __ VFPCompareAndSetFlags(dbl_scratch, dbl_tmp);
DeoptimizeIf(ne, instr->environment()); // Not equal or unordered.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ tst(input_reg, Operand(input_reg));
@@ -2322,14 +2888,15 @@ void LCodeGen::DoCheckMap(LCheckMap* instr) {
}
-void LCodeGen::LoadPrototype(Register result,
- Handle<JSObject> prototype) {
- if (Heap::InNewSpace(*prototype)) {
+void LCodeGen::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (Heap::InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(prototype);
+ Factory::NewJSGlobalPropertyCell(object);
__ mov(result, Operand(cell));
+ __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
} else {
- __ mov(result, Operand(prototype));
+ __ mov(result, Operand(object));
}
}
@@ -2339,11 +2906,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register temp2 = ToRegister(instr->temp2());
Handle<JSObject> holder = instr->holder();
- Handle<Map> receiver_map = instr->receiver_map();
- Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
+ Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadPrototype(temp1, current_prototype);
+ LoadHeapObject(temp1, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@@ -2353,7 +2919,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadPrototype(temp1, current_prototype);
+ LoadHeapObject(temp1, current_prototype);
}
// Check the holder map.
@@ -2624,7 +3190,14 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Abort("DoDeleteProperty unimplemented.");
+ Register object = ToRegister(instr->object());
+ Register key = ToRegister(instr->key());
+ __ Push(object, key);
+ RecordPosition(instr->pointer_map()->position());
+ SafepointGenerator safepoint_generator(this,
+ instr->pointer_map(),
+ Safepoint::kNoDeoptimizationIndex);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
}
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 608efa9e..9eed64b4 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,8 +39,30 @@ namespace internal {
// Forward declarations.
class LDeferredCode;
+class LGapNode;
class SafepointGenerator;
+class LGapResolver BASE_EMBEDDED {
+ public:
+ LGapResolver();
+ const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
+ LOperand* marker_operand);
+
+ private:
+ LGapNode* LookupNode(LOperand* operand);
+ bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+ bool CanReach(LGapNode* a, LGapNode* b);
+ void RegisterMove(LMoveOperands move);
+ void AddResultMove(LOperand* from, LOperand* to);
+ void AddResultMove(LGapNode* from, LGapNode* to);
+ void ResolveCycle(LGapNode* start, LOperand* marker_operand);
+
+ ZoneList<LGapNode*> nodes_;
+ ZoneList<LGapNode*> identified_cycles_;
+ ZoneList<LMoveOperands> result_;
+ int next_visited_id_;
+};
+
class LCodeGen BASE_EMBEDDED {
public:
@@ -71,6 +93,7 @@ class LCodeGen BASE_EMBEDDED {
void FinishCode(Handle<Code> code);
// Deferred code support.
+ void DoDeferredGenericBinaryStub(LBinaryOperation* instr, Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
@@ -80,6 +103,9 @@ class LCodeGen BASE_EMBEDDED {
// Parallel move support.
void DoParallelMove(LParallelMove* move);
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -104,6 +130,7 @@ class LCodeGen BASE_EMBEDDED {
MacroAssembler* masm() const { return masm_; }
Register scratch0() { return r9; }
+ DwVfpRegister double_scratch0() { return d0; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -149,7 +176,7 @@ class LCodeGen BASE_EMBEDDED {
int arity,
LInstruction* instr);
- void LoadPrototype(Register result, Handle<JSObject> prototype);
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
@@ -194,6 +221,9 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
+ void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
+ int arguments,
+ int deoptimization_index);
void RecordPosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
@@ -239,6 +269,9 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
+  // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 5cba955b..1028b0e6 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -466,6 +466,25 @@ void MacroAssembler::PopSafepointRegisters() {
}
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ PushSafepointRegisters();
+ sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ kDoubleSize));
+ for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+ }
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+ for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+ }
+ add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ kDoubleSize));
+ PopSafepointRegisters();
+}
+
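A typical call site for the pair above, mirroring how the deferred binary op stub earlier in this patch uses them (a sketch, not a fixed recipe):

    __ PushSafepointRegistersAndDoubles();
    __ CallStub(&stub);                    // may clobber core and VFP registers
    // ... record the safepoint, stash results into the saved-register slots ...
    __ PopSafepointRegistersAndDoubles();  // exact LIFO mirror of the push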
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
@@ -519,10 +538,46 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
-void MacroAssembler::ClearFPSCRBits(uint32_t bits_to_clear, Register scratch) {
- vmrs(scratch);
- bic(scratch, scratch, Operand(bits_to_clear));
- vmsr(scratch);
+void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
+ const Register scratch,
+ const Condition cond) {
+ vmrs(scratch, cond);
+ bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
+ vmsr(scratch, cond);
+}
+
+
+void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Compare and move FPSCR flags to the normal condition flags.
+ VFPCompareAndLoadFlags(src1, src2, pc, cond);
+}
+
+void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
+ const double src2,
+ const Condition cond) {
+ // Compare and move FPSCR flags to the normal condition flags.
+ VFPCompareAndLoadFlags(src1, src2, pc, cond);
+}
+
+
+void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Register fpscr_flags,
+ const Condition cond) {
+ // Compare and load FPSCR.
+ vcmp(src1, src2, cond);
+ vmrs(fpscr_flags, cond);
+}
+
+void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
+ const double src2,
+ const Register fpscr_flags,
+ const Condition cond) {
+ // Compare and load FPSCR.
+ vcmp(src1, src2, cond);
+ vmrs(fpscr_flags, cond);
}
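The SetFlags variants rely on an ARM detail: vmrs with pc as the destination is the VMRS APSR_nzcv, FPSCR form, which copies the FPSCR condition flags straight into the CPSR instead of writing a core register. Typical use (sketch; the label is illustrative):

    __ VFPCompareAndSetFlags(d0, d1);
    __ b(ne, &not_equal);  // ne is also taken for unordered (NaN) operands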
@@ -682,7 +737,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> code_constant,
Register code_reg,
Label* done,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
bool definitely_matches = false;
Label regular_invoke;
@@ -738,6 +794,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
if (flag == CALL_FUNCTION) {
Call(adaptor, RelocInfo::CODE_TARGET);
+ if (post_call_generator != NULL) post_call_generator->Generate();
b(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -750,12 +807,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
Label done;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ post_call_generator);
if (flag == CALL_FUNCTION) {
Call(code);
+ if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code);
@@ -789,7 +849,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@@ -806,7 +867,7 @@ void MacroAssembler::InvokeFunction(Register fun,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag);
+ InvokeCode(code_reg, expected, actual, flag, post_call_generator);
}
@@ -1676,10 +1737,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags) {
+ InvokeJSFlags flags,
+ PostCallGenerator* post_call_generator) {
GetBuiltinEntry(r2, id);
if (flags == CALL_JS) {
Call(r2);
+ if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flags == JUMP_JS);
Jump(r2);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 02bc3846..324fbb2d 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -33,6 +33,9 @@
namespace v8 {
namespace internal {
+// Forward declaration.
+class PostCallGenerator;
+
// ----------------------------------------------------------------------------
// Static helper functions
@@ -229,6 +232,9 @@ class MacroAssembler: public Assembler {
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
+ void PushSafepointRegistersAndDoubles();
+ void PopSafepointRegistersAndDoubles();
+
static int SafepointRegisterStackIndex(int reg_code);
// Load two consecutive registers with two consecutive memory locations.
@@ -243,8 +249,29 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
- // Clear FPSCR bits.
- void ClearFPSCRBits(uint32_t bits_to_clear, Register scratch);
+ // Clear specified FPSCR bits.
+ void ClearFPSCRBits(const uint32_t bits_to_clear,
+ const Register scratch,
+ const Condition cond = al);
+
+ // Compare double values and move the result to the normal condition flags.
+ void VFPCompareAndSetFlags(const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void VFPCompareAndSetFlags(const DwVfpRegister src1,
+ const double src2,
+ const Condition cond = al);
+
+  // Compare double values and then load the FPSCR flags into a register.
+ void VFPCompareAndLoadFlags(const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Register fpscr_flags,
+ const Condition cond = al);
+ void VFPCompareAndLoadFlags(const DwVfpRegister src1,
+ const double src2,
+ const Register fpscr_flags,
+ const Condition cond = al);
+
// ---------------------------------------------------------------------------
// Activation frames
@@ -284,7 +311,8 @@ class MacroAssembler: public Assembler {
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
@@ -296,7 +324,8 @@ class MacroAssembler: public Assembler {
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
@@ -637,7 +666,9 @@ class MacroAssembler: public Assembler {
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flags,
+ PostCallGenerator* post_call_generator = NULL);
// Store the code object for the given builtin in the target register and
// setup the function in r1.
@@ -688,6 +719,16 @@ class MacroAssembler: public Assembler {
add(reg, reg, Operand(reg), s);
}
+  // Try to convert int32 to Smi. If the value is too large, preserve
+ // the original value and jump to not_a_smi. Destroys scratch and
+ // sets flags.
+ void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
+ mov(scratch, reg);
+ SmiTag(scratch, SetCC);
+ b(vs, not_a_smi);
+ mov(reg, scratch);
+ }
+
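SmiTag here is an add of the register to itself with SetCC (see the helper a few lines up), so the V flag is set exactly when doubling overflows 32 signed bits, i.e. for values outside the 31-bit Smi range. A host-side C++ equivalent (assumes GCC/Clang for __builtin_add_overflow):

    #include <stdint.h>

    // Returns false for values outside [-2^30, 2^30 - 1], leaving *out untouched.
    bool TrySmiTag(int32_t value, int32_t* out) {
      int32_t tagged;
      if (__builtin_add_overflow(value, value, &tagged)) return false;  // V set
      *out = tagged;  // Smi encoding: value << 1, low tag bit is 0
      return true;
    }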
void SmiUntag(Register reg) {
mov(reg, Operand(reg, ASR, kSmiTagSize));
}
@@ -745,7 +786,8 @@ class MacroAssembler: public Assembler {
Handle<Code> code_constant,
Register code_reg,
Label* done,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
// Activation support.
void EnterFrame(StackFrame::Type type);
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index fbcc9f7f..94da0424 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -417,8 +417,8 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
- int reg2,
- Label* on_not_equal) {
+ int reg2,
+ Label* on_not_equal) {
__ ldr(r0, register_location(reg1));
__ ldr(r1, register_location(reg2));
__ cmp(r0, r1);
@@ -426,7 +426,7 @@ void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
}
-void RegExpMacroAssemblerARM::CheckNotCharacter(uint32_t c,
+void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
Label* on_not_equal) {
__ cmp(current_character(), Operand(c));
BranchOrBacktrack(ne, on_not_equal);
@@ -442,8 +442,8 @@ void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
}
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
+void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
Label* on_not_equal) {
__ and_(r0, current_character(), Operand(mask));
__ cmp(r0, Operand(c));
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 4e09f671..b487ba59 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -50,9 +50,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c,
+ unsigned mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
@@ -68,9 +68,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index c2a9796c..b7ec5d24 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1332,11 +1332,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
MaybeObject* CallStubCompiler::GenerateMissBranch() {
+ MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
+ kind_);
Object* obj;
- { MaybeObject* maybe_obj =
- StubCache::ComputeCallMiss(arguments().immediate(), kind_);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
return obj;
}
@@ -1646,8 +1645,15 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
const int argc = arguments().immediate();
Label miss;
+ Label name_miss;
Label index_out_of_range;
- GenerateNameCheck(name, &miss);
+ Label* index_out_of_range_label = &index_out_of_range;
+
+ if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1675,7 +1681,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_code_at_generator.GenerateFast(masm());
__ Drop(argc + 1);
@@ -1684,12 +1690,17 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
StubRuntimeCallHelper call_helper;
char_code_at_generator.GenerateSlow(masm(), call_helper);
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(r0, Heap::kNanValueRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+ }
__ bind(&miss);
+ // Restore function name in r2.
+ __ Move(r2, Handle<String>(name));
+ __ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -1720,9 +1731,15 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
const int argc = arguments().immediate();
Label miss;
+ Label name_miss;
Label index_out_of_range;
+ Label* index_out_of_range_label = &index_out_of_range;
- GenerateNameCheck(name, &miss);
+ if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1752,7 +1769,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_at_generator.GenerateFast(masm());
__ Drop(argc + 1);
@@ -1761,12 +1778,17 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm(), call_helper);
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+ }
__ bind(&miss);
+ // Restore function name in r2.
+ __ Move(r2, Handle<String>(name));
+ __ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -1952,7 +1974,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ cmp(r7, Operand(HeapNumber::kMantissaBits));
// If greater or equal, the argument is already round and in r0.
__ b(&restore_fpscr_and_return, ge);
- __ b(&slow);
+ __ b(&wont_fit_smi);
__ bind(&no_vfp_exception);
// Move the result back to general purpose register r0.
@@ -1965,7 +1987,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
// Check for -0.
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(&restore_fpscr_and_return, ne);
// r5 already holds the HeapNumber exponent.
__ tst(r5, Operand(HeapNumber::kSignMask));
@@ -1980,10 +2002,10 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ Ret();
__ bind(&wont_fit_smi);
- __ bind(&slow);
   // Restore FPSCR and fall through to the slow case.
__ vmsr(r3);
+ __ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
diff --git a/src/array.js b/src/array.js
index 56f52549..0d7a7cbc 100644
--- a/src/array.js
+++ b/src/array.js
@@ -117,19 +117,16 @@ function Join(array, length, separator, convert) {
// Fast case for one-element arrays.
if (length == 1) {
var e = array[0];
- if (!IS_UNDEFINED(e) || (0 in array)) {
- if (IS_STRING(e)) return e;
- return convert(e);
- }
- return '';
+ if (IS_STRING(e)) return e;
+ return convert(e);
}
// Construct an array for the elements.
var elements = new $Array(length);
- var elements_length = 0;
// We pull the empty separator check outside the loop for speed!
if (separator.length == 0) {
+ var elements_length = 0;
for (var i = 0; i < length; i++) {
var e = array[i];
if (!IS_UNDEFINED(e)) {
@@ -142,16 +139,25 @@ function Join(array, length, separator, convert) {
if (!IS_UNDEFINED(result)) return result;
return %StringBuilderConcat(elements, elements_length, '');
}
- // Non-empty separator.
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (!IS_UNDEFINED(e)) {
+ // Non-empty separator case.
+ // If the first element is a number then use the heuristic that the
+ // remaining elements are also likely to be numbers.
+ if (!IS_NUMBER(array[0])) {
+ for (var i = 0; i < length; i++) {
+ var e = array[i];
if (!IS_STRING(e)) e = convert(e);
elements[i] = e;
- } else {
- elements[i] = '';
}
- }
+ } else {
+ for (var i = 0; i < length; i++) {
+ var e = array[i];
+ if (IS_NUMBER(e)) elements[i] = %_NumberToString(e);
+ else {
+ if (!IS_STRING(e)) e = convert(e);
+ elements[i] = e;
+ }
+ }
+ }
var result = %_FastAsciiArrayJoin(elements, separator);
if (!IS_UNDEFINED(result)) return result;
diff --git a/src/assembler.cc b/src/assembler.cc
index e8bcd914..fb9a4af1 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -66,6 +66,7 @@ namespace internal {
const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
+const double DoubleConstant::minus_zero = -0.0;
const double DoubleConstant::negative_infinity = -V8_INFINITY;
@@ -729,6 +730,12 @@ ExternalReference ExternalReference::address_of_one_half() {
}
+ExternalReference ExternalReference::address_of_minus_zero() {
+ return ExternalReference(reinterpret_cast<void*>(
+ const_cast<double*>(&DoubleConstant::minus_zero)));
+}
+
+
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::negative_infinity)));
@@ -910,6 +917,11 @@ void PositionsRecorder::RecordPosition(int pos) {
ASSERT(pos != RelocInfo::kNoPosition);
ASSERT(pos >= 0);
state_.current_position = pos;
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (gdbjit_lineinfo_ != NULL) {
+ gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
+ }
+#endif
}
@@ -917,6 +929,11 @@ void PositionsRecorder::RecordStatementPosition(int pos) {
ASSERT(pos != RelocInfo::kNoPosition);
ASSERT(pos >= 0);
state_.current_statement_position = pos;
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (gdbjit_lineinfo_ != NULL) {
+ gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
+ }
+#endif
}
diff --git a/src/assembler.h b/src/assembler.h
index 0219de22..4ef61e4b 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -35,6 +35,7 @@
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
+#include "gdb-jit.h"
#include "runtime.h"
#include "top.h"
#include "token.h"
@@ -50,6 +51,7 @@ class DoubleConstant: public AllStatic {
public:
static const double min_int;
static const double one_half;
+ static const double minus_zero;
static const double negative_infinity;
};
@@ -555,6 +557,7 @@ class ExternalReference BASE_EMBEDDED {
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
+ static ExternalReference address_of_minus_zero();
static ExternalReference address_of_negative_infinity();
Address address() const {return reinterpret_cast<Address>(address_);}
@@ -635,7 +638,29 @@ struct PositionState {
class PositionsRecorder BASE_EMBEDDED {
public:
explicit PositionsRecorder(Assembler* assembler)
- : assembler_(assembler) {}
+ : assembler_(assembler) {
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ gdbjit_lineinfo_ = NULL;
+#endif
+ }
+
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ ~PositionsRecorder() {
+ delete gdbjit_lineinfo_;
+ }
+
+ void StartGDBJITLineInfoRecording() {
+ if (FLAG_gdbjit) {
+ gdbjit_lineinfo_ = new GDBJITLineInfo();
+ }
+ }
+
+ GDBJITLineInfo* DetachGDBJITLineInfo() {
+ GDBJITLineInfo* lineinfo = gdbjit_lineinfo_;
+ gdbjit_lineinfo_ = NULL; // To prevent deallocation in destructor.
+ return lineinfo;
+ }
+#endif
// Set current position to pos.
void RecordPosition(int pos);
@@ -655,6 +680,9 @@ class PositionsRecorder BASE_EMBEDDED {
private:
Assembler* assembler_;
PositionState state_;
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ GDBJITLineInfo* gdbjit_lineinfo_;
+#endif
friend class PreservePositionScope;
diff --git a/src/ast.cc b/src/ast.cc
index 1a6e7681..4fe89be1 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -166,12 +166,6 @@ bool FunctionLiteral::AllowsLazyCompilation() {
}
-bool FunctionLiteral::AllowOptimize() {
- // We can't deal with heap-allocated locals.
- return scope()->num_heap_slots() == 0;
-}
-
-
ObjectLiteral::Property::Property(Literal* key, Expression* value) {
emit_store_ = true;
key_ = key;
@@ -215,12 +209,16 @@ bool ObjectLiteral::Property::emit_store() {
bool IsEqualString(void* first, void* second) {
+ ASSERT((*reinterpret_cast<String**>(first))->IsString());
+ ASSERT((*reinterpret_cast<String**>(second))->IsString());
Handle<String> h1(reinterpret_cast<String**>(first));
Handle<String> h2(reinterpret_cast<String**>(second));
return (*h1)->Equals(*h2);
}
bool IsEqualSmi(void* first, void* second) {
+ ASSERT((*reinterpret_cast<Smi**>(first))->IsSmi());
+ ASSERT((*reinterpret_cast<Smi**>(second))->IsSmi());
Handle<Smi> h1(reinterpret_cast<Smi**>(first));
Handle<Smi> h2(reinterpret_cast<Smi**>(second));
return (*h1)->value() == (*h2)->value();
@@ -266,12 +264,12 @@ void ObjectLiteral::CalculateEmitStore() {
// If the key of a computed property is in the table, do not emit
// a store for the property later.
if (property->kind() == ObjectLiteral::Property::COMPUTED) {
- if (table->Lookup(literal, hash, false) != NULL) {
+ if (table->Lookup(key, hash, false) != NULL) {
property->set_emit_store(false);
}
}
// Add key to the table.
- table->Lookup(literal, hash, true);
+ table->Lookup(key, hash, true);
}
}
@@ -641,10 +639,19 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
}
#endif
- if (receiver_types_ != NULL && receiver_types_->length() > 0) {
- Handle<Map> type = receiver_types_->at(0);
- is_monomorphic_ = oracle->CallIsMonomorphic(this);
- if (is_monomorphic_) is_monomorphic_ = ComputeTarget(type, name);
+ is_monomorphic_ = oracle->CallIsMonomorphic(this);
+ check_type_ = oracle->GetCallCheckType(this);
+ if (is_monomorphic_) {
+ Handle<Map> map;
+ if (receiver_types_ != NULL && receiver_types_->length() > 0) {
+ ASSERT(check_type_ == RECEIVER_MAP_CHECK);
+ map = receiver_types_->at(0);
+ } else {
+ ASSERT(check_type_ != RECEIVER_MAP_CHECK);
+ map = Handle<Map>(
+ oracle->GetPrototypeForPrimitiveCheck(check_type_)->map());
+ }
+ is_monomorphic_ = ComputeTarget(map, name);
}
}
diff --git a/src/ast.h b/src/ast.h
index ba422fda..f55ddcd5 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1268,6 +1268,7 @@ class Call: public Expression {
arguments_(arguments),
pos_(pos),
is_monomorphic_(false),
+ check_type_(RECEIVER_MAP_CHECK),
receiver_types_(NULL),
return_id_(GetNextId()) {
}
@@ -1283,6 +1284,7 @@ class Call: public Expression {
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
virtual bool IsMonomorphic() { return is_monomorphic_; }
+ CheckType check_type() const { return check_type_; }
Handle<JSFunction> target() { return target_; }
Handle<JSObject> holder() { return holder_; }
Handle<JSGlobalPropertyCell> cell() { return cell_; }
@@ -1306,6 +1308,7 @@ class Call: public Expression {
int pos_;
bool is_monomorphic_;
+ CheckType check_type_;
ZoneMapList* receiver_types_;
Handle<JSFunction> target_;
Handle<JSObject> holder_;
@@ -1714,7 +1717,6 @@ class FunctionLiteral: public Expression {
int num_parameters() { return num_parameters_; }
bool AllowsLazyCompilation();
- bool AllowOptimize();
Handle<String> debug_name() const {
if (name_->length() > 0) return name_;
diff --git a/src/builtins.cc b/src/builtins.cc
index 0c76f694..c4c9fc11 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -31,6 +31,7 @@
#include "arguments.h"
#include "bootstrapper.h"
#include "builtins.h"
+#include "gdb-jit.h"
#include "ic-inl.h"
#include "vm-state-inl.h"
@@ -636,15 +637,20 @@ BUILTIN(ArraySlice) {
return CallJsBuiltin("ArraySlice", args);
}
elms = FixedArray::cast(JSObject::cast(receiver)->elements());
- len = elms->length();
-#ifdef DEBUG
- // Arguments object by construction should have no holes, check it.
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < len; i++) {
- ASSERT(elms->get(i) != Heap::the_hole_value());
+ Object* len_obj = JSObject::cast(receiver)
+ ->InObjectPropertyAt(Heap::arguments_length_index);
+ if (!len_obj->IsSmi()) {
+ return CallJsBuiltin("ArraySlice", args);
+ }
+ len = Smi::cast(len_obj)->value();
+ if (len > elms->length()) {
+ return CallJsBuiltin("ArraySlice", args);
+ }
+ for (int i = 0; i < len; i++) {
+ if (elms->get(i) == Heap::the_hole_value()) {
+ return CallJsBuiltin("ArraySlice", args);
}
}
-#endif
}
ASSERT(len >= 0);
int n_arguments = args.length() - 1;
@@ -1545,7 +1551,7 @@ void Builtins::Setup(bool create_heap_objects) {
CodeDesc desc;
masm.GetCode(&desc);
Code::Flags flags = functions[i].flags;
- Object* code = 0;
+ Object* code = NULL;
{
// During startup it's OK to always allocate and defer GC to later.
// This simplifies things because we don't need to retry.
@@ -1559,7 +1565,11 @@ void Builtins::Setup(bool create_heap_objects) {
}
// Log the event and add the code to the builtins array.
PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
- Code::cast(code), functions[i].s_name));
+ Code::cast(code),
+ functions[i].s_name));
+ GDBJIT(AddCode(GDBJITInterface::BUILTIN,
+ functions[i].s_name,
+ Code::cast(code)));
builtins_[i] = code;
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index ba027e93..69f8477f 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -30,6 +30,7 @@
#include "bootstrapper.h"
#include "code-stubs.h"
#include "factory.h"
+#include "gdb-jit.h"
#include "macro-assembler.h"
#include "oprofile-agent.h"
@@ -66,6 +67,7 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->instruction_start(),
code->instruction_size()));
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
+ GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
Counters::total_stubs_code_size.Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
diff --git a/src/codegen.cc b/src/codegen.cc
index da479e8f..c7e6f1c8 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -248,6 +248,9 @@ bool CodeGenerator::MakeCode(CompilationInfo* info) {
// Generate code.
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ masm.positions_recorder()->StartGDBJITLineInfoRecording();
+#endif
CodeGenerator cgen(&masm);
CodeGeneratorScope scope(&cgen);
cgen.Generate(info);
@@ -263,6 +266,14 @@ bool CodeGenerator::MakeCode(CompilationInfo* info) {
code->SetNoStackCheckTable();
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (!code.is_null()) {
+ GDBJITLineInfo* lineinfo =
+ masm.positions_recorder()->DetachGDBJITLineInfo();
+
+ GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
+ }
+#endif
return !code.is_null();
}
diff --git a/src/compiler.cc b/src/compiler.cc
index e4864e48..bbe7f2fc 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -35,6 +35,7 @@
#include "data-flow.h"
#include "debug.h"
#include "full-codegen.h"
+#include "gdb-jit.h"
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "liveedit.h"
@@ -92,6 +93,25 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
}
+void CompilationInfo::DisableOptimization() {
+ if (FLAG_optimize_closures) {
+    // If we allow closure optimizations and it's an optimizable closure,
+    // mark it accordingly.
+ bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
+ if (is_closure) {
+ bool is_optimizable_closure =
+ !scope_->outer_scope_calls_eval() && !scope_->inside_with();
+ if (is_optimizable_closure) {
+ SetMode(BASE);
+ return;
+ }
+ }
+ }
+
+ SetMode(NONOPT);
+}
+
+
// Determine whether to use the full compiler for all code. If the flag
// --always-full-compiler is specified this is the case. For the virtual frame
// based compiler the full compiler is also used if a debugger is connected, as
@@ -188,7 +208,8 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// Limit the number of times we re-compile a functions with
// the optimizing compiler.
- const int kMaxOptCount = FLAG_deopt_every_n_times == 0 ? 10 : 1000;
+ const int kMaxOptCount =
+ FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
if (info->shared_info()->opt_count() > kMaxOptCount) {
AbortAndDisable(info);
// True indicates the compilation pipeline is still going, not
@@ -262,7 +283,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
HTracer::Instance()->TraceCompilation(info->function());
}
- TypeFeedbackOracle oracle(Handle<Code>(info->shared_info()->code()));
+ TypeFeedbackOracle oracle(
+ Handle<Code>(info->shared_info()->code()),
+ Handle<Context>(info->closure()->context()->global_context()));
HGraphBuilder builder(&oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(info);
@@ -399,6 +422,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
info->code()->instruction_start(),
info->code()->instruction_size()));
+ GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
+ script,
+ info->code()));
} else {
PROFILE(CodeCreateEvent(
info->is_eval()
@@ -409,6 +435,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
OPROFILE(CreateNativeCodeRegion(info->is_eval() ? "Eval" : "Script",
info->code()->instruction_start(),
info->code()->instruction_size()));
+ GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
// Allocate function.
@@ -772,6 +799,10 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
code->instruction_size()));
}
}
+
+ GDBJIT(AddCode(name,
+ Handle<Script>(info->script()),
+ Handle<Code>(info->code())));
}
} } // namespace v8::internal
diff --git a/src/compiler.h b/src/compiler.h
index 1176c694..44ac9c85 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -114,7 +114,7 @@ class CompilationInfo BASE_EMBEDDED {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
}
- void DisableOptimization() { SetMode(NONOPT); }
+ void DisableOptimization();
// Deoptimization support.
bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
@@ -125,9 +125,7 @@ class CompilationInfo BASE_EMBEDDED {
// Determine whether or not we can adaptively optimize.
bool AllowOptimize() {
- return V8::UseCrankshaft() &&
- !closure_.is_null() &&
- function_->AllowOptimize();
+ return V8::UseCrankshaft() && !closure_.is_null();
}
private:
@@ -211,9 +209,13 @@ class CompilationInfo BASE_EMBEDDED {
class Compiler : public AllStatic {
public:
- // All routines return a JSFunction.
- // If an error occurs an exception is raised and
- // the return handle contains NULL.
+ // Default maximum number of function optimization attempts before we
+ // give up.
+ static const int kDefaultMaxOptCount = 10;
+
+ // All routines return a SharedFunctionInfo.
+ // If an error occurs an exception is raised and the return handle
+ // contains NULL.
// Compile a String source within a context.
static Handle<SharedFunctionInfo> Compile(Handle<String> source,
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index dcff07cc..1adf73ac 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -112,8 +112,8 @@ var debugger_flags = {
// Create a new break point object and add it to the list of break points.
-function MakeBreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
- var break_point = new BreakPoint(source_position, opt_line, opt_column, opt_script_break_point);
+function MakeBreakPoint(source_position, opt_script_break_point) {
+ var break_point = new BreakPoint(source_position, opt_script_break_point);
break_points.push(break_point);
return break_point;
}
@@ -123,10 +123,8 @@ function MakeBreakPoint(source_position, opt_line, opt_column, opt_script_break_
 // NOTE: This object does not have a reference to the function with the break
 // point, as that would prevent the function from being garbage collected when
 // it is no longer used. We do not want break points to keep functions alive.
-function BreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
+function BreakPoint(source_position, opt_script_break_point) {
this.source_position_ = source_position;
- this.source_line_ = opt_line;
- this.source_column_ = opt_column;
if (opt_script_break_point) {
this.script_break_point_ = opt_script_break_point;
} else {
@@ -424,7 +422,7 @@ ScriptBreakPoint.prototype.set = function (script) {
if (position === null) return;
// Create a break point object and set the break point.
- break_point = MakeBreakPoint(position, this.line(), this.column(), this);
+ break_point = MakeBreakPoint(position, this);
break_point.setIgnoreCount(this.ignoreCount());
var actual_position = %SetScriptBreakPoint(script, position, break_point);
if (IS_UNDEFINED(actual_position)) {
@@ -639,7 +637,7 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
opt_condition);
} else {
// Set a break point directly on the function.
- var break_point = MakeBreakPoint(source_position, opt_line, opt_column);
+ var break_point = MakeBreakPoint(source_position);
var actual_position =
%SetFunctionBreakPoint(func, source_position, break_point);
actual_position += this.sourcePosition(func);
@@ -652,6 +650,25 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
};
+Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
+                                                    condition, enabled) {
+  var break_point = MakeBreakPoint(position);
+  break_point.setCondition(condition);
+  if (!enabled) {
+    break_point.disable();
+  }
+ var scripts = this.scripts();
+ for (var i = 0; i < scripts.length; i++) {
+ if (script_id == scripts[i].id) {
+ break_point.actual_position = %SetScriptBreakPoint(scripts[i], position,
+ break_point);
+ break;
+ }
+ }
+ return break_point;
+};
+
+
Debug.enableBreakPoint = function(break_point_number) {
var break_point = this.findBreakPoint(break_point_number, false);
// Only enable if the breakpoint hasn't been deleted:
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
new file mode 100644
index 00000000..73888fc0
--- /dev/null
+++ b/src/extensions/experimental/experimental.gyp
@@ -0,0 +1,50 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+ 'variables': {
+ 'icu_src_dir%': '',
+ },
+ 'targets': [
+ {
+ 'target_name': 'i18n_api',
+ 'type': 'static_library',
+ 'sources': [
+ 'i18n-extension.cc',
+ 'i18n-extension.h',
+ ],
+ 'include_dirs': [
+ '<(icu_src_dir)/public/common',
+ '../..',
+ ],
+ 'dependencies': [
+ '<(icu_src_dir)/icu.gyp:*',
+ '../../../tools/gyp/v8.gyp:v8',
+ ],
+ },
+ ], # targets
+}
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 6e73258f..fb892d62 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -141,6 +141,7 @@ DEFINE_bool(use_osr, false, "use on-stack replacement")
#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
+DEFINE_bool(optimize_closures, true, "optimize closures")
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_bool(debug_code, false,
@@ -365,6 +366,14 @@ DEFINE_bool(debug_script_collected_events, true,
"Enable debugger script collected events")
#endif
+
+//
+// GDB JIT integration flags.
+//
+
+DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
+DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
+
//
// Debug only flags
//
diff --git a/src/frames.cc b/src/frames.cc
index 3af72887..7f28ff17 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -329,21 +329,20 @@ void SafeStackTraceFrameIterator::Advance() {
Code* StackFrame::GetSafepointData(Address pc,
- uint8_t** safepoint_entry,
+ SafepointEntry* safepoint_entry,
unsigned* stack_slots) {
PcToCodeCache::PcToCodeCacheEntry* entry = PcToCodeCache::GetCacheEntry(pc);
- uint8_t* cached_safepoint_entry = entry->safepoint_entry;
- if (cached_safepoint_entry == NULL) {
- cached_safepoint_entry = entry->code->GetSafepointEntry(pc);
- ASSERT(cached_safepoint_entry != NULL); // No safepoint found.
- entry->safepoint_entry = cached_safepoint_entry;
+ if (!entry->safepoint_entry.is_valid()) {
+ entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
+ ASSERT(entry->safepoint_entry.is_valid());
} else {
- ASSERT(cached_safepoint_entry == entry->code->GetSafepointEntry(pc));
+ ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
}
// Fill in the results and return the code.
Code* code = entry->code;
- *safepoint_entry = cached_safepoint_entry;
+ *safepoint_entry = entry->safepoint_entry;
*stack_slots = code->stack_slots();
return code;
}
@@ -536,7 +535,7 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
// Compute the safepoint information.
unsigned stack_slots = 0;
- uint8_t* safepoint_entry = NULL;
+ SafepointEntry safepoint_entry;
Code* code = StackFrame::GetSafepointData(
pc(), &safepoint_entry, &stack_slots);
unsigned slot_space = stack_slots * kPointerSize;
@@ -548,10 +547,23 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
Object** parameters_limit = &Memory::Object_at(
fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
+ // Visit the parameters that may be on top of the saved registers.
+ if (safepoint_entry.argument_count() > 0) {
+ v->VisitPointers(parameters_base,
+ parameters_base + safepoint_entry.argument_count());
+ parameters_base += safepoint_entry.argument_count();
+ }
+
+ // Skip saved double registers.
+ if (safepoint_entry.has_doubles()) {
+ parameters_base += DoubleRegister::kNumAllocatableRegisters *
+ kDoubleSize / kPointerSize;
+ }
+
// Visit the registers that contain pointers if any.
- if (SafepointTable::HasRegisters(safepoint_entry)) {
+ if (safepoint_entry.HasRegisters()) {
     for (int i = kNumSafepointRegisters - 1; i >= 0; i--) {
- if (SafepointTable::HasRegisterAt(safepoint_entry, i)) {
+ if (safepoint_entry.HasRegisterAt(i)) {
int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
v->VisitPointer(parameters_base + reg_stack_index);
}
@@ -561,7 +573,8 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
}
// We're done dealing with the register bits.
- safepoint_entry += kNumSafepointRegisters >> kBitsPerByteLog2;
+ uint8_t* safepoint_bits = safepoint_entry.bits();
+ safepoint_bits += kNumSafepointRegisters >> kBitsPerByteLog2;
// Visit the rest of the parameters.
v->VisitPointers(parameters_base, parameters_limit);
@@ -570,7 +583,7 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
for (unsigned index = 0; index < stack_slots; index++) {
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
- if ((safepoint_entry[byte_index] & (1U << bit_index)) != 0) {
+ if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
v->VisitPointer(parameters_limit + index);
}
}
@@ -778,14 +791,8 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
ASSERT(code != NULL);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
- SafepointTable table(code);
- unsigned pc_offset = static_cast<unsigned>(pc() - code->instruction_start());
- for (unsigned i = 0; i < table.length(); i++) {
- if (table.GetPcOffset(i) == pc_offset) {
- *deopt_index = table.GetDeoptimizationIndex(i);
- break;
- }
- }
+ SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
+ *deopt_index = safepoint_entry.deoptimization_index();
ASSERT(*deopt_index != AstNode::kNoNumber);
return DeoptimizationInputData::cast(code->deoptimization_data());
@@ -1150,7 +1157,7 @@ PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
// been set. Otherwise, we risk trying to use a cache entry before
// the code has been computed.
entry->code = GcSafeFindCodeForPc(pc);
- entry->safepoint_entry = NULL;
+ entry->safepoint_entry.Reset();
entry->pc = pc;
}
return entry;
diff --git a/src/frames.h b/src/frames.h
index 778f9d24..53787090 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -28,6 +28,8 @@
#ifndef V8_FRAMES_H_
#define V8_FRAMES_H_
+#include "safepoint-table.h"
+
namespace v8 {
namespace internal {
@@ -51,7 +53,7 @@ class PcToCodeCache : AllStatic {
struct PcToCodeCacheEntry {
Address pc;
Code* code;
- uint8_t* safepoint_entry;
+ SafepointEntry safepoint_entry;
};
static PcToCodeCacheEntry* cache(int index) {
@@ -208,7 +210,7 @@ class StackFrame BASE_EMBEDDED {
// safepoint entry and the number of stack slots. The pc must be at
// a safepoint.
static Code* GetSafepointData(Address pc,
- uint8_t** safepoint_entry,
+ SafepointEntry* safepoint_entry,
unsigned* stack_slots);
virtual void Iterate(ObjectVisitor* v) const = 0;
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 58540f07..9366e427 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -286,6 +286,9 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ masm.positions_recorder()->StartGDBJITLineInfoRecording();
+#endif
FullCodeGenerator cgen(&masm);
cgen.Generate(info);
@@ -304,6 +307,14 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_stack_check_table_start(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // may be an empty handle.
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (!code.is_null()) {
+ GDBJITLineInfo* lineinfo =
+ masm.positions_recorder()->DetachGDBJITLineInfo();
+
+ GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
+ }
+#endif
return !code.is_null();
}
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
new file mode 100644
index 00000000..b1782cbd
--- /dev/null
+++ b/src/gdb-jit.cc
@@ -0,0 +1,1170 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef ENABLE_GDB_JIT_INTERFACE
+#include "gdb-jit.h"
+
+#include "bootstrapper.h"
+#include "compiler.h"
+#include "global-handles.h"
+#include "messages.h"
+#include "natives.h"
+
+namespace v8 {
+namespace internal {
+
+class ELF;
+
+class Writer BASE_EMBEDDED {
+ public:
+ explicit Writer(ELF* elf)
+ : elf_(elf),
+ position_(0),
+ capacity_(1024),
+ buffer_(reinterpret_cast<byte*>(malloc(capacity_))) {
+ }
+
+ ~Writer() {
+ free(buffer_);
+ }
+
+ uintptr_t position() const {
+ return position_;
+ }
+
+ template<typename T>
+ class Slot {
+ public:
+ Slot(Writer* w, uintptr_t offset) : w_(w), offset_(offset) { }
+
+ T* operator-> () {
+ return w_->RawSlotAt<T>(offset_);
+ }
+
+ void set(const T& value) {
+ *w_->RawSlotAt<T>(offset_) = value;
+ }
+
+ Slot<T> at(int i) {
+ return Slot<T>(w_, offset_ + sizeof(T) * i);
+ }
+
+ private:
+ Writer* w_;
+ uintptr_t offset_;
+ };
+
+ template<typename T>
+ void Write(const T& val) {
+ Ensure(position_ + sizeof(T));
+ *RawSlotAt<T>(position_) = val;
+ position_ += sizeof(T);
+ }
+
+ template<typename T>
+ Slot<T> SlotAt(uintptr_t offset) {
+ Ensure(offset + sizeof(T));
+ return Slot<T>(this, offset);
+ }
+
+ template<typename T>
+ Slot<T> CreateSlotHere() {
+ return CreateSlotsHere<T>(1);
+ }
+
+ template<typename T>
+ Slot<T> CreateSlotsHere(uint32_t count) {
+ uintptr_t slot_position = position_;
+ position_ += sizeof(T) * count;
+ Ensure(position_);
+ return SlotAt<T>(slot_position);
+ }
+
+ void Ensure(uintptr_t pos) {
+ if (capacity_ < pos) {
+ while (capacity_ < pos) capacity_ *= 2;
+ buffer_ = reinterpret_cast<byte*>(realloc(buffer_, capacity_));
+ }
+ }
+
+ ELF* elf() { return elf_; }
+
+ byte* buffer() { return buffer_; }
+
+ void Align(uintptr_t align) {
+ uintptr_t delta = position_ % align;
+ if (delta == 0) return;
+ uintptr_t padding = align - delta;
+ Ensure(position_ += padding);
+ ASSERT((position_ % align) == 0);
+ }
+
+ void WriteULEB128(uintptr_t value) {
+ do {
+ uint8_t byte = value & 0x7F;
+ value >>= 7;
+ if (value != 0) byte |= 0x80;
+ Write<uint8_t>(byte);
+ } while (value != 0);
+ }
+
+ void WriteSLEB128(intptr_t value) {
+ bool more = true;
+ while (more) {
+ int8_t byte = value & 0x7F;
+ bool byte_sign = byte & 0x40;
+ value >>= 7;
+
+ if ((value == 0 && !byte_sign) || (value == -1 && byte_sign)) {
+ more = false;
+ } else {
+ byte |= 0x80;
+ }
+
+ Write<int8_t>(byte);
+ }
+ }
+
+ void WriteString(const char* str) {
+ do {
+ Write<char>(*str);
+ } while (*str++);
+ }
+
+ private:
+ template<typename T> friend class Slot;
+
+ template<typename T>
+ T* RawSlotAt(uintptr_t offset) {
+ ASSERT(offset < capacity_ && offset + sizeof(T) <= capacity_);
+ return reinterpret_cast<T*>(&buffer_[offset]);
+ }
+
+ ELF* elf_;
+ uintptr_t position_;
+ uintptr_t capacity_;
+ byte* buffer_;
+};
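+
+// Editor's note: WriteULEB128 and WriteSLEB128 above implement the standard
+// DWARF LEB128 encodings: seven payload bits per byte, least significant
+// group first, with the high bit set on every byte except the last. For
+// example, the DWARF spec's sample value 624485 (0b100110_0001110_1100101)
+// is written as the three bytes 0xE5 0x8E 0x26.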
+
+class StringTable;
+
+class ELFSection : public ZoneObject {
+ public:
+ struct Header {
+ uint32_t name;
+ uint32_t type;
+ uintptr_t flags;
+ uintptr_t address;
+ uintptr_t offset;
+ uintptr_t size;
+ uint32_t link;
+ uint32_t info;
+ uintptr_t alignment;
+ uintptr_t entry_size;
+ };
+
+ enum Type {
+ TYPE_NULL = 0,
+ TYPE_PROGBITS = 1,
+ TYPE_SYMTAB = 2,
+ TYPE_STRTAB = 3,
+ TYPE_RELA = 4,
+ TYPE_HASH = 5,
+ TYPE_DYNAMIC = 6,
+ TYPE_NOTE = 7,
+ TYPE_NOBITS = 8,
+ TYPE_REL = 9,
+ TYPE_SHLIB = 10,
+ TYPE_DYNSYM = 11,
+ TYPE_LOPROC = 0x70000000,
+ TYPE_HIPROC = 0x7fffffff,
+ TYPE_LOUSER = 0x80000000,
+ TYPE_HIUSER = 0xffffffff
+ };
+
+ enum Flags {
+ FLAG_WRITE = 1,
+ FLAG_ALLOC = 2,
+ FLAG_EXEC = 4
+ };
+
+ enum SpecialIndexes {
+ INDEX_ABSOLUTE = 0xfff1
+ };
+
+ ELFSection(const char* name, Type type, uintptr_t align)
+ : name_(name), type_(type), align_(align) { }
+
+ virtual ~ELFSection() { }
+
+ void PopulateHeader(Writer::Slot<Header> header, StringTable* strtab);
+
+ virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
+ uintptr_t start = w->position();
+ if (WriteBody(w)) {
+ uintptr_t end = w->position();
+ header->offset = start;
+ header->size = end - start;
+ }
+ }
+
+ virtual bool WriteBody(Writer* w) {
+ return false;
+ }
+
+ uint16_t index() const { return index_; }
+ void set_index(uint16_t index) { index_ = index; }
+
+ protected:
+ virtual void PopulateHeader(Writer::Slot<Header> header) {
+ header->flags = 0;
+ header->address = 0;
+ header->offset = 0;
+ header->size = 0;
+ header->link = 0;
+ header->info = 0;
+ header->entry_size = 0;
+ }
+
+
+ private:
+ const char* name_;
+ Type type_;
+ uintptr_t align_;
+ uint16_t index_;
+};
+
+
+class FullHeaderELFSection : public ELFSection {
+ public:
+ FullHeaderELFSection(const char* name,
+ Type type,
+ uintptr_t align,
+ uintptr_t addr,
+ uintptr_t offset,
+ uintptr_t size,
+ uintptr_t flags)
+ : ELFSection(name, type, align),
+ addr_(addr),
+ offset_(offset),
+ size_(size),
+ flags_(flags) { }
+
+ protected:
+ virtual void PopulateHeader(Writer::Slot<Header> header) {
+ ELFSection::PopulateHeader(header);
+ header->address = addr_;
+ header->offset = offset_;
+ header->size = size_;
+ header->flags = flags_;
+ }
+
+ private:
+ uintptr_t addr_;
+ uintptr_t offset_;
+ uintptr_t size_;
+ uintptr_t flags_;
+};
+
+
+class StringTable : public ELFSection {
+ public:
+ explicit StringTable(const char* name)
+ : ELFSection(name, TYPE_STRTAB, 1), writer_(NULL), offset_(0), size_(0) {
+ }
+
+ uintptr_t Add(const char* str) {
+ if (*str == '\0') return 0;
+
+ uintptr_t offset = size_;
+ WriteString(str);
+ return offset;
+ }
+
+ void AttachWriter(Writer* w) {
+ writer_ = w;
+ offset_ = writer_->position();
+
+ // First entry in the string table should be an empty string.
+ WriteString("");
+ }
+
+ void DetachWriter() {
+ writer_ = NULL;
+ }
+
+ virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
+ ASSERT(writer_ == NULL);
+ header->offset = offset_;
+ header->size = size_;
+ }
+
+ private:
+ void WriteString(const char* str) {
+ uintptr_t written = 0;
+ do {
+ writer_->Write(*str);
+ written++;
+ } while (*str++);
+ size_ += written;
+ }
+
+ Writer* writer_;
+
+ uintptr_t offset_;
+ uintptr_t size_;
+};
+
+
+void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
+ StringTable* strtab) {
+ header->name = strtab->Add(name_);
+ header->type = type_;
+ header->alignment = align_;
+ PopulateHeader(header);
+}
+
+
+class ELF BASE_EMBEDDED {
+ public:
+ ELF() : sections_(6) {
+ sections_.Add(new ELFSection("", ELFSection::TYPE_NULL, 0));
+ sections_.Add(new StringTable(".shstrtab"));
+ }
+
+ void Write(Writer* w) {
+ WriteHeader(w);
+ WriteSectionTable(w);
+ WriteSections(w);
+ }
+
+ ELFSection* SectionAt(uint32_t index) {
+ return sections_[index];
+ }
+
+ uint32_t AddSection(ELFSection* section) {
+ sections_.Add(section);
+ section->set_index(sections_.length() - 1);
+ return sections_.length() - 1;
+ }
+
+ private:
+ struct ELFHeader {
+ uint8_t ident[16];
+ uint16_t type;
+ uint16_t machine;
+ uint32_t version;
+ uintptr_t entry;
+ uintptr_t pht_offset;
+ uintptr_t sht_offset;
+ uint32_t flags;
+ uint16_t header_size;
+ uint16_t pht_entry_size;
+ uint16_t pht_entry_num;
+ uint16_t sht_entry_size;
+ uint16_t sht_entry_num;
+ uint16_t sht_strtab_index;
+ };
+
+
+ void WriteHeader(Writer* w) {
+ ASSERT(w->position() == 0);
+ Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
+#if defined(V8_TARGET_ARCH_IA32)
+ const uint8_t ident[16] =
+ { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+#elif defined(V8_TARGET_ARCH_X64)
+ const uint8_t ident[16] =
+      { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+#else
+#error Unsupported target architecture.
+#endif
+ memcpy(header->ident, ident, 16);
+ header->type = 1;
+#if defined(V8_TARGET_ARCH_IA32)
+ header->machine = 3;
+#elif defined(V8_TARGET_ARCH_X64)
+ // Processor identification value for x64 is 62 as defined in
+ // System V ABI, AMD64 Supplement
+ // http://www.x86-64.org/documentation/abi.pdf
+ header->machine = 62;
+#else
+#error Unsupported target architecture.
+#endif
+ header->version = 1;
+ header->entry = 0;
+ header->pht_offset = 0;
+ header->sht_offset = sizeof(ELFHeader); // Section table follows header.
+ header->flags = 0;
+ header->header_size = sizeof(ELFHeader);
+ header->pht_entry_size = 0;
+ header->pht_entry_num = 0;
+ header->sht_entry_size = sizeof(ELFSection::Header);
+ header->sht_entry_num = sections_.length();
+ header->sht_strtab_index = 1;
+ }
+
+ void WriteSectionTable(Writer* w) {
+    // The section header table immediately follows the file header.
+ ASSERT(w->position() == sizeof(ELFHeader));
+
+ Writer::Slot<ELFSection::Header> headers =
+ w->CreateSlotsHere<ELFSection::Header>(sections_.length());
+
+    // The string table holding the section names is the section at index 1.
+ StringTable* strtab = static_cast<StringTable*>(SectionAt(1));
+ strtab->AttachWriter(w);
+    for (int i = 0, length = sections_.length(); i < length; i++) {
+ sections_[i]->PopulateHeader(headers.at(i), strtab);
+ }
+ strtab->DetachWriter();
+ }
+
+ int SectionHeaderPosition(uint32_t section_index) {
+ return sizeof(ELFHeader) + sizeof(ELFSection::Header) * section_index;
+ }
+
+ void WriteSections(Writer* w) {
+ Writer::Slot<ELFSection::Header> headers =
+ w->SlotAt<ELFSection::Header>(sizeof(ELFHeader));
+
+    for (int i = 0, length = sections_.length(); i < length; i++) {
+ sections_[i]->WriteBody(headers.at(i), w);
+ }
+ }
+
+ ZoneList<ELFSection*> sections_;
+};
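+
+// Editor's note on the image layout that ELF::Write produces (offsets are
+// relative to the start of the Writer buffer):
+//
+//   0                  ELFHeader
+//   sizeof(ELFHeader)  section header table, one entry per section
+//   after the headers  section bodies, emitted by WriteSections()
+//
+// WriteHeader() hard-codes sht_offset to match, and WriteSectionTable()
+// asserts that the header table really starts right after the file header.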
+
+
+class ELFSymbol BASE_EMBEDDED {
+ public:
+ enum Type {
+ TYPE_NOTYPE = 0,
+ TYPE_OBJECT = 1,
+ TYPE_FUNC = 2,
+ TYPE_SECTION = 3,
+ TYPE_FILE = 4,
+ TYPE_LOPROC = 13,
+ TYPE_HIPROC = 15
+ };
+
+ enum Binding {
+ BIND_LOCAL = 0,
+ BIND_GLOBAL = 1,
+ BIND_WEAK = 2,
+ BIND_LOPROC = 13,
+ BIND_HIPROC = 15
+ };
+
+ ELFSymbol(const char* name,
+ uintptr_t value,
+ uintptr_t size,
+ Binding binding,
+ Type type,
+ uint16_t section)
+ : name(name),
+ value(value),
+ size(size),
+ info((binding << 4) | type),
+ other(0),
+ section(section) {
+ }
+
+ Binding binding() const {
+ return static_cast<Binding>(info >> 4);
+ }
+
+#if defined(V8_TARGET_ARCH_IA32)
+ struct SerializedLayout {
+ SerializedLayout(uint32_t name,
+ uintptr_t value,
+ uintptr_t size,
+ Binding binding,
+ Type type,
+ uint16_t section)
+ : name(name),
+ value(value),
+ size(size),
+ info((binding << 4) | type),
+ other(0),
+ section(section) {
+ }
+
+ uint32_t name;
+ uintptr_t value;
+ uintptr_t size;
+ uint8_t info;
+ uint8_t other;
+ uint16_t section;
+ };
+#elif defined(V8_TARGET_ARCH_X64)
+ struct SerializedLayout {
+ SerializedLayout(uint32_t name,
+ uintptr_t value,
+ uintptr_t size,
+ Binding binding,
+ Type type,
+ uint16_t section)
+ : name(name),
+ info((binding << 4) | type),
+ other(0),
+ section(section),
+ value(value),
+ size(size) {
+ }
+
+ uint32_t name;
+ uint8_t info;
+ uint8_t other;
+ uint16_t section;
+ uintptr_t value;
+ uintptr_t size;
+ };
+#endif
+
+ void Write(Writer::Slot<SerializedLayout> s, StringTable* t) {
+ // Convert symbol names from strings to indexes in the string table.
+ s->name = t->Add(name);
+ s->value = value;
+ s->size = size;
+ s->info = info;
+ s->other = other;
+ s->section = section;
+ }
+
+ private:
+ const char* name;
+ uintptr_t value;
+ uintptr_t size;
+ uint8_t info;
+ uint8_t other;
+ uint16_t section;
+};
+
+
+class ELFSymbolTable : public ELFSection {
+ public:
+ explicit ELFSymbolTable(const char* name)
+ : ELFSection(name, TYPE_SYMTAB, sizeof(uintptr_t)),
+ locals_(1),
+ globals_(1) {
+ }
+
+ virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
+ w->Align(header->alignment);
+ int total_symbols = locals_.length() + globals_.length() + 1;
+ header->offset = w->position();
+
+ Writer::Slot<ELFSymbol::SerializedLayout> symbols =
+ w->CreateSlotsHere<ELFSymbol::SerializedLayout>(total_symbols);
+
+ header->size = w->position() - header->offset;
+
+    // The string table for this symbol table should follow it in the
+    // section table.
+ StringTable* strtab =
+ static_cast<StringTable*>(w->elf()->SectionAt(index() + 1));
+ strtab->AttachWriter(w);
+ symbols.at(0).set(ELFSymbol::SerializedLayout(0,
+ 0,
+ 0,
+ ELFSymbol::BIND_LOCAL,
+ ELFSymbol::TYPE_NOTYPE,
+ 0));
+ WriteSymbolsList(&locals_, symbols.at(1), strtab);
+ WriteSymbolsList(&globals_, symbols.at(locals_.length() + 1), strtab);
+ strtab->DetachWriter();
+ }
+
+ void Add(const ELFSymbol& symbol) {
+ if (symbol.binding() == ELFSymbol::BIND_LOCAL) {
+ locals_.Add(symbol);
+ } else {
+ globals_.Add(symbol);
+ }
+ }
+
+ protected:
+ virtual void PopulateHeader(Writer::Slot<Header> header) {
+ ELFSection::PopulateHeader(header);
+    // We assume that the string table immediately follows the symbol table.
+ header->link = index() + 1;
+ header->info = locals_.length() + 1;
+ header->entry_size = sizeof(ELFSymbol::SerializedLayout);
+ }
+
+ private:
+ void WriteSymbolsList(const ZoneList<ELFSymbol>* src,
+ Writer::Slot<ELFSymbol::SerializedLayout> dst,
+ StringTable* strtab) {
+    for (int i = 0, len = src->length(); i < len; i++) {
+ src->at(i).Write(dst.at(i), strtab);
+ }
+ }
+
+ ZoneList<ELFSymbol> locals_;
+ ZoneList<ELFSymbol> globals_;
+};
+
+
+class CodeDescription BASE_EMBEDDED {
+ public:
+ CodeDescription(const char* name,
+ Code* code,
+ Handle<Script> script,
+ GDBJITLineInfo* lineinfo)
+      : name_(name), code_(code), script_(script), lineinfo_(lineinfo) { }
+
+ const char* code_name() const {
+ return name_;
+ }
+
+ uintptr_t code_size() const {
+ return code_->instruction_end() - code_->instruction_start();
+ }
+
+ uintptr_t code_start() const {
+    return reinterpret_cast<uintptr_t>(code_->instruction_start());
+ }
+
+ bool is_line_info_available() {
+ return !script_.is_null() &&
+ script_->source()->IsString() &&
+ script_->HasValidSource() &&
+ script_->name()->IsString() &&
+ lineinfo_ != NULL;
+ }
+
+ GDBJITLineInfo* lineinfo() const { return lineinfo_; }
+
+ SmartPointer<char> filename() {
+ return String::cast(script_->name())->ToCString();
+ }
+
+  int GetScriptLineNumber(int pos) {
+    // Convert the 0-based script line number to the 1-based numbering
+    // that DWARF expects.
+    return GetScriptLineNumberSafe(script_, pos) + 1;
+  }
+
+ private:
+ const char* name_;
+ Code* code_;
+ Handle<Script> script_;
+ GDBJITLineInfo* lineinfo_;
+};
+
+
+static void CreateSymbolsTable(CodeDescription* desc,
+ ELF* elf,
+ int text_section_index) {
+ ELFSymbolTable* symtab = new ELFSymbolTable(".symtab");
+ StringTable* strtab = new StringTable(".strtab");
+
+ // Symbol table should be followed by the linked string table.
+ elf->AddSection(symtab);
+ elf->AddSection(strtab);
+
+ symtab->Add(ELFSymbol("V8 Code",
+ 0,
+ 0,
+ ELFSymbol::BIND_LOCAL,
+ ELFSymbol::TYPE_FILE,
+ ELFSection::INDEX_ABSOLUTE));
+
+ symtab->Add(ELFSymbol(desc->code_name(),
+ 0,
+ desc->code_size(),
+ ELFSymbol::BIND_GLOBAL,
+ ELFSymbol::TYPE_FUNC,
+ text_section_index));
+}
+
+
+class DebugInfoSection : public ELFSection {
+ public:
+ explicit DebugInfoSection(CodeDescription* desc)
+ : ELFSection(".debug_info", TYPE_PROGBITS, 1), desc_(desc) { }
+
+ bool WriteBody(Writer* w) {
+ Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
+ uintptr_t start = w->position();
+ w->Write<uint16_t>(2); // DWARF version.
+ w->Write<uint32_t>(0); // Abbreviation table offset.
+ w->Write<uint8_t>(sizeof(intptr_t));
+
+ w->WriteULEB128(1); // Abbreviation code.
+ w->WriteString(*desc_->filename());
+ w->Write<intptr_t>(desc_->code_start());
+ w->Write<intptr_t>(desc_->code_start() + desc_->code_size());
+    w->Write<uint32_t>(0);  // DW_AT_STMT_LIST: offset into .debug_line.
+ size.set(static_cast<uint32_t>(w->position() - start));
+ return true;
+ }
+
+ private:
+ CodeDescription* desc_;
+};
+
+
+class DebugAbbrevSection : public ELFSection {
+ public:
+ DebugAbbrevSection() : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1) { }
+
+ // DWARF2 standard, figure 14.
+ enum DWARF2Tags {
+ DW_TAG_COMPILE_UNIT = 0x11
+ };
+
+ // DWARF2 standard, figure 16.
+ enum DWARF2ChildrenDetermination {
+ DW_CHILDREN_NO = 0,
+ DW_CHILDREN_YES = 1
+ };
+
+  // DWARF2 standard, figure 17.
+ enum DWARF2Attribute {
+ DW_AT_NAME = 0x3,
+ DW_AT_STMT_LIST = 0x10,
+ DW_AT_LOW_PC = 0x11,
+ DW_AT_HIGH_PC = 0x12
+ };
+
+ // DWARF2 standard, figure 19.
+ enum DWARF2AttributeForm {
+ DW_FORM_ADDR = 0x1,
+ DW_FORM_STRING = 0x8,
+ DW_FORM_DATA4 = 0x6
+ };
+
+ bool WriteBody(Writer* w) {
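+    // This single abbreviation (code 1: DW_TAG_COMPILE_UNIT, no children)
+    // must declare its attributes in exactly the order in which
+    // DebugInfoSection::WriteBody writes their values.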
+ w->WriteULEB128(1);
+ w->WriteULEB128(DW_TAG_COMPILE_UNIT);
+ w->Write<uint8_t>(DW_CHILDREN_NO);
+ w->WriteULEB128(DW_AT_NAME);
+ w->WriteULEB128(DW_FORM_STRING);
+ w->WriteULEB128(DW_AT_LOW_PC);
+ w->WriteULEB128(DW_FORM_ADDR);
+ w->WriteULEB128(DW_AT_HIGH_PC);
+ w->WriteULEB128(DW_FORM_ADDR);
+ w->WriteULEB128(DW_AT_STMT_LIST);
+ w->WriteULEB128(DW_FORM_DATA4);
+ w->WriteULEB128(0);
+ w->WriteULEB128(0);
+ w->WriteULEB128(0);
+ return true;
+ }
+};
+
+
+class DebugLineSection : public ELFSection {
+ public:
+ explicit DebugLineSection(CodeDescription* desc)
+ : ELFSection(".debug_line", TYPE_PROGBITS, 1),
+ desc_(desc) { }
+
+ // DWARF2 standard, figure 34.
+ enum DWARF2Opcodes {
+ DW_LNS_COPY = 1,
+ DW_LNS_ADVANCE_PC = 2,
+ DW_LNS_ADVANCE_LINE = 3,
+ DW_LNS_SET_FILE = 4,
+ DW_LNS_SET_COLUMN = 5,
+ DW_LNS_NEGATE_STMT = 6
+ };
+
+ // DWARF2 standard, figure 35.
+ enum DWARF2ExtendedOpcode {
+ DW_LNE_END_SEQUENCE = 1,
+ DW_LNE_SET_ADDRESS = 2,
+ DW_LNE_DEFINE_FILE = 3
+ };
+
+ bool WriteBody(Writer* w) {
+ // Write prologue.
+ Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>();
+ uintptr_t start = w->position();
+
+ w->Write<uint16_t>(2); // Field version.
+ Writer::Slot<uint32_t> prologue_length = w->CreateSlotHere<uint32_t>();
+ uintptr_t prologue_start = w->position();
+ w->Write<uint8_t>(1); // Field minimum_instruction_length.
+ w->Write<uint8_t>(1); // Field default_is_stmt.
+ w->Write<int8_t>(0); // Field line_base.
+ w->Write<uint8_t>(2); // Field line_range.
+ w->Write<uint8_t>(DW_LNS_NEGATE_STMT + 1); // Field opcode_base.
+ w->Write<uint8_t>(0); // DW_LNS_COPY operands count.
+ w->Write<uint8_t>(1); // DW_LNS_ADVANCE_PC operands count.
+ w->Write<uint8_t>(1); // DW_LNS_ADVANCE_LINE operands count.
+ w->Write<uint8_t>(1); // DW_LNS_SET_FILE operands count.
+ w->Write<uint8_t>(1); // DW_LNS_SET_COLUMN operands count.
+ w->Write<uint8_t>(0); // DW_LNS_NEGATE_STMT operands count.
+ w->Write<uint8_t>(0); // Empty include_directories sequence.
+ w->WriteString(*desc_->filename()); // File name.
+    w->WriteULEB128(0);  // Directory index 0 (the compilation directory).
+    w->WriteULEB128(0);  // Unknown modification time.
+    w->WriteULEB128(0);  // Unknown file size.
+    w->Write<uint8_t>(0);  // Terminate the file_names list.
+ prologue_length.set(static_cast<uint32_t>(w->position() - prologue_start));
+
+ WriteExtendedOpcode(w, DW_LNE_SET_ADDRESS, sizeof(intptr_t));
+ w->Write<intptr_t>(desc_->code_start());
+
+ intptr_t pc = 0;
+ intptr_t line = 1;
+ bool is_statement = true;
+
+ List<GDBJITLineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
+ pc_info->Sort(&ComparePCInfo);
+ for (int i = 0; i < pc_info->length(); i++) {
+ GDBJITLineInfo::PCInfo* info = &pc_info->at(i);
+ uintptr_t pc_diff = info->pc_ - pc;
+ ASSERT(info->pc_ >= pc);
+ if (pc_diff != 0) {
+ w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
+ w->WriteSLEB128(pc_diff);
+ pc += pc_diff;
+ }
+ intptr_t line_diff = desc_->GetScriptLineNumber(info->pos_) - line;
+ if (line_diff != 0) {
+ w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
+ w->WriteSLEB128(line_diff);
+ line += line_diff;
+ }
+ if (is_statement != info->is_statement_) {
+ w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
+ is_statement = !is_statement;
+ }
+ if (pc_diff != 0 || i == 0) {
+ w->Write<uint8_t>(DW_LNS_COPY);
+ }
+ }
+ WriteExtendedOpcode(w, DW_LNE_END_SEQUENCE, 0);
+ total_length.set(static_cast<uint32_t>(w->position() - start));
+ return true;
+ }
+
+ private:
+ void WriteExtendedOpcode(Writer* w,
+ DWARF2ExtendedOpcode op,
+ size_t operands_size) {
+ w->Write<uint8_t>(0);
+ w->WriteULEB128(operands_size + 1);
+ w->Write<uint8_t>(op);
+ }
+
+ static int ComparePCInfo(const GDBJITLineInfo::PCInfo* a,
+ const GDBJITLineInfo::PCInfo* b) {
+ if (a->pc_ == b->pc_) {
+ if (a->is_statement_ != b->is_statement_) {
+ return b->is_statement_ ? +1 : -1;
+ }
+ return 0;
+ } else if (a->pc_ > b->pc_) {
+ return +1;
+ } else {
+ return -1;
+ }
+ }
+
+ CodeDescription* desc_;
+};
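+
+
+// Editor's worked example of the opcode stream the loop above emits for two
+// made-up PCInfo entries, (pc 0x10, line 3) and (pc 0x24, line 7), starting
+// from the initial state pc = 0, line = 1 with no statement toggles:
+//
+//   DW_LNS_ADVANCE_PC   0x10     (pc:   0x00 -> 0x10)
+//   DW_LNS_ADVANCE_LINE 2        (line: 1    -> 3)
+//   DW_LNS_COPY                  (append the row (0x10, 3))
+//   DW_LNS_ADVANCE_PC   0x14     (pc:   0x10 -> 0x24)
+//   DW_LNS_ADVANCE_LINE 4        (line: 3    -> 7)
+//   DW_LNS_COPY                  (append the row (0x24, 7))
+//   DW_LNE_END_SEQUENCE          (terminate the sequence)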
+
+
+static void CreateDWARFSections(CodeDescription* desc, ELF* elf) {
+ if (desc->is_line_info_available()) {
+ elf->AddSection(new DebugInfoSection(desc));
+ elf->AddSection(new DebugAbbrevSection);
+ elf->AddSection(new DebugLineSection(desc));
+ }
+}
+
+
+// -------------------------------------------------------------------
+// Binary GDB JIT Interface as described in
+// http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
+extern "C" {
+ typedef enum {
+ JIT_NOACTION = 0,
+ JIT_REGISTER_FN,
+ JIT_UNREGISTER_FN
+ } JITAction;
+
+ struct JITCodeEntry {
+ JITCodeEntry* next_;
+ JITCodeEntry* prev_;
+ Address symfile_addr_;
+ uint64_t symfile_size_;
+ };
+
+ struct JITDescriptor {
+ uint32_t version_;
+ uint32_t action_flag_;
+    JITCodeEntry* relevant_entry_;
+    JITCodeEntry* first_entry_;
+ };
+
+  // GDB will place a breakpoint in this function. To prevent GCC from
+  // inlining it or removing it we add the noinline attribute and an empty
+  // inline assembler statement to its body.
+ void __attribute__((noinline)) __jit_debug_register_code() {
+ __asm__("");
+ }
+
+  // GDB will inspect the contents of this descriptor. Static initialization
+  // is necessary to prevent GDB from seeing an uninitialized descriptor.
+ JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
+}
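+
+
+// Editor's sketch (not part of the original change) of how the debugger
+// side consumes these structures: after its breakpoint on
+// __jit_debug_register_code fires, GDB walks the doubly linked list and
+// parses each in-memory ELF image. The process_symfile callback is a
+// hypothetical stand-in for that parsing step; this function is an
+// illustration only and is not called anywhere.
+static void WalkJITEntriesSketch(void (*process_symfile)(Address, uint64_t)) {
+  for (JITCodeEntry* e = __jit_debug_descriptor.first_entry_;
+       e != NULL;
+       e = e->next_) {
+    process_symfile(e->symfile_addr_, e->symfile_size_);
+  }
+}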
+
+
+static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
+ uintptr_t symfile_size) {
+ JITCodeEntry* entry = static_cast<JITCodeEntry*>(
+ malloc(sizeof(JITCodeEntry) + symfile_size));
+
+ entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1);
+ entry->symfile_size_ = symfile_size;
+ memcpy(entry->symfile_addr_, symfile_addr, symfile_size);
+
+ entry->prev_ = entry->next_ = NULL;
+
+ return entry;
+}
+
+
+static void DestroyCodeEntry(JITCodeEntry* entry) {
+ free(entry);
+}
+
+
+static void RegisterCodeEntry(JITCodeEntry* entry) {
+ entry->next_ = __jit_debug_descriptor.first_entry_;
+ if (entry->next_ != NULL) entry->next_->prev_ = entry;
+ __jit_debug_descriptor.first_entry_ =
+ __jit_debug_descriptor.relevant_entry_ = entry;
+
+ __jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
+ __jit_debug_register_code();
+}
+
+
+static void UnregisterCodeEntry(JITCodeEntry* entry) {
+ if (entry->prev_ != NULL) {
+ entry->prev_->next_ = entry->next_;
+ } else {
+ __jit_debug_descriptor.first_entry_ = entry->next_;
+ }
+
+ if (entry->next_ != NULL) {
+ entry->next_->prev_ = entry->prev_;
+ }
+
+ __jit_debug_descriptor.relevant_entry_ = entry;
+ __jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
+ __jit_debug_register_code();
+}
+
+
+static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
+ ZoneScope zone_scope(DELETE_ON_EXIT);
+
+ ELF elf;
+ Writer w(&elf);
+
+ int text_section_index = elf.AddSection(
+ new FullHeaderELFSection(".text",
+ ELFSection::TYPE_NOBITS,
+ kCodeAlignment,
+ desc->code_start(),
+ 0,
+ desc->code_size(),
+ ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC));
+
+ CreateSymbolsTable(desc, &elf, text_section_index);
+
+ CreateDWARFSections(desc, &elf);
+
+ elf.Write(&w);
+
+ return CreateCodeEntry(w.buffer(), w.position());
+}
+
+
+static bool SameCodeObjects(void* key1, void* key2) {
+ return key1 == key2;
+}
+
+
+static HashMap entries(&SameCodeObjects);
+
+
+static uint32_t HashForCodeObject(Code* code) {
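+  // A prime close to 2^32 / phi, as in Knuth's multiplicative hashing. The
+  // low kCodeAlignmentBits of a code address are always zero, so they are
+  // shifted out below before multiplying.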
+ static const uintptr_t kGoldenRatio = 2654435761u;
+ uintptr_t hash = reinterpret_cast<uintptr_t>(code->address());
+ return static_cast<uint32_t>((hash >> kCodeAlignmentBits) * kGoldenRatio);
+}
+
+
+static const intptr_t kLineInfoTag = 0x1;
+
+
+static bool IsLineInfoTagged(void* ptr) {
+ return 0 != (reinterpret_cast<intptr_t>(ptr) & kLineInfoTag);
+}
+
+
+static void* TagLineInfo(GDBJITLineInfo* ptr) {
+ return reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(ptr) | kLineInfoTag);
+}
+
+
+static GDBJITLineInfo* UntagLineInfo(void* ptr) {
+ return reinterpret_cast<GDBJITLineInfo*>(
+ reinterpret_cast<intptr_t>(ptr) & ~kLineInfoTag);
+}
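+
+
+// Editor's note: GDBJITLineInfo objects are heap allocated and at least
+// pointer aligned, so bit 0 of their address is always clear and is free to
+// carry kLineInfoTag, distinguishing pending line info from a registered
+// JITCodeEntry stored in the same hash map slot. The round trip below is an
+// illustration only:
+//
+//   GDBJITLineInfo* info = new GDBJITLineInfo();
+//   void* tagged = TagLineInfo(info);
+//   ASSERT(IsLineInfoTagged(tagged));
+//   ASSERT(UntagLineInfo(tagged) == info);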
+
+
+void GDBJITInterface::AddCode(Handle<String> name,
+ Handle<Script> script,
+ Handle<Code> code) {
+ if (!FLAG_gdbjit) return;
+
+  // Force initialization of the script's line_ends array.
+ GetScriptLineNumber(script, 0);
+
+ if (!name.is_null()) {
+ SmartPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
+ AddCode(*name_cstring, *code, *script);
+ } else {
+ AddCode("", *code, *script);
+ }
+}
+
+
+void GDBJITInterface::AddCode(const char* name,
+ Code* code,
+ Script* script) {
+ if (!FLAG_gdbjit) return;
+ AssertNoAllocation no_gc;
+
+ HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true);
+ if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
+
+ GDBJITLineInfo* lineinfo = UntagLineInfo(e->value);
+ CodeDescription code_desc(name,
+ code,
+ script != NULL ? Handle<Script>(script)
+ : Handle<Script>(),
+ lineinfo);
+
+ if (!FLAG_gdbjit_full && !code_desc.is_line_info_available()) {
+ delete lineinfo;
+ entries.Remove(code, HashForCodeObject(code));
+ return;
+ }
+
+ JITCodeEntry* entry = CreateELFObject(&code_desc);
+ ASSERT(!IsLineInfoTagged(entry));
+
+ delete lineinfo;
+ e->value = entry;
+
+ RegisterCodeEntry(entry);
+}
+
+
+void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
+ const char* name,
+ Code* code) {
+ if (!FLAG_gdbjit) return;
+
+ EmbeddedVector<char, 256> buffer;
+ StringBuilder builder(buffer.start(), buffer.length());
+
+ builder.AddString(Tag2String(tag));
+ if ((name != NULL) && (*name != '\0')) {
+ builder.AddString(": ");
+ builder.AddString(name);
+ } else {
+ builder.AddFormatted(": code object %p", static_cast<void*>(code));
+ }
+
+ AddCode(builder.Finalize(), code);
+}
+
+
+void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
+ String* name,
+ Code* code) {
+ if (!FLAG_gdbjit) return;
+ AddCode(tag, name != NULL ? *name->ToCString(DISALLOW_NULLS) : NULL, code);
+}
+
+
+void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
+ if (!FLAG_gdbjit) return;
+
+ AddCode(tag, "", code);
+}
+
+
+void GDBJITInterface::RemoveCode(Code* code) {
+ if (!FLAG_gdbjit) return;
+
+ HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), false);
+ if (e == NULL) return;
+
+ if (IsLineInfoTagged(e->value)) {
+ delete UntagLineInfo(e->value);
+ } else {
+ JITCodeEntry* entry = static_cast<JITCodeEntry*>(e->value);
+ UnregisterCodeEntry(entry);
+ DestroyCodeEntry(entry);
+ }
+ e->value = NULL;
+ entries.Remove(code, HashForCodeObject(code));
+}
+
+
+void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
+ GDBJITLineInfo* line_info) {
+ ASSERT(!IsLineInfoTagged(line_info));
+ HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true);
+ ASSERT(e->value == NULL);
+ e->value = TagLineInfo(line_info);
+}
+
+
+} } // namespace v8::internal
+#endif  // ENABLE_GDB_JIT_INTERFACE
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
new file mode 100644
index 00000000..5d348b69
--- /dev/null
+++ b/src/gdb-jit.h
@@ -0,0 +1,136 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_GDB_JIT_H_
+#define V8_GDB_JIT_H_
+
+//
+// Basic implementation of a GDB JIT Interface client.
+// The GDB JIT Interface is supported by GDB 7.0 and above.
+// Currently only the ia32 and x64 architectures on Linux are supported.
+//
+
+#ifdef ENABLE_GDB_JIT_INTERFACE
+#include "v8.h"
+#include "factory.h"
+
+namespace v8 {
+namespace internal {
+
+#define CODE_TAGS_LIST(V) \
+ V(LOAD_IC) \
+ V(KEYED_LOAD_IC) \
+ V(STORE_IC) \
+ V(KEYED_STORE_IC) \
+ V(CALL_IC) \
+ V(CALL_INITIALIZE) \
+ V(CALL_PRE_MONOMORPHIC) \
+ V(CALL_NORMAL) \
+ V(CALL_MEGAMORPHIC) \
+ V(CALL_MISS) \
+ V(STUB) \
+ V(BUILTIN) \
+ V(SCRIPT) \
+ V(EVAL)
+
+class GDBJITLineInfo : public Malloced {
+ public:
+ GDBJITLineInfo()
+ : pc_info_(10) { }
+
+ void SetPosition(intptr_t pc, int pos, bool is_statement) {
+ AddPCInfo(PCInfo(pc, pos, is_statement));
+ }
+
+ struct PCInfo {
+ PCInfo(intptr_t pc, int pos, bool is_statement)
+ : pc_(pc), pos_(pos), is_statement_(is_statement) { }
+
+ intptr_t pc_;
+ int pos_;
+ bool is_statement_;
+ };
+
+ List<PCInfo>* pc_info() {
+ return &pc_info_;
+ }
+
+ private:
+ void AddPCInfo(const PCInfo& pc_info) {
+ pc_info_.Add(pc_info);
+ }
+
+ List<PCInfo> pc_info_;
+};
+
+
+class GDBJITInterface: public AllStatic {
+ public:
+ enum CodeTag {
+#define V(x) x,
+ CODE_TAGS_LIST(V)
+#undef V
+ TAG_COUNT
+ };
+
+ static const char* Tag2String(CodeTag tag) {
+ switch (tag) {
+#define V(x) case x: return #x;
+ CODE_TAGS_LIST(V)
+#undef V
+ default:
+ return NULL;
+ }
+ }
+
+ static void AddCode(const char* name,
+ Code* code,
+ Script* script = NULL);
+
+ static void AddCode(Handle<String> name,
+ Handle<Script> script,
+ Handle<Code> code);
+
+ static void AddCode(CodeTag tag, String* name, Code* code);
+
+ static void AddCode(CodeTag tag, const char* name, Code* code);
+
+ static void AddCode(CodeTag tag, Code* code);
+
+ static void RemoveCode(Code* code);
+
+ static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
+};
+
+#define GDBJIT(action) GDBJITInterface::action
+
+} } // namespace v8::internal
+#else
+#define GDBJIT(action) ((void) 0)
+#endif  // ENABLE_GDB_JIT_INTERFACE
+
+#endif  // V8_GDB_JIT_H_
diff --git a/src/heap.cc b/src/heap.cc
index 5832ccbb..32d751a3 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2724,6 +2724,9 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(ByteArray::cast(reloc_info));
code->set_flags(flags);
+ if (code->is_call_stub() || code->is_keyed_call_stub()) {
+ code->set_check_type(RECEIVER_MAP_CHECK);
+ }
code->set_deoptimization_data(empty_fixed_array());
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
@@ -5029,7 +5032,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
obj->SetMark();
}
UnmarkingVisitor visitor;
- Heap::IterateRoots(&visitor, VISIT_ONLY_STRONG);
+ Heap::IterateRoots(&visitor, VISIT_ALL);
while (visitor.can_process())
visitor.ProcessNext();
}
diff --git a/src/heap.h b/src/heap.h
index 25384d22..0d79081a 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1865,7 +1865,7 @@ class GCTracer BASE_EMBEDDED {
}
~Scope() {
- ASSERT((0 <= scope_) && (scope_ < kNumberOfScopes));
+ ASSERT(scope_ < kNumberOfScopes); // scope_ is unsigned.
tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
}
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 3f39888e..b13bb0c4 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -996,7 +996,8 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
SetFlag(kUseGVN);
if (handle_->IsNumber()) {
double n = handle_->Number();
- has_int32_value_ = static_cast<double>(static_cast<int32_t>(n)) == n;
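+    // Compare the bit patterns rather than the doubles themselves, so that
+    // -0.0 (whose int32 round trip yields +0.0) is not treated as an int32.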
+ double roundtrip_value = static_cast<double>(static_cast<int32_t>(n));
+ has_int32_value_ = BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(n);
if (has_int32_value_) int32_value_ = static_cast<int32_t>(n);
double_value_ = n;
has_double_value_ = true;
@@ -1190,6 +1191,11 @@ void HStoreGlobal::PrintDataTo(StringStream* stream) const {
}
+void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
+ stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
// Implementation of type inference and type conversions. Calculates
// the inferred type of this instruction based on the input operands.
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index f7eb1734..eebec5a9 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -92,6 +92,7 @@ class LChunkBuilder;
// HCallNew
// HCallRuntime
// HCallStub
+// HCheckPrototypeMaps
// HConstant
// HControlInstruction
// HDeoptimize
@@ -106,6 +107,7 @@ class LChunkBuilder;
// HGlobalObject
// HGlobalReceiver
// HLeaveInlined
+// HLoadContextSlot
// HLoadGlobal
// HMaterializedLiteral
// HArrayLiteral
@@ -125,7 +127,6 @@ class LChunkBuilder;
// HCheckInstanceType
// HCheckMap
// HCheckNonSmi
-// HCheckPrototypeMaps
// HCheckSmi
// HDeleteProperty
// HFixedArrayLength
@@ -220,6 +221,7 @@ class LChunkBuilder;
V(JSArrayLength) \
V(ClassOfTest) \
V(LeaveInlined) \
+ V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
@@ -771,6 +773,10 @@ class HInstruction: public HValue {
virtual void Verify() const;
#endif
+ // Returns whether this is some kind of deoptimizing check
+ // instruction.
+ virtual bool IsCheckInstruction() const { return false; }
+
DECLARE_INSTRUCTION(Instruction)
protected:
@@ -1502,6 +1508,8 @@ class HCheckMap: public HUnaryOperation {
SetFlag(kDependsOnMaps);
}
+ virtual bool IsCheckInstruction() const { return true; }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1535,6 +1543,8 @@ class HCheckFunction: public HUnaryOperation {
SetFlag(kUseGVN);
}
+ virtual bool IsCheckInstruction() const { return true; }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1571,6 +1581,8 @@ class HCheckInstanceType: public HUnaryOperation {
SetFlag(kUseGVN);
}
+ virtual bool IsCheckInstruction() const { return true; }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1608,6 +1620,8 @@ class HCheckNonSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
+ virtual bool IsCheckInstruction() const { return true; }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1622,42 +1636,42 @@ class HCheckNonSmi: public HUnaryOperation {
};
-class HCheckPrototypeMaps: public HUnaryOperation {
+class HCheckPrototypeMaps: public HInstruction {
public:
- HCheckPrototypeMaps(HValue* value,
- Handle<JSObject> holder,
- Handle<Map> receiver_map)
- : HUnaryOperation(value),
- holder_(holder),
- receiver_map_(receiver_map) {
- set_representation(Representation::Tagged());
+ HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
+ : prototype_(prototype), holder_(holder) {
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
}
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
+ virtual bool IsCheckInstruction() const { return true; }
#ifdef DEBUG
virtual void Verify() const;
#endif
+ Handle<JSObject> prototype() const { return prototype_; }
Handle<JSObject> holder() const { return holder_; }
- Handle<Map> receiver_map() const { return receiver_map_; }
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
+ virtual intptr_t Hashcode() const {
+ ASSERT(!Heap::IsAllocationAllowed());
+ intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
+ hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
+ return hash;
+ }
+
protected:
virtual bool DataEquals(HValue* other) const {
HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
- return holder_.is_identical_to(b->holder()) &&
- receiver_map_.is_identical_to(b->receiver_map());
+ return prototype_.is_identical_to(b->prototype()) &&
+ holder_.is_identical_to(b->holder());
}
private:
+ Handle<JSObject> prototype_;
Handle<JSObject> holder_;
- Handle<Map> receiver_map_;
};
@@ -1668,6 +1682,8 @@ class HCheckSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
+ virtual bool IsCheckInstruction() const { return true; }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1996,6 +2012,8 @@ class HBoundsCheck: public HBinaryOperation {
SetFlag(kUseGVN);
}
+ virtual bool IsCheckInstruction() const { return true; }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}
@@ -2601,6 +2619,39 @@ class HStoreGlobal: public HUnaryOperation {
};
+class HLoadContextSlot: public HInstruction {
+ public:
+  HLoadContextSlot(int context_chain_length, int slot_index)
+ : context_chain_length_(context_chain_length), slot_index_(slot_index) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetFlag(kDependsOnCalls);
+ }
+
+ int context_chain_length() const { return context_chain_length_; }
+ int slot_index() const { return slot_index_; }
+
+ virtual void PrintDataTo(StringStream* stream) const;
+
+ virtual intptr_t Hashcode() const {
+ return context_chain_length() * 29 + slot_index();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const {
+ HLoadContextSlot* b = HLoadContextSlot::cast(other);
+ return (context_chain_length() == b->context_chain_length())
+ && (slot_index() == b->slot_index());
+ }
+
+ private:
+ int context_chain_length_;
+ int slot_index_;
+};
+
+
class HLoadNamedField: public HUnaryOperation {
public:
HLoadNamedField(HValue* object, bool is_in_object, int offset)
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 0d92b2ee..da41ef94 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -687,6 +687,11 @@ HGraph::HGraph(CompilationInfo* info)
}
+bool HGraph::AllowAggressiveOptimizations() const {
+ return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
+}
+
+
Handle<Code> HGraph::Compile() {
int values = GetMaximumValueID();
if (values > LAllocator::max_initial_value_ids()) {
@@ -1453,8 +1458,12 @@ void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
// about code that was never executed.
bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
- if (!instr->IsChange() &&
- FLAG_aggressive_loop_invariant_motion) return true;
+ if (FLAG_aggressive_loop_invariant_motion &&
+ !instr->IsChange() &&
+ (!instr->IsCheckInstruction() ||
+ graph_->AllowAggressiveOptimizations())) {
+ return true;
+ }
HBasicBlock* block = instr->block();
bool result = true;
if (block != loop_header) {
@@ -2940,6 +2949,21 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
BAILOUT("unsupported context for arguments object");
}
ast_context()->ReturnValue(environment()->Lookup(variable));
+ } else if (variable->IsContextSlot()) {
+ if (variable->mode() == Variable::CONST) {
+ BAILOUT("reference to const context slot");
+ }
+ Slot* slot = variable->AsSlot();
+ CompilationInfo* info = graph()->info();
+ int context_chain_length = info->function()->scope()->
+ ContextChainLength(slot->var()->scope());
+ ASSERT(context_chain_length >= 0);
+ // TODO(antonm): if slot's value is not modified by closures, instead
+ // of reading it out of context, we could just embed the value as
+ // a constant.
+ HLoadContextSlot* instr =
+ new HLoadContextSlot(context_chain_length, slot->index());
+ ast_context()->ReturnInstruction(instr, expr->id());
} else if (variable->is_global()) {
LookupResult lookup;
LookupGlobalPropertyCell(variable, &lookup, false);
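
For the context-slot branch just added: context_chain_length is how many
Context links separate the current function's scope from the scope that owns
the slot. Illustrative C++ of what the generated code ultimately does (the
types are stand-ins; V8 emits this walk as Lithium/assembly, not C++):

  struct Context {
    Context* previous;
    void** slots;
  };

  void* LoadContextSlot(Context* ctx, int chain_length, int slot_index) {
    for (int i = 0; i < chain_length; ++i) ctx = ctx->previous;
    return ctx->slots[slot_index];
  }
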
@@ -2956,7 +2980,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
ast_context()->ReturnInstruction(instr, expr->id());
} else {
- BAILOUT("reference to non-stack-allocated/non-global variable");
+ BAILOUT("reference to a variable which requires dynamic lookup");
}
}
@@ -3482,7 +3506,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
Top(),
expr->position(),
expr->AssignmentId());
- } else {
+ } else if (var->IsStackAllocated()) {
// We allow reference to the arguments object only in assignments
// to local variables to make sure that the arguments object does
// not escape and is not modified.
@@ -3495,6 +3519,8 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
VISIT_FOR_VALUE(expr->value());
}
Bind(proxy->var(), Top());
+ } else {
+ BAILOUT("Assigning to no non-stack-allocated/non-global variable");
}
// Return the value.
ast_context()->ReturnValue(Pop());
@@ -3795,9 +3821,9 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr,
AddInstruction(new HCheckMap(receiver, receiver_map));
}
if (!expr->holder().is_null()) {
- AddInstruction(new HCheckPrototypeMaps(receiver,
- expr->holder(),
- receiver_map));
+ AddInstruction(new HCheckPrototypeMaps(
+ Handle<JSObject>(JSObject::cast(receiver_map->prototype())),
+ expr->holder()));
}
}
@@ -4009,7 +4035,9 @@ bool HGraphBuilder::TryInline(Call* expr) {
function_return_->MarkAsInlineReturnTarget();
}
call_context_ = ast_context();
- TypeFeedbackOracle new_oracle(Handle<Code>(shared->code()));
+ TypeFeedbackOracle new_oracle(
+ Handle<Code>(shared->code()),
+ Handle<Context>(target->context()->global_context()));
oracle_ = &new_oracle;
graph()->info()->SetOsrAstId(AstNode::kNoNumber);
@@ -4211,7 +4239,8 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
HValue* arg_two_value = environment()->Lookup(arg_two->var());
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
- if (!expr->IsMonomorphic()) return false;
+ if (!expr->IsMonomorphic() ||
+ expr->check_type() != RECEIVER_MAP_CHECK) return false;
// Found pattern f.apply(receiver, arguments).
VisitForValue(prop->obj());
@@ -4280,7 +4309,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
expr->RecordTypeFeedback(oracle());
ZoneMapList* types = expr->GetReceiverTypes();
- if (expr->IsMonomorphic()) {
+ if (expr->IsMonomorphic() && expr->check_type() == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr, receiver, types->first(), true);
if (TryMathFunctionInline(expr)) {
@@ -4305,6 +4334,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
} else if (types != NULL && types->length() > 1) {
+ ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
HandlePolymorphicCallNamed(expr, receiver, types, name);
return;
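
A note on the RECEIVER_MAP_CHECK guards threaded through these call hunks:
the monomorphic fast paths (constant-function checks, inlining, f.apply
recognition) all assume the receiver is validated by a plain map check, so
any other receiver check kind now falls through to the generic call path.
Sketched as a predicate (RECEIVER_MAP_CHECK is the value named in the diff;
the rest of the enum is illustrative):

  enum CheckType { RECEIVER_MAP_CHECK, OTHER_RECEIVER_CHECK };

  bool CanUseMonomorphicFastPath(bool is_monomorphic, CheckType check) {
    return is_monomorphic && check == RECEIVER_MAP_CHECK;
  }
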
@@ -5713,31 +5743,40 @@ void HStatistics::Print() {
PrintF("%30s", names_[i]);
double ms = static_cast<double>(timing_[i]) / 1000;
double percent = static_cast<double>(timing_[i]) * 100 / sum;
- PrintF(" - %0.3f ms / %0.3f %% \n", ms, percent);
+ PrintF(" - %7.3f ms / %4.1f %% ", ms, percent);
+
+ unsigned size = sizes_[i];
+ double size_percent = static_cast<double>(size) * 100 / total_size_;
+ PrintF(" %8u bytes / %4.1f %%\n", size, size_percent);
}
- PrintF("%30s - %0.3f ms \n", "Sum", static_cast<double>(sum) / 1000);
+ PrintF("%30s - %7.3f ms %8u bytes\n", "Sum",
+ static_cast<double>(sum) / 1000,
+ total_size_);
PrintF("---------------------------------------------------------------\n");
- PrintF("%30s - %0.3f ms (%0.1f times slower than full code gen)\n",
+ PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n",
"Total",
static_cast<double>(total_) / 1000,
static_cast<double>(total_) / full_code_gen_);
}
-void HStatistics::SaveTiming(const char* name, int64_t ticks) {
+void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
if (name == HPhase::kFullCodeGen) {
full_code_gen_ += ticks;
} else if (name == HPhase::kTotal) {
total_ += ticks;
} else {
+ total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
if (names_[i] == name) {
timing_[i] += ticks;
+ sizes_[i] += size;
return;
}
}
names_.Add(name);
timing_.Add(ticks);
+ sizes_.Add(size);
}
}
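
The SaveTiming change keeps three parallel lists in step: a phase name, its
accumulated ticks, and now its accumulated zone bytes. Note that
names_[i] == name compares pointers, which works because phase names are
string literals with stable addresses. A self-contained sketch with
std::vector standing in for V8's List<T>:

  #include <cstdint>
  #include <vector>

  struct PhaseStats {
    std::vector<const char*> names;
    std::vector<int64_t> ticks;
    std::vector<unsigned> sizes;
    unsigned total_size;

    PhaseStats() : total_size(0) {}

    void Save(const char* name, int64_t t, unsigned size) {
      total_size += size;
      for (size_t i = 0; i < names.size(); ++i) {
        if (names[i] == name) {  // pointer identity, as in the diff
          ticks[i] += t;
          sizes[i] += size;
          return;
        }
      }
      names.push_back(name);
      ticks.push_back(t);
      sizes.push_back(size);
    }
  };
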
@@ -5758,13 +5797,15 @@ void HPhase::Begin(const char* name,
chunk_ = allocator->chunk();
}
if (FLAG_time_hydrogen) start_ = OS::Ticks();
+ start_allocation_size_ = Zone::allocation_size_;
}
void HPhase::End() const {
if (FLAG_time_hydrogen) {
int64_t end = OS::Ticks();
- HStatistics::Instance()->SaveTiming(name_, end - start_);
+ unsigned size = Zone::allocation_size_ - start_allocation_size_;
+ HStatistics::Instance()->SaveTiming(name_, end - start_, size);
}
if (FLAG_trace_hydrogen) {
@@ -5777,7 +5818,6 @@ void HPhase::End() const {
#ifdef DEBUG
if (graph_ != NULL) graph_->Verify();
- if (chunk_ != NULL) chunk_->Verify();
if (allocator_ != NULL) allocator_->Verify();
#endif
}
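
HPhase::Begin/End now bracket an allocation counter as well as the clock:
Zone::allocation_size_ only grows, so the end-minus-start delta is the
number of bytes the phase allocated. A sketch of the bracketing, with
NowTicks()/ZoneBytes() as assumed stand-ins for OS::Ticks() and
Zone::allocation_size_:

  #include <cstdint>

  int64_t NowTicks();    // assumed: monotonic tick source
  unsigned ZoneBytes();  // assumed: monotonically growing byte counter

  class PhaseScope {
   public:
    explicit PhaseScope(const char* name)
        : name_(name), start_ticks_(NowTicks()), start_bytes_(ZoneBytes()) {}

    void End() {
      int64_t ticks = NowTicks() - start_ticks_;
      unsigned bytes = ZoneBytes() - start_bytes_;
      // ... hand (name_, ticks, bytes) to SaveTiming, as in the diff ...
    }

   private:
    const char* name_;
    int64_t start_ticks_;
    unsigned start_bytes_;
  };
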
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 872ae98e..19f89838 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -296,6 +296,9 @@ class HGraph: public HSubgraph {
explicit HGraph(CompilationInfo* info);
CompilationInfo* info() const { return info_; }
+
+ bool AllowAggressiveOptimizations() const;
+
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
Handle<String> debug_name() const { return info_->function()->debug_name(); }
@@ -906,7 +909,7 @@ class HValueMap: public ZoneObject {
class HStatistics: public Malloced {
public:
void Print();
- void SaveTiming(const char* name, int64_t ticks);
+ void SaveTiming(const char* name, int64_t ticks, unsigned size);
static HStatistics* Instance() {
static SetOncePointer<HStatistics> instance;
if (!instance.is_set()) {
@@ -917,11 +920,19 @@ class HStatistics: public Malloced {
private:
- HStatistics() : timing_(5), names_(5), total_(0), full_code_gen_(0) { }
+ HStatistics()
+ : timing_(5),
+ names_(5),
+ sizes_(5),
+ total_(0),
+ total_size_(0),
+ full_code_gen_(0) { }
List<int64_t> timing_;
List<const char*> names_;
+ List<unsigned> sizes_;
int64_t total_;
+ unsigned total_size_;
int64_t full_code_gen_;
};
@@ -958,6 +969,7 @@ class HPhase BASE_EMBEDDED {
HGraph* graph_;
LChunk* chunk_;
LAllocator* allocator_;
+ unsigned start_allocation_size_;
};
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index c173a3dc..552d7b5e 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2465,6 +2465,17 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) {
}
+void Assembler::por(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xEB);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2489,6 +2500,40 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) {
}
+void Assembler::psllq(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xF3);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psrlq(XMMRegister reg, int8_t shift) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x73);
+ emit_sse_operand(edx, reg); // edx == 2
+ EMIT(shift);
+}
+
+
+void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xD3);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
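
Encoding note for the new shift instructions: the immediate forms of psllq
and psrlq share opcode 66 0F 73 and are distinguished only by the reg field
of the ModR/M byte (/6 selects psllq, /2 selects psrlq). That is why the
immediate psrlq above passes edx, whose register code is 2, as the first
argument to emit_sse_operand. A tiny stand-alone encoder showing the byte
layout (hypothetical helper, not V8 code):

  #include <cstdint>
  #include <vector>

  void EmitXmmShiftImm(std::vector<uint8_t>* out,
                       int slash,     // 6 = psllq, 2 = psrlq
                       int xmm_code,  // destination XMM register number
                       int8_t shift) {
    out->push_back(0x66);
    out->push_back(0x0F);
    out->push_back(0x73);
    // ModR/M byte: mod=11, reg=/digit, rm=xmm register.
    out->push_back(static_cast<uint8_t>(0xC0 | (slash << 3) | xmm_code));
    out->push_back(static_cast<uint8_t>(shift));
  }

  // EmitXmmShiftImm(&code, 2, 3, 17) emits 66 0F 73 D3 11,
  // i.e. psrlq xmm3,17.
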
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 11acb561..20446b00 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -919,9 +919,13 @@ class Assembler : public Malloced {
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
+ void por(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
void psllq(XMMRegister reg, int8_t shift);
+ void psllq(XMMRegister dst, XMMRegister src);
+ void psrlq(XMMRegister reg, int8_t shift);
+ void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 91fb050c..72213dc8 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -2015,8 +2015,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER ||
- operands_type_ == TRBinaryOpIC::INT32);
+ ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
// Floating point case.
switch (op_) {
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index e3b0dfc6..1ecfd39c 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -6649,38 +6649,41 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop, loop_condition,
+ loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
+
ASSERT(args->length() == 2);
+ // We will leave the separator on the stack until the end of the function.
Load(args->at(1));
+ // Load the array into eax.
Load(args->at(0));
Result array_result = frame_->Pop();
array_result.ToRegister(eax);
frame_->SpillAll();
- Label bailout;
- Label done;
// All aliases of the same register have disjoint lifetimes.
Register array = eax;
- Register result_pos = no_reg;
+ Register elements = no_reg; // Will be eax.
- Register index = edi;
+ Register index = edx;
- Register current_string_length = ecx; // Will be ecx when live.
+ Register string_length = ecx;
- Register current_string = edx;
+ Register string = esi;
Register scratch = ebx;
- Register scratch_2 = esi;
- Register new_padding_chars = scratch_2;
-
- Operand separator = Operand(esp, 4 * kPointerSize); // Already pushed.
- Operand elements = Operand(esp, 3 * kPointerSize);
- Operand result = Operand(esp, 2 * kPointerSize);
- Operand padding_chars = Operand(esp, 1 * kPointerSize);
- Operand array_length = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(4 * kPointerSize));
+ Register array_length = edi;
+ Register result_pos = no_reg; // Will be edi.
- // Check that eax is a JSArray
+ // Separator operand is already pushed.
+ Operand separator_operand = Operand(esp, 2 * kPointerSize);
+ Operand result_operand = Operand(esp, 1 * kPointerSize);
+ Operand array_length_operand = Operand(esp, 0);
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ cld();
+ // Check that the array is a JSArray
__ test(array, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
@@ -6691,140 +6694,226 @@ void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
1 << Map::kHasFastElements);
__ j(zero, &bailout);
- // If the array is empty, return the empty string.
- __ mov(scratch, FieldOperand(array, JSArray::kLengthOffset));
- __ sar(scratch, 1);
- Label non_trivial;
- __ j(not_zero, &non_trivial);
- __ mov(result, Factory::empty_string());
+ // If the array has length zero, return the empty string.
+ __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ sar(array_length, 1);
+ __ j(not_zero, &non_trivial_array);
+ __ mov(result_operand, Factory::empty_string());
__ jmp(&done);
- __ bind(&non_trivial);
- __ mov(array_length, scratch);
-
- __ mov(scratch, FieldOperand(array, JSArray::kElementsOffset));
- __ mov(elements, scratch);
+ // Save the array length.
+ __ bind(&non_trivial_array);
+ __ mov(array_length_operand, array_length);
+ // Save the FixedArray containing array's elements.
// End of array's live range.
- result_pos = array;
+ elements = array;
+ __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
- // Check that the separator is a flat ascii string.
- __ mov(current_string, separator);
- __ test(current_string, Immediate(kSmiTagMask));
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ Set(index, Immediate(0));
+ __ Set(string_length, Immediate(0));
+ // Loop condition: while (index < length).
+ // Live loop registers: index, array_length, string,
+ // scratch, string_length, elements.
+ __ jmp(&loop_condition);
+ __ bind(&loop);
+ __ cmp(index, Operand(array_length));
+ __ j(greater_equal, &done);
+
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
- // If the separator is the empty string, replace it with NULL.
- // The test for NULL is quicker than the empty string test, in a loop.
- __ cmp(FieldOperand(current_string, SeqAsciiString::kLengthOffset),
- Immediate(0));
- Label separator_checked;
- __ j(not_zero, &separator_checked);
- __ mov(separator, Immediate(0));
- __ bind(&separator_checked);
-
- // Check that elements[0] is a flat ascii string, and copy it in new space.
- __ mov(scratch, elements);
- __ mov(current_string, FieldOperand(scratch, FixedArray::kHeaderSize));
- __ test(current_string, Immediate(kSmiTagMask));
+ __ add(string_length,
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
+ __ j(overflow, &bailout);
+ __ add(Operand(index), Immediate(1));
+ __ bind(&loop_condition);
+ __ cmp(index, Operand(array_length));
+ __ j(less, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, 1);
+ __ j(not_equal, &not_size_one_array);
+ __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ mov(result_operand, scratch);
+ __ jmp(&done);
+
+ __ bind(&not_size_one_array);
+
+ // End of array_length live range.
+ result_pos = array_length;
+ array_length = no_reg;
+
+ // Live registers:
+ // string_length: Sum of string lengths, as a smi.
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ mov(string, separator_operand);
+ __ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
- // Allocate space to copy it. Round up the size to the alignment granularity.
- __ mov(current_string_length,
- FieldOperand(current_string, String::kLengthOffset));
- __ shr(current_string_length, 1);
-
+ // Add (separator length times array_length) - separator length
+ // to string_length.
+ __ mov(scratch, separator_operand);
+ __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
+ __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
+ __ imul(scratch, array_length_operand);
+ __ j(overflow, &bailout);
+ __ add(string_length, Operand(scratch));
+ __ j(overflow, &bailout);
+
+ __ shr(string_length, 1);
// Live registers and stack values:
- // current_string_length: length of elements[0].
-
- // New string result in new space = elements[0]
- __ AllocateAsciiString(result_pos, current_string_length, scratch_2,
- index, no_reg, &bailout);
- __ mov(result, result_pos);
-
- // Adjust current_string_length to include padding bytes at end of string.
- // Keep track of the number of padding bytes.
- __ mov(new_padding_chars, current_string_length);
- __ add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
- __ and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
- __ sub(new_padding_chars, Operand(current_string_length));
- __ neg(new_padding_chars);
- __ mov(padding_chars, new_padding_chars);
-
- Label copy_loop_1_done;
- Label copy_loop_1;
- __ test(current_string_length, Operand(current_string_length));
- __ j(zero, &copy_loop_1_done);
- __ bind(&copy_loop_1);
- __ sub(Operand(current_string_length), Immediate(kPointerSize));
- __ mov(scratch, FieldOperand(current_string, current_string_length,
- times_1, SeqAsciiString::kHeaderSize));
- __ mov(FieldOperand(result_pos, current_string_length,
- times_1, SeqAsciiString::kHeaderSize),
- scratch);
- __ j(not_zero, &copy_loop_1);
- __ bind(&copy_loop_1_done);
-
- __ mov(index, Immediate(1));
+ // string_length
+ // elements
+ __ AllocateAsciiString(result_pos, string_length, scratch,
+ index, string, &bailout);
+ __ mov(result_operand, result_pos);
+ __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+
+
+ __ mov(string, separator_operand);
+ __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ j(equal, &one_char_separator);
+ __ j(greater, &long_separator);
+
+
+ // Empty separator case
+ __ mov(index, Immediate(0));
+ __ jmp(&loop_1_condition);
// Loop condition: while (index < length).
- Label loop;
- __ bind(&loop);
- __ cmp(index, array_length);
- __ j(greater_equal, &done);
+ __ bind(&loop_1);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // elements: the FixedArray of strings we are joining.
+
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(Operand(index), Immediate(1));
+ __ bind(&loop_1_condition);
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_1); // End while (index < length).
+ __ jmp(&done);
- // If the separator is the empty string, signalled by NULL, skip it.
- Label separator_done;
- __ mov(current_string, separator);
- __ test(current_string, Operand(current_string));
- __ j(zero, &separator_done);
-
- // Append separator to result. It is known to be a flat ascii string.
- __ AppendStringToTopOfNewSpace(current_string, current_string_length,
- result_pos, scratch, scratch_2, result,
- padding_chars, &bailout);
- __ bind(&separator_done);
-
- // Add next element of array to the end of the result.
- // Get current_string = array[index].
- __ mov(scratch, elements);
- __ mov(current_string, FieldOperand(scratch, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- // If current != flat ascii string drop result, return undefined.
- __ test(current_string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- // Append current to the result.
- __ AppendStringToTopOfNewSpace(current_string, current_string_length,
- result_pos, scratch, scratch_2, result,
- padding_chars, &bailout);
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its ASCII character value.
+ __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ mov_b(separator_operand, scratch);
+
+ __ Set(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ jmp(&loop_2_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_2);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator character to the result.
+ __ mov_b(scratch, separator_operand);
+ __ mov_b(Operand(result_pos, 0), scratch);
+ __ inc(result_pos);
+
+ __ bind(&loop_2_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
- __ jmp(&loop); // End while (index < length).
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_2); // End while (index < length).
+ __ jmp(&done);
+
+
+ // Long separator case (separator is more than one character).
+ __ bind(&long_separator);
+
+ __ Set(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ jmp(&loop_3_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_3);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator to the result.
+ __ mov(string, separator_operand);
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+
+ __ bind(&loop_3_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(Operand(index), Immediate(1));
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_3); // End while (index < length).
+ __ jmp(&done);
+
__ bind(&bailout);
- __ mov(result, Factory::undefined_value());
+ __ mov(result_operand, Factory::undefined_value());
__ bind(&done);
- __ mov(eax, result);
+ __ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(4 * kPointerSize));
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
frame_->Drop(1);
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index ceba2494..3050c567 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -56,8 +58,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
- int deoptimization_index = table.GetDeoptimizationIndex(i);
- int gap_code_size = table.GetGapCodeSize(i);
+ SafepointEntry safepoint_entry = table.GetEntry(i);
+ int deoptimization_index = safepoint_entry.deoptimization_index();
+ int gap_code_size = safepoint_entry.gap_code_size();
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
unsigned instructions = pc_offset - last_pc_offset;
@@ -617,3 +620,5 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
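
The safepoint change is purely a call-shape refactor: the two per-index
getters are folded into a single entry object. Usage, as it appears in the
hunk above:

  // Before: two indexed lookups per safepoint.
  //   int deoptimization_index = table.GetDeoptimizationIndex(i);
  //   int gap_code_size = table.GetGapCodeSize(i);
  //
  // After: one entry carrying both fields.
  SafepointEntry safepoint_entry = table.GetEntry(i);
  int deoptimization_index = safepoint_entry.deoptimization_index();
  int gap_code_size = safepoint_entry.gap_code_size();
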
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index dfbcbb76..4028a934 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1182,15 +1182,33 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0xF3) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("psllq %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x73) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("psllq %s,%d",
+ ASSERT(regop == esi || regop == edx);
+ AppendToBuffer("%s %s,%d",
+ (regop == esi) ? "psllq" : "psrlq",
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0xD3) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("psrlq %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x7F) {
AppendToBuffer("movdqa ");
data++;
@@ -1228,6 +1246,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0xEB) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("por %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else {
UnimplementedInstruction();
}
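
Worked decode examples for the disassembler additions, assuming the standard
SSE2 encodings the assembler half of this patch emits:

  // 66 0F F3 C1     ->  psllq xmm0,xmm1   (0xF3: reg=dst, rm=src)
  // 66 0F D3 C1     ->  psrlq xmm0,xmm1
  // 66 0F EB C1     ->  por xmm0,xmm1
  // 66 0F 73 F1 04  ->  psllq xmm1,4      (reg field /6 selects psllq)
  // 66 0F 73 D1 04  ->  psrlq xmm1,4      (reg field /2 selects psrlq)
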
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 5f308582..2622b5e5 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -3351,39 +3351,37 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- Label bailout;
- Label done;
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop, loop_condition,
+ loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
ASSERT(args->length() == 2);
// We will leave the separator on the stack until the end of the function.
VisitForStackValue(args->at(1));
// Load the array into eax.
VisitForAccumulatorValue(args->at(0));
-
// All aliases of the same register have disjoint lifetimes.
Register array = eax;
- Register result_pos = no_reg;
+ Register elements = no_reg; // Will be eax.
- Register index = edi;
+ Register index = edx;
- Register current_string_length = ecx; // Will be ecx when live.
+ Register string_length = ecx;
- Register current_string = edx;
+ Register string = esi;
Register scratch = ebx;
- Register scratch_2 = esi;
- Register new_padding_chars = scratch_2;
-
- Operand separator = Operand(esp, 4 * kPointerSize); // Already pushed.
- Operand elements = Operand(esp, 3 * kPointerSize);
- Operand result = Operand(esp, 2 * kPointerSize);
- Operand padding_chars = Operand(esp, 1 * kPointerSize);
- Operand array_length = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(4 * kPointerSize));
+ Register array_length = edi;
+ Register result_pos = no_reg; // Will be edi.
-
- // Check that eax is a JSArray
+ // Separator operand is already pushed.
+ Operand separator_operand = Operand(esp, 2 * kPointerSize);
+ Operand result_operand = Operand(esp, 1 * kPointerSize);
+ Operand array_length_operand = Operand(esp, 0);
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ cld();
+ // Check that the array is a JSArray
__ test(array, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
@@ -3394,140 +3392,226 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
1 << Map::kHasFastElements);
__ j(zero, &bailout);
- // If the array is empty, return the empty string.
- __ mov(scratch, FieldOperand(array, JSArray::kLengthOffset));
- __ sar(scratch, 1);
- Label non_trivial;
- __ j(not_zero, &non_trivial);
- __ mov(result, Factory::empty_string());
+ // If the array has length zero, return the empty string.
+ __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
+ __ sar(array_length, 1);
+ __ j(not_zero, &non_trivial_array);
+ __ mov(result_operand, Factory::empty_string());
__ jmp(&done);
- __ bind(&non_trivial);
- __ mov(array_length, scratch);
-
- __ mov(scratch, FieldOperand(array, JSArray::kElementsOffset));
- __ mov(elements, scratch);
+ // Save the array length.
+ __ bind(&non_trivial_array);
+ __ mov(array_length_operand, array_length);
+ // Save the FixedArray containing array's elements.
// End of array's live range.
- result_pos = array;
+ elements = array;
+ __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
- // Check that the separator is a flat ascii string.
- __ mov(current_string, separator);
- __ test(current_string, Immediate(kSmiTagMask));
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ Set(index, Immediate(0));
+ __ Set(string_length, Immediate(0));
+ // Loop condition: while (index < length).
+ // Live loop registers: index, array_length, string,
+ // scratch, string_length, elements.
+ __ jmp(&loop_condition);
+ __ bind(&loop);
+ __ cmp(index, Operand(array_length));
+ __ j(greater_equal, &done);
+
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
- // If the separator is the empty string, replace it with NULL.
- // The test for NULL is quicker than the empty string test, in a loop.
- __ cmp(FieldOperand(current_string, SeqAsciiString::kLengthOffset),
- Immediate(0));
- Label separator_checked;
- __ j(not_zero, &separator_checked);
- __ mov(separator, Immediate(0));
- __ bind(&separator_checked);
-
- // Check that elements[0] is a flat ascii string, and copy it in new space.
- __ mov(scratch, elements);
- __ mov(current_string, FieldOperand(scratch, FixedArray::kHeaderSize));
- __ test(current_string, Immediate(kSmiTagMask));
+ __ add(string_length,
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
+ __ j(overflow, &bailout);
+ __ add(Operand(index), Immediate(1));
+ __ bind(&loop_condition);
+ __ cmp(index, Operand(array_length));
+ __ j(less, &loop);
+
+ // If array_length is 1, return elements[0], a string.
+ __ cmp(array_length, 1);
+ __ j(not_equal, &not_size_one_array);
+ __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
+ __ mov(result_operand, scratch);
+ __ jmp(&done);
+
+ __ bind(&not_size_one_array);
+
+ // End of array_length live range.
+ result_pos = array_length;
+ array_length = no_reg;
+
+ // Live registers:
+ // string_length: Sum of string lengths, as a smi.
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ mov(string, separator_operand);
+ __ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
- // Allocate space to copy it. Round up the size to the alignment granularity.
- __ mov(current_string_length,
- FieldOperand(current_string, String::kLengthOffset));
- __ shr(current_string_length, 1);
-
+ // Add (separator length times array_length) - separator length
+ // to string_length.
+ __ mov(scratch, separator_operand);
+ __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
+ __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
+ __ imul(scratch, array_length_operand);
+ __ j(overflow, &bailout);
+ __ add(string_length, Operand(scratch));
+ __ j(overflow, &bailout);
+
+ __ shr(string_length, 1);
// Live registers and stack values:
- // current_string_length: length of elements[0].
-
- // New string result in new space = elements[0]
- __ AllocateAsciiString(result_pos, current_string_length, scratch_2,
- index, no_reg, &bailout);
- __ mov(result, result_pos);
-
- // Adjust current_string_length to include padding bytes at end of string.
- // Keep track of the number of padding bytes.
- __ mov(new_padding_chars, current_string_length);
- __ add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
- __ and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
- __ sub(new_padding_chars, Operand(current_string_length));
- __ neg(new_padding_chars);
- __ mov(padding_chars, new_padding_chars);
-
- Label copy_loop_1_done;
- Label copy_loop_1;
- __ test(current_string_length, Operand(current_string_length));
- __ j(zero, &copy_loop_1_done);
- __ bind(&copy_loop_1);
- __ sub(Operand(current_string_length), Immediate(kPointerSize));
- __ mov(scratch, FieldOperand(current_string, current_string_length,
- times_1, SeqAsciiString::kHeaderSize));
- __ mov(FieldOperand(result_pos, current_string_length,
- times_1, SeqAsciiString::kHeaderSize),
- scratch);
- __ j(not_zero, &copy_loop_1);
- __ bind(&copy_loop_1_done);
-
- __ mov(index, Immediate(1));
+ // string_length
+ // elements
+ __ AllocateAsciiString(result_pos, string_length, scratch,
+ index, string, &bailout);
+ __ mov(result_operand, result_pos);
+ __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+
+
+ __ mov(string, separator_operand);
+ __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ j(equal, &one_char_separator);
+ __ j(greater, &long_separator);
+
+
+ // Empty separator case
+ __ mov(index, Immediate(0));
+ __ jmp(&loop_1_condition);
// Loop condition: while (index < length).
- Label loop;
- __ bind(&loop);
- __ cmp(index, array_length);
- __ j(greater_equal, &done);
+ __ bind(&loop_1);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+ // elements: the FixedArray of strings we are joining.
+
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(Operand(index), Immediate(1));
+ __ bind(&loop_1_condition);
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_1); // End while (index < length).
+ __ jmp(&done);
- // If the separator is the empty string, signalled by NULL, skip it.
- Label separator_done;
- __ mov(current_string, separator);
- __ test(current_string, Operand(current_string));
- __ j(zero, &separator_done);
-
- // Append separator to result. It is known to be a flat ascii string.
- __ AppendStringToTopOfNewSpace(current_string, current_string_length,
- result_pos, scratch, scratch_2, result,
- padding_chars, &bailout);
- __ bind(&separator_done);
-
- // Add next element of array to the end of the result.
- // Get current_string = array[index].
- __ mov(scratch, elements);
- __ mov(current_string, FieldOperand(scratch, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- // If current != flat ascii string drop result, return undefined.
- __ test(current_string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
- __ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
- __ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- // Append current to the result.
- __ AppendStringToTopOfNewSpace(current_string, current_string_length,
- result_pos, scratch, scratch_2, result,
- padding_chars, &bailout);
+
+ // One-character separator case
+ __ bind(&one_char_separator);
+ // Replace separator with its ASCII character value.
+ __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ mov_b(separator_operand, scratch);
+
+ __ Set(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ jmp(&loop_2_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_2);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator character to the result.
+ __ mov_b(scratch, separator_operand);
+ __ mov_b(Operand(result_pos, 0), scratch);
+ __ inc(result_pos);
+
+ __ bind(&loop_2_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+ __ add(Operand(index), Immediate(1));
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_2); // End while (index < length).
+ __ jmp(&done);
+
+
+ // Long separator case (separator is more than one character).
+ __ bind(&long_separator);
+
+ __ Set(index, Immediate(0));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator
+ __ jmp(&loop_3_entry);
+ // Loop condition: while (index < length).
+ __ bind(&loop_3);
+ // Each iteration of the loop concatenates one string to the result.
+ // Live values in registers:
+ // index: which element of the elements array we are adding to the result.
+ // result_pos: the position to which we are currently copying characters.
+
+ // Copy the separator to the result.
+ __ mov(string, separator_operand);
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
+
+ __ bind(&loop_3_entry);
+ // Get string = array[index].
+ __ mov(string, FieldOperand(elements, index,
+ times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ mov(string_length,
+ FieldOperand(string, String::kLengthOffset));
+ __ shr(string_length, 1);
+ __ lea(string,
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
- __ jmp(&loop); // End while (index < length).
+
+ __ cmp(index, array_length_operand);
+ __ j(less, &loop_3); // End while (index < length).
+ __ jmp(&done);
+
__ bind(&bailout);
- __ mov(result, Factory::undefined_value());
+ __ mov(result_operand, Factory::undefined_value());
__ bind(&done);
- __ mov(eax, result);
+ __ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(5 * kPointerSize));
+ __ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->Plug(eax);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 90bfd4b6..f570fe01 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1231,8 +1231,12 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Label number, non_number, non_string, boolean, probe, miss;
// Probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ NOT_IN_LOOP,
+ MONOMORPHIC,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
// If the stub cache probing failed, the receiver might be a value.
@@ -1325,7 +1329,9 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
+static void GenerateCallMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d32f95d3..2d3eac14 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "stub-cache.h"
@@ -135,6 +139,17 @@ bool LCodeGen::GeneratePrologue() {
__ j(not_zero, &loop);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ // On Windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the whole allocated area
+ // accessible, we write to each page in turn (the value is irrelevant).
+ const int kPageSize = 4 * KB;
+ for (int offset = slots * kPointerSize - kPageSize;
+ offset > 0;
+ offset -= kPageSize) {
+ __ mov(Operand(esp, offset), eax);
+ }
+#endif
}
}
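
The _MSC_VER block exists because Windows grows the stack one guard page at
a time: touching memory more than a page below the lowest committed page
faults. Writing one word per page walks the guard page down across the whole
frame before the code starts indexing into it arbitrarily. The same pattern
as a stand-alone sketch (hypothetical frame; the value written is
irrelevant):

  const int kPageSize = 4 * 1024;

  void TouchStackPages(char* frame_base, int frame_bytes) {
    // One write per page, from the far end of the frame back toward the
    // base, so each page is committed before arbitrary access begins.
    for (int offset = frame_bytes - kPageSize;
         offset > 0;
         offset -= kPageSize) {
      frame_base[offset] = 0;
    }
  }
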
@@ -261,6 +276,53 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
}
+Operand LCodeGen::HighOperand(LOperand* op) {
+ ASSERT(op->IsDoubleStackSlot());
+ int index = op->index();
+ int offset = (index >= 0) ? index + 3 : index - 1;
+ return Operand(ebp, -offset * kPointerSize);
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->values()->length();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
+ WriteTranslation(environment->outer(), translation);
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->BeginFrame(environment->ast_id(), closure_id, height);
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ // spilled_registers_ and spilled_double_registers_ are either
+ // both NULL or both set.
+ if (environment->spilled_registers() != NULL && value != NULL) {
+ if (value->IsRegister() &&
+ environment->spilled_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(translation,
+ environment->spilled_registers()[value->index()],
+ environment->HasTaggedValueAt(i));
+ } else if (
+ value->IsDoubleRegister() &&
+ environment->spilled_double_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(
+ translation,
+ environment->spilled_double_registers()[value->index()],
+ false);
+ }
+ }
+
+ AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ }
+}
+
+
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged) {
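
WriteTranslation recurses on environment->outer() before emitting its own
frame, so inlined caller frames land in the translation outermost-first,
and, where a register value has been spilled, the spill slot is recorded as
a duplicate of it. The recursion's shape, reduced to a sketch:

  #include <cstddef>

  struct Env {
    const Env* outer;
    int ast_id;
    int closure_id;
    int height;  // values minus parameters, as computed above
  };

  struct Translation {
    void BeginFrame(int ast_id, int closure_id, int height);
  };

  void WriteFrames(const Env* env, Translation* out) {
    if (env == NULL) return;
    WriteFrames(env->outer, out);  // caller frames first
    out->BeginFrame(env->ast_id, env->closure_id, env->height);
    // ... then one translation command per value in this frame ...
  }
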
@@ -385,7 +447,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
++frame_count;
}
Translation translation(&translations_, frame_count);
- environment->WriteTranslation(this, &translation);
+ WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
environment->Register(deoptimization_index, translation.index());
deoptimizations_.Add(environment);
@@ -557,66 +619,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
- // xmm0 must always be a scratch register.
- XMMRegister xmm_scratch = xmm0;
- LUnallocated marker_operand(LUnallocated::NONE);
-
- Register cpu_scratch = esi;
- bool destroys_cpu_scratch = false;
-
- LGapResolver resolver(move->move_operands(), &marker_operand);
- const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
- for (int i = moves->length() - 1; i >= 0; --i) {
- LMoveOperands move = moves->at(i);
- LOperand* from = move.from();
- LOperand* to = move.to();
- ASSERT(!from->IsDoubleRegister() ||
- !ToDoubleRegister(from).is(xmm_scratch));
- ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
- ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
- ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
- if (from->IsConstantOperand()) {
- __ mov(ToOperand(to), ToImmediate(from));
- } else if (from == &marker_operand) {
- if (to->IsRegister() || to->IsStackSlot()) {
- __ mov(ToOperand(to), cpu_scratch);
- ASSERT(destroys_cpu_scratch);
- } else {
- ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
- __ movdbl(ToOperand(to), xmm_scratch);
- }
- } else if (to == &marker_operand) {
- if (from->IsRegister() || from->IsStackSlot()) {
- __ mov(cpu_scratch, ToOperand(from));
- destroys_cpu_scratch = true;
- } else {
- ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
- __ movdbl(xmm_scratch, ToOperand(from));
- }
- } else if (from->IsRegister()) {
- __ mov(ToOperand(to), ToRegister(from));
- } else if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else if (from->IsStackSlot()) {
- ASSERT(to->IsStackSlot());
- __ push(eax);
- __ mov(eax, ToOperand(from));
- __ mov(ToOperand(to), eax);
- __ pop(eax);
- } else if (from->IsDoubleRegister()) {
- __ movdbl(ToOperand(to), ToDoubleRegister(from));
- } else if (to->IsDoubleRegister()) {
- __ movdbl(ToDoubleRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
- __ movdbl(xmm_scratch, ToOperand(from));
- __ movdbl(ToOperand(to), xmm_scratch);
- }
- }
-
- if (destroys_cpu_scratch) {
- __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
- }
+ resolver_.Resolve(move);
}
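
The open-coded move sequencing that used to live here (reverse-order
resolution with xmm0 and esi as scratch) now belongs to the dedicated
LGapResolver invoked above. The core problem such a resolver owns is cyclic
parallel moves, where one move's destination is another move's source; the
simplest instance and its fix, as a sketch:

  // A two-element cycle {a -> b, b -> a} cannot be serialized without a
  // temporary; the resolver breaks such cycles through a scratch location.
  void ResolveTwoCycle(int* a, int* b) {
    int scratch = *a;
    *a = *b;
    *b = scratch;
  }
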
@@ -703,11 +706,11 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- LOperand* right = instr->right();
+ LOperand* right = instr->InputAt(1);
ASSERT(ToRegister(instr->result()).is(edx));
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(edx));
+ ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
Register right_reg = ToRegister(right);
@@ -743,11 +746,11 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
- LOperand* right = instr->right();
+ LOperand* right = instr->InputAt(1);
ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(edx));
+ ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
Register left_reg = eax;
@@ -789,11 +792,11 @@ void LCodeGen::DoDivI(LDivI* instr) {
void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->left());
- LOperand* right = instr->right();
+ Register left = ToRegister(instr->InputAt(0));
+ LOperand* right = instr->InputAt(1);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ mov(ToRegister(instr->temp()), left);
+ __ mov(ToRegister(instr->TempAt(0)), left);
}
if (right->IsConstantOperand()) {
@@ -817,7 +820,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else {
// Test the non-zero operand for negative sign.
- __ or_(ToRegister(instr->temp()), ToOperand(right));
+ __ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
@@ -826,8 +829,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
@@ -867,8 +870,8 @@ void LCodeGen::DoBitI(LBitI* instr) {
void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
if (right->IsRegister()) {
@@ -923,8 +926,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -979,22 +982,22 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->input());
+ Register array = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->input());
+ Register array = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
}
void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temporary());
+ Register map = ToRegister(instr->TempAt(0));
ASSERT(input.is(result));
NearLabel done;
// If the object is a smi return the object.
@@ -1011,14 +1014,14 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->Equals(instr->result()));
__ not_(ToRegister(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToOperand(instr->input()));
+ __ push(ToOperand(instr->InputAt(0)));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -1029,8 +1032,8 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -1046,8 +1049,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
// Modulo uses a fixed result register.
ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
switch (instr->op()) {
@@ -1086,8 +1089,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->left()).is(edx));
- ASSERT(ToRegister(instr->right()).is(eax));
+ ASSERT(ToRegister(instr->InputAt(0)).is(edx));
+ ASSERT(ToRegister(instr->InputAt(1)).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
@@ -1128,17 +1131,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->representation();
if (r.IsInteger32()) {
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
__ test(reg, Operand(reg));
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->input());
+ XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
__ xorpd(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
if (instr->hydrogen()->type().IsBoolean()) {
__ cmp(reg, Factory::true_value());
EmitBranch(true_block, false_block, equal);
@@ -1266,8 +1269,8 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
NearLabel unordered;
@@ -1292,8 +1295,8 @@ void LCodeGen::DoCmpID(LCmpID* instr) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1312,8 +1315,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
@@ -1326,8 +1329,8 @@ void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1337,7 +1340,7 @@ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
// TODO(fsc): If the expression is known to be a smi, then it's
@@ -1375,7 +1378,7 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Jump to the false block.
@@ -1396,7 +1399,7 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
__ j(zero, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->temp());
+ Register scratch = ToRegister(instr->TempAt(0));
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
@@ -1435,9 +1438,9 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
+ Register temp = ToRegister(instr->TempAt(0));
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
@@ -1455,9 +1458,9 @@ void LCodeGen::DoIsObject(LIsObject* instr) {
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->input());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1471,7 +1474,7 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
void LCodeGen::DoIsSmi(LIsSmi* instr) {
- Operand input = ToOperand(instr->input());
+ Operand input = ToOperand(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
@@ -1485,7 +1488,7 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) {
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Operand input = ToOperand(instr->input());
+ Operand input = ToOperand(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1495,9 +1498,9 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
-InstanceType LHasInstanceType::TestType() {
- InstanceType from = hydrogen()->from();
- InstanceType to = hydrogen()->to();
+static InstanceType TestType(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
ASSERT(from == to || to == LAST_TYPE);
return from;
@@ -1505,9 +1508,9 @@ InstanceType LHasInstanceType::TestType() {
-Condition LHasInstanceType::BranchCondition() {
- InstanceType from = hydrogen()->from();
- InstanceType to = hydrogen()->to();
+static Condition BranchCondition(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
if (from == to) return equal;
if (to == LAST_TYPE) return above_equal;
if (from == FIRST_TYPE) return below_equal;
@@ -1517,15 +1520,15 @@ Condition LHasInstanceType::BranchCondition() {
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ test(input, Immediate(kSmiTagMask));
NearLabel done, is_false;
__ j(zero, &is_false);
- __ CmpObjectType(input, instr->TestType(), result);
- __ j(NegateCondition(instr->BranchCondition()), &is_false);
+ __ CmpObjectType(input, TestType(instr->hydrogen()), result);
+ __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
__ mov(result, Handle<Object>(Heap::true_value()));
__ jmp(&done);
__ bind(&is_false);
@@ -1535,8 +1538,8 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->input());
- Register temp = ToRegister(instr->temp());
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1546,13 +1549,13 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
__ test(input, Immediate(kSmiTagMask));
__ j(zero, false_label);
- __ CmpObjectType(input, instr->TestType(), temp);
- EmitBranch(true_block, false_block, instr->BranchCondition());
+ __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
+ EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
@@ -1568,7 +1571,7 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1637,10 +1640,10 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
- Register temp = ToRegister(instr->temporary());
+ Register temp = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
NearLabel done;
Label is_true, is_false;
@@ -1660,9 +1663,9 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->input());
- Register temp = ToRegister(instr->temporary());
- Register temp2 = ToRegister(instr->temporary2());
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
if (input.is(temp)) {
// Swap.
Register swapper = temp;
@@ -1684,7 +1687,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
@@ -1741,8 +1744,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
deferred = new DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->input());
- Register temp = ToRegister(instr->temp());
+ Register object = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
// A Smi is not instance of anything.
__ test(object, Immediate(kSmiTagMask));
@@ -1752,7 +1755,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
NearLabel cache_miss;
- Register map = ToRegister(instr->temp());
+ Register map = ToRegister(instr->TempAt(0));
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
__ cmp(map, Factory::the_hole_value()); // Patched to cached map.
@@ -1800,7 +1803,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// Get the temp register reserved by the instruction. This needs to be edi
// because its slot in the pushed safepoint register area is used to
// communicate the offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
+ Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(edi));
__ mov(InstanceofStub::right(), Immediate(instr->function()));
static const int kAdditionalDelta = 13;
@@ -1905,13 +1908,21 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
- Register value = ToRegister(instr->input());
+ Register value = ToRegister(instr->InputAt(0));
__ mov(Operand::Cell(instr->hydrogen()->cell()), value);
}
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ // TODO(antonm): load a context with a separate instruction.
+ Register result = ToRegister(instr->result());
+ __ LoadContext(result, instr->context_chain_length());
+ __ mov(result, ContextOperand(result, instr->slot_index()));
+}
+
+
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->input());
+ Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
@@ -1934,7 +1945,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register function = ToRegister(instr->function());
- Register temp = ToRegister(instr->temporary());
+ Register temp = ToRegister(instr->TempAt(0));
Register result = ToRegister(instr->result());
// Check that the function really is a function.
@@ -1975,8 +1986,8 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
- ASSERT(instr->result()->Equals(instr->input()));
- Register reg = ToRegister(instr->input());
+ ASSERT(instr->result()->Equals(instr->InputAt(0)));
+ Register reg = ToRegister(instr->InputAt(0));
__ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
if (FLAG_debug_code) {
NearLabel done;
@@ -2009,32 +2020,15 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register elements = ToRegister(instr->elements());
Register key = ToRegister(instr->key());
- Register result;
- if (instr->load_result() != NULL) {
- result = ToRegister(instr->load_result());
- } else {
- result = ToRegister(instr->result());
- ASSERT(result.is(elements));
- }
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(elements));
// Load the result.
__ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
- Representation r = instr->hydrogen()->representation();
- if (r.IsInteger32()) {
- // Untag and check for smi.
- __ SmiUntag(result);
- DeoptimizeIf(carry, instr->environment());
- } else if (r.IsDouble()) {
- EmitNumberUntagD(result,
- ToDoubleRegister(instr->result()),
- instr->environment());
- } else {
- // Check for the hole value.
- ASSERT(r.IsTagged());
- __ cmp(result, Factory::the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
+ // Check for the hole value.
+ __ cmp(result, Factory::the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
}
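
Semantically, the fast keyed load above reads a tagged value out of a FixedArray backing store and bails out on the hole sentinel that marks absent elements, since optimized code must never observe a hole. A hedged pseudo-C++ sketch of those semantics (not actual V8 code; Bailout is a hypothetical stand-in for the DeoptimizeIf call):

void Bailout();  // Hypothetical: models DeoptimizeIf(equal, environment).

Object* LoadKeyedFast(FixedArray* elements, int key) {
  // mov result, FieldOperand(elements, key, times_4, kHeaderSize)
  Object* value = elements->get(key);
  // cmp against the hole sentinel; deoptimize on a match.
  if (value == *Factory::the_hole_value()) Bailout();
  return value;
}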
@@ -2073,7 +2067,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Operand elem = ToOperand(instr->input());
+ Operand elem = ToOperand(instr->InputAt(0));
Register result = ToRegister(instr->result());
NearLabel done;
@@ -2147,7 +2141,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->input();
+ LOperand* argument = instr->InputAt(0);
if (argument->IsConstantOperand()) {
__ push(ToImmediate(argument));
} else {
@@ -2213,7 +2207,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->input());
+ Register input_reg = ToRegister(instr->InputAt(0));
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
Factory::heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
@@ -2280,17 +2274,17 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
LUnaryMathOperation* instr_;
};
- ASSERT(instr->input()->Equals(instr->result()));
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->input());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
__ pxor(scratch, scratch);
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
} else if (r.IsInteger32()) {
- Register input_reg = ToRegister(instr->input());
+ Register input_reg = ToRegister(instr->InputAt(0));
__ test(input_reg, Operand(input_reg));
Label is_positive;
__ j(not_sign, &is_positive);
@@ -2302,7 +2296,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
DeferredMathAbsTaggedHeapNumber* deferred =
new DeferredMathAbsTaggedHeapNumber(this, instr);
Label not_smi;
- Register input_reg = ToRegister(instr->input());
+ Register input_reg = ToRegister(instr->InputAt(0));
// Smi check.
__ test(input_reg, Immediate(kSmiTagMask));
__ j(not_zero, deferred->entry());
@@ -2323,7 +2317,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->input());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
__ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
@@ -2345,7 +2339,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->input());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
// xmm_scratch = 0.5
ExternalReference one_half = ExternalReference::address_of_one_half();
@@ -2378,7 +2372,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->input());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
}
@@ -2386,7 +2380,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->input());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
ExternalReference negative_infinity =
ExternalReference::address_of_negative_infinity();
@@ -2398,8 +2392,8 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
if (exponent_type.IsDouble()) {
@@ -2512,6 +2506,7 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
+ ASSERT(ToRegister(instr->InputAt(0)).is(ecx));
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
@@ -2561,7 +2556,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->input()).is(edi));
+ ASSERT(ToRegister(instr->InputAt(0)).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
@@ -2588,12 +2583,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->is_in_object()) {
__ mov(FieldOperand(object, offset), value);
if (instr->needs_write_barrier()) {
- Register temp = ToRegister(instr->temp());
+ Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
__ RecordWrite(object, offset, value, temp);
}
} else {
- Register temp = ToRegister(instr->temp());
+ Register temp = ToRegister(instr->TempAt(0));
__ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(FieldOperand(temp, offset), value);
if (instr->needs_write_barrier()) {
@@ -2657,7 +2652,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
@@ -2675,7 +2670,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LNumberTagI* instr_;
};
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
@@ -2688,7 +2683,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
Register tmp = reg.is(eax) ? ecx : eax;
// Preserve the value of all registers.
@@ -2738,9 +2733,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
- XMMRegister input_reg = ToDoubleRegister(instr->input());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->temp());
+ Register tmp = ToRegister(instr->TempAt(0));
DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
@@ -2770,7 +2765,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
__ SmiTag(ToRegister(input));
@@ -2778,7 +2773,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(ToRegister(input), Immediate(kSmiTagMask));
@@ -2838,7 +2833,7 @@ class DeferredTaggedToI: public LDeferredCode {
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
NearLabel done, heap_number;
- Register input_reg = ToRegister(instr->input());
+ Register input_reg = ToRegister(instr->InputAt(0));
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -2881,7 +2876,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ add(Operand(esp), Immediate(kDoubleSize));
} else {
NearLabel deopt;
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
+ XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ cmp(input_reg, 0x80000000u);
@@ -2898,7 +2893,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(not_equal, instr->environment());
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
+ XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ cvtsi2sd(xmm_temp, Operand(input_reg));
@@ -2918,7 +2913,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -2938,7 +2933,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
@@ -2951,7 +2946,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
@@ -2988,9 +2983,60 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
} else {
- // This will bail out if the input was not in the int32 range (or,
- // unfortunately, if the input was 0x80000000).
- DeoptimizeIf(equal, instr->environment());
+ NearLabel done;
+ Register temp_reg = ToRegister(instr->TempAt(0));
+ XMMRegister xmm_scratch = xmm0;
+
+ // If cvttsd2si succeeded, we're done. Otherwise, we attempt
+ // manual conversion.
+ __ j(not_equal, &done);
+
+ // Get high 32 bits of the input in result_reg and temp_reg.
+ __ pshufd(xmm_scratch, input_reg, 1);
+ __ movd(Operand(temp_reg), xmm_scratch);
+ __ mov(result_reg, temp_reg);
+
+ // Prepare negation mask in temp_reg.
+ __ sar(temp_reg, kBitsPerInt - 1);
+
+ // Extract the exponent from result_reg and subtract adjusted
+ // bias from it. The adjustment is selected in a way such that
+ // when the difference is zero, the answer is in the low 32 bits
+ // of the input, otherwise a shift has to be performed.
+ __ shr(result_reg, HeapNumber::kExponentShift);
+ __ and_(result_reg,
+ HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
+ __ sub(Operand(result_reg),
+ Immediate(HeapNumber::kExponentBias +
+ HeapNumber::kExponentBits +
+ HeapNumber::kMantissaBits));
+ // Don't handle big (> kMantissaBits + kExponentBits == 63) or
+ // special exponents.
+ DeoptimizeIf(greater, instr->environment());
+
+ // Zero out the sign and the exponent in the input (by shifting
+ // it to the left) and restore the implicit mantissa bit,
+ // i.e. convert the input to unsigned int64 shifted left by
+ // kExponentBits.
+ ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
+ // Minus zero has the most significant bit set and the other
+ // bits cleared.
+ __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
+ __ psllq(input_reg, HeapNumber::kExponentBits);
+ __ por(input_reg, xmm_scratch);
+
+ // Get the amount to shift the input right in xmm_scratch.
+ __ neg(result_reg);
+ __ movd(xmm_scratch, Operand(result_reg));
+
+ // Shift the input right and extract low 32 bits.
+ __ psrlq(input_reg, xmm_scratch);
+ __ movd(Operand(result_reg), input_reg);
+
+ // Use the prepared mask in temp_reg to negate the result if necessary.
+ __ xor_(result_reg, Operand(temp_reg));
+ __ sub(result_reg, Operand(temp_reg));
+ __ bind(&done);
}
} else {
NearLabel done;
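
The manual conversion path added above rewards a standalone walkthrough. The following is an illustrative C++ model of the same bit manipulation (a sketch, not V8 code), mirroring the pshufd/sar, shr/and/sub, psllq/por, and psrlq/movd steps with the IEEE-754 constants the code names: bias 1023, kExponentBits 11, kMantissaBits 52, hence the adjusted bias 1023 + 11 + 52 = 1086.

#include <cstdint>
#include <cstring>

int32_t TruncateToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  uint32_t high = static_cast<uint32_t>(bits >> 32);  // pshufd + movd
  int32_t mask = static_cast<int32_t>(high) >> 31;    // sar: 0 or -1
  // Biased exponent minus the adjusted bias; <= 0 for convertible inputs.
  int32_t delta = static_cast<int32_t>((high >> 20) & 0x7FF) - 1086;
  if (delta > 0) return 0;  // DeoptimizeIf(greater): too big, NaN, Infinity.
  // psllq by kExponentBits, then por with -0.0: the mantissa, with its
  // implicit leading 1 restored, now fills bits 63..11.
  uint64_t mantissa = (bits << 11) | (1ULL << 63);
  // psrlq yields 0 for shifts >= 64; a C++ shift would not, so guard it.
  uint32_t result =
      -delta < 64 ? static_cast<uint32_t>(mantissa >> -delta) : 0;
  // xor_/sub against the sign mask negates the result for negative inputs.
  return static_cast<int32_t>((result ^ mask) - mask);
}

For example, TruncateToInt32(-5.0) yields -5, and TruncateToInt32(2147483648.0) yields INT32_MIN, matching JavaScript's modular ToInt32 semantics and covering the 0x80000000 case that cvttsd2si alone reports as failure.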
@@ -3017,7 +3063,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
__ test(ToRegister(input), Immediate(kSmiTagMask));
DeoptimizeIf(instr->condition(), instr->environment());
@@ -3025,8 +3071,8 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->input());
- Register temp = ToRegister(instr->temp());
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
InstanceType first = instr->hydrogen()->first();
InstanceType last = instr->hydrogen()->last();
@@ -3050,15 +3096,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->input()->IsRegister());
- Register reg = ToRegister(instr->input());
+ ASSERT(instr->InputAt(0)->IsRegister());
+ Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, instr->hydrogen()->target());
DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoCheckMap(LCheckMap* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -3067,26 +3113,25 @@ void LCodeGen::DoCheckMap(LCheckMap* instr) {
}
-void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
- if (Heap::InNewSpace(*prototype)) {
+void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
+ if (Heap::InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
- Factory::NewJSGlobalPropertyCell(prototype);
+ Factory::NewJSGlobalPropertyCell(object);
__ mov(result, Operand::Cell(cell));
} else {
- __ mov(result, prototype);
+ __ mov(result, object);
}
}
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register reg = ToRegister(instr->temp());
+ Register reg = ToRegister(instr->TempAt(0));
Handle<JSObject> holder = instr->holder();
- Handle<Map> receiver_map = instr->receiver_map();
- Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
+ Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadPrototype(reg, current_prototype);
+ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@@ -3096,7 +3141,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadPrototype(reg, current_prototype);
+ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
@@ -3224,7 +3269,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
if (input->IsConstantOperand()) {
__ push(ToImmediate(input));
} else {
@@ -3235,7 +3280,7 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
@@ -3258,7 +3303,7 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -3410,3 +3455,5 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
#undef __
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
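
Most of the churn in this file swaps per-instruction named accessors (input(), temp(), left(), right(), temporary()) for positional lookups. The shape of the new scheme can be reconstructed from the LTemplateInstruction<R, I, T> and OperandContainer<T, N> templates that appear later in this commit (lithium-ia32.cc below); the sketch that follows is a minimal reading of those hunks, not the actual class:

class LOperand;  // Real definition lives in lithium.h.

// Fixed-size operand storage, indexed by position.
template <typename T, int N>
class OperandContainer {
 public:
  int length() const { return N; }
  T& operator[](int i) { return elems_[i]; }
 private:
  T elems_[N];
};

// Zero-length arrays are ill-formed, so N == 0 presumably gets a
// storage-free specialization along these lines.
template <typename T>
class OperandContainer<T, 0> {
 public:
  int length() const { return 0; }
};

// R results, I inputs, T temps; InputAt(i)/TempAt(i) replace the getters.
template <int R, int I, int T>
class LTemplateInstruction /* : public LInstruction */ {
 public:
  LOperand* result() { return results_[0]; }
  LOperand* InputAt(int i) { return inputs_[i]; }
  LOperand* TempAt(int i) { return temps_[i]; }
 protected:
  OperandContainer<LOperand*, R> results_;
  OperandContainer<LOperand*, I> inputs_;
  OperandContainer<LOperand*, T> temps_;
};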
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 41ac39a4..ab62e6fe 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,15 +34,16 @@
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
+#include "ia32/lithium-gap-resolver-ia32.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
+class LGapNode;
class SafepointGenerator;
-
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -58,10 +59,24 @@ class LCodeGen BASE_EMBEDDED {
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+
+ // Support for converting LOperands to assembler types.
+ Operand ToOperand(LOperand* op) const;
+ Register ToRegister(LOperand* op) const;
+ XMMRegister ToDoubleRegister(LOperand* op) const;
+ Immediate ToImmediate(LOperand* op);
+
+ // The operand denoting the second word (the one with a higher address) of
+ // a double stack slot.
+ Operand HighOperand(LOperand* op);
+
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -83,6 +98,9 @@ class LCodeGen BASE_EMBEDDED {
// Parallel move support.
void DoParallelMove(LParallelMove* move);
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -104,7 +122,6 @@ class LCodeGen BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
- MacroAssembler* masm() const { return masm_; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -150,7 +167,7 @@ class LCodeGen BASE_EMBEDDED {
int arity,
LInstruction* instr);
- void LoadPrototype(Register result, Handle<JSObject> prototype);
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
@@ -166,11 +183,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op) const;
- Immediate ToImmediate(LOperand* op);
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);
@@ -230,6 +243,9 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
+ // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
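
The embedded resolver_, together with the ToOperand/ToRegister/ToDoubleRegister/ToImmediate accessors made public above, is what lets the gap resolver (the new files below) ask the codegen where each operand lives. The body of DoParallelMove is not part of this hunk; a plausible delegation, offered only as an assumption:

// Assumed wiring: hand each gap's parallel move to the resolver, which
// emits an equivalent sequential series of moves and swaps.
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}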
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
new file mode 100644
index 00000000..88869590
--- /dev/null
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -0,0 +1,461 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "ia32/lithium-gap-resolver-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32), spilled_register_(-1) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ source_uses_[i] = 0;
+ destination_uses_[i] = 0;
+ }
+}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(HasBeenReset());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ PerformMove(i);
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ Finish();
+ ASSERT(HasBeenReset());
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) AddMove(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph. We use operand swaps to resolve cycles,
+ // which means that a call to PerformMove could change any source operand
+ // in the move graph.
+
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved on the side.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ // Though PerformMove can change any source operand in the move graph,
+ // this call cannot create a blocking move via a swap (this loop does
+ // not miss any). Assume there is a non-blocking move with source A
+ // and this move is blocked on source B and there is a swap of A and
+ // B. Then A and B must be involved in the same cycle (or they would
+ // not be swapped). Since this move's destination is B and there is
+ // only a single incoming edge to an operand, this move must also be
+ // involved in the same cycle. In that case, the blocking move will
+ // be created but will be "pending" when we return from PerformMove.
+ PerformMove(i);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // This move's source may have changed due to swaps to resolve cycles and
+ // so it may now be the last move in the cycle. If so remove it.
+ if (moves_[index].source()->Equals(destination)) {
+ RemoveMove(index);
+ return;
+ }
+
+ // The move may be blocked on at most one pending move, in which case
+ // we have a cycle. Search for such a blocking move and perform a swap to
+ // resolve it.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ EmitSwap(index);
+ return;
+ }
+ }
+
+ // This move is not blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::AddMove(LMoveOperands move) {
+ LOperand* source = move.source();
+ if (source->IsRegister()) ++source_uses_[source->index()];
+
+ LOperand* destination = move.destination();
+ if (destination->IsRegister()) ++destination_uses_[destination->index()];
+
+ moves_.Add(move);
+}
+
+
+void LGapResolver::RemoveMove(int index) {
+ LOperand* source = moves_[index].source();
+ if (source->IsRegister()) {
+ --source_uses_[source->index()];
+ ASSERT(source_uses_[source->index()] >= 0);
+ }
+
+ LOperand* destination = moves_[index].destination();
+ if (destination->IsRegister()) {
+ --destination_uses_[destination->index()];
+ ASSERT(destination_uses_[destination->index()] >= 0);
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+int LGapResolver::CountSourceUses(LOperand* operand) {
+ int count = 0;
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
+ ++count;
+ }
+ }
+ return count;
+}
+
+
+Register LGapResolver::GetFreeRegisterNot(Register reg) {
+ int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
+ return Register::FromAllocationIndex(i);
+ }
+ }
+ return no_reg;
+}
+
+
+bool LGapResolver::HasBeenReset() {
+ if (!moves_.is_empty()) return false;
+ if (spilled_register_ >= 0) return false;
+
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (source_uses_[i] != 0) return false;
+ if (destination_uses_[i] != 0) return false;
+ }
+ return true;
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Finish() {
+ if (spilled_register_ >= 0) {
+ __ pop(Register::FromAllocationIndex(spilled_register_));
+ spilled_register_ = -1;
+ }
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::EnsureRestored(LOperand* operand) {
+ if (operand->IsRegister() && operand->index() == spilled_register_) {
+ __ pop(Register::FromAllocationIndex(spilled_register_));
+ spilled_register_ = -1;
+ }
+}
+
+
+Register LGapResolver::EnsureTempRegister() {
+ // 1. We may have already spilled to create a temp register.
+ if (spilled_register_ >= 0) {
+ return Register::FromAllocationIndex(spilled_register_);
+ }
+
+ // 2. We may have a free register that we can use without spilling.
+ Register free = GetFreeRegisterNot(no_reg);
+ if (!free.is(no_reg)) return free;
+
+ // 3. Prefer to spill a register that is not used in any remaining move
+ // because it will not need to be restored until the end.
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
+ Register scratch = Register::FromAllocationIndex(i);
+ __ push(scratch);
+ spilled_register_ = i;
+ return scratch;
+ }
+ }
+
+ // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
+ Register scratch = Register::FromAllocationIndex(0);
+ __ push(scratch);
+ spilled_register_ = 0;
+ return scratch;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+ EnsureRestored(source);
+ EnsureRestored(destination);
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Register src = cgen_->ToRegister(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(dst, src);
+
+ } else if (source->IsStackSlot()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ mov(dst, src);
+ } else {
+ // Spill on demand to use a temporary register for memory-to-memory
+ // moves.
+ Register tmp = EnsureTempRegister();
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(tmp, src);
+ __ mov(dst, tmp);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Immediate src = cgen_->ToImmediate(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(dst, src);
+
+ } else if (source->IsDoubleRegister()) {
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(dst, src);
+
+ } else if (source->IsDoubleStackSlot()) {
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movdbl(dst, src);
+ } else {
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(xmm0, src);
+ __ movdbl(dst, xmm0);
+ }
+
+ } else {
+ UNREACHABLE();
+ }
+
+ RemoveMove(index);
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+ EnsureRestored(source);
+ EnsureRestored(destination);
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Register-register.
+ Register src = cgen_->ToRegister(source);
+ Register dst = cgen_->ToRegister(destination);
+ __ xchg(dst, src);
+
+ } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+ (source->IsStackSlot() && destination->IsRegister())) {
+ // Register-memory. Use a free register as a temp if possible. Do not
+ // spill on demand because the simple spill implementation cannot avoid
+ // spilling src at this point.
+ Register tmp = GetFreeRegisterNot(no_reg);
+ Register reg =
+ cgen_->ToRegister(source->IsRegister() ? source : destination);
+ Operand mem =
+ cgen_->ToOperand(source->IsRegister() ? destination : source);
+ if (tmp.is(no_reg)) {
+ __ xor_(reg, mem);
+ __ xor_(mem, reg);
+ __ xor_(reg, mem);
+ } else {
+ __ mov(tmp, mem);
+ __ mov(mem, reg);
+ __ mov(reg, tmp);
+ }
+
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory. Spill on demand to use a temporary. If there is a
+ // free register after that, use it as a second temporary.
+ Register tmp0 = EnsureTempRegister();
+ Register tmp1 = GetFreeRegisterNot(tmp0);
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ if (tmp1.is(no_reg)) {
+ // Only one temp register available to us.
+ __ mov(tmp0, dst);
+ __ xor_(tmp0, src);
+ __ xor_(src, tmp0);
+ __ xor_(tmp0, src);
+ __ mov(dst, tmp0);
+ } else {
+ __ mov(tmp0, dst);
+ __ mov(tmp1, src);
+ __ mov(dst, tmp1);
+ __ mov(src, tmp0);
+ }
+
+ } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+ // XMM register-register or register-memory. We rely on having xmm0
+ // available as a fixed scratch register.
+ ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
+ ? source
+ : destination);
+ Operand other =
+ cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
+ __ movdbl(xmm0, other);
+ __ movdbl(other, reg);
+ __ movdbl(reg, Operand(xmm0));
+
+ } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ // Double-width memory-to-memory. Spill on demand to use a general
+ // purpose temporary register and also rely on having xmm0 available as
+ // a fixed scratch register.
+ Register tmp = EnsureTempRegister();
+ Operand src0 = cgen_->ToOperand(source);
+ Operand src1 = cgen_->HighOperand(source);
+ Operand dst0 = cgen_->ToOperand(destination);
+ Operand dst1 = cgen_->HighOperand(destination);
+ __ movdbl(xmm0, dst0); // Save destination in xmm0.
+ __ mov(tmp, src0); // Then use tmp to copy source to destination.
+ __ mov(dst0, tmp);
+ __ mov(tmp, src1);
+ __ mov(dst1, tmp);
+ __ movdbl(src0, xmm0);
+
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+
+ // The swap of source and destination has executed a move from source to
+ // destination.
+ RemoveMove(index);
+
+ // Any unperformed (including pending) move with a source of either
+ // this move's source or destination needs to have their source
+ // changed to reflect the state of affairs after the swap.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(source)) {
+ moves_[i].set_source(destination);
+ } else if (other_move.Blocks(destination)) {
+ moves_[i].set_source(source);
+ }
+ }
+
+ // In addition to swapping the actual uses as sources, we need to update
+ // the use counts.
+ if (source->IsRegister() && destination->IsRegister()) {
+ int temp = source_uses_[source->index()];
+ source_uses_[source->index()] = source_uses_[destination->index()];
+ source_uses_[destination->index()] = temp;
+ } else if (source->IsRegister()) {
+ // We don't have use counts for non-register operands like destination.
+ // Compute those counts now.
+ source_uses_[source->index()] = CountSourceUses(source);
+ } else if (destination->IsRegister()) {
+ source_uses_[destination->index()] = CountSourceUses(destination);
+ }
+}
+
+#undef __
+
+} } // namespace v8::internal
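
The pending-move cycle detection in PerformMove is easiest to see on a toy move graph. Below is an illustrative reduction (a sketch, not V8 code): registers are an int array, moves are (src, dst) index pairs, a pending move is marked by dst == -1, and a detected cycle is broken by a swap followed by the same source-redirection pass EmitSwap performs. Use counts, constant-source ordering, and spilling are omitted.

#include <cstdio>
#include <utility>
#include <vector>

struct Move { int src; int dst; bool done; };

// Mirrors PerformMove: mark pending, recurse into blockers, break a
// cycle with a swap, otherwise emit a plain move.
void Perform(std::vector<Move>& moves, int regs[], int index) {
  int dst = moves[index].dst;
  moves[index].dst = -1;  // Mark pending to detect cycles.
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done && moves[i].dst != -1 && moves[i].src == dst) {
      Perform(moves, regs, static_cast<int>(i));
    }
  }
  moves[index].dst = dst;  // Restore the saved destination.
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done && moves[i].dst == -1 && moves[i].src == dst) {
      // Blocked by a pending move: a cycle. Swap, then redirect the
      // sources of the remaining moves (EmitSwap's final loop).
      int src = moves[index].src;
      std::swap(regs[src], regs[dst]);
      for (size_t j = 0; j < moves.size(); ++j) {
        if (moves[j].done || j == static_cast<size_t>(index)) continue;
        if (moves[j].src == src) moves[j].src = dst;
        else if (moves[j].src == dst) moves[j].src = src;
      }
      moves[index].done = true;
      return;
    }
  }
  regs[dst] = regs[moves[index].src];  // EmitMove.
  moves[index].done = true;
}

int main() {
  int regs[2] = {10, 20};
  std::vector<Move> moves = {{0, 1, false}, {1, 0, false}};  // A cycle.
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) Perform(moves, regs, static_cast<int>(i));
  }
  std::printf("%d %d\n", regs[0], regs[1]);  // Prints "20 10".
  return 0;
}

Tracing the cycle: move 0 goes pending, move 1 is found blocked on it and triggers the swap, and when the recursion unwinds, move 0 has become a self-move; here it degenerates to a harmless self-copy, while the real code removes it via the Equals(destination) check and RemoveMove.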
diff --git a/src/ia32/lithium-gap-resolver-ia32.h b/src/ia32/lithium-gap-resolver-ia32.h
new file mode 100644
index 00000000..f0bd260a
--- /dev/null
+++ b/src/ia32/lithium-gap-resolver-ia32.h
@@ -0,0 +1,110 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+
+#include "v8.h"
+
+#include "lithium-allocator.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // Emit any code necessary at the end of a gap move.
+ void Finish();
+
+ // Add or delete a move from the move graph without emitting any code.
+ // Used to build up the graph and remove trivial moves.
+ void AddMove(LMoveOperands move);
+ void RemoveMove(int index);
+
+ // Report the count of uses of operand as a source in a not-yet-performed
+ // move. Used to rebuild use counts.
+ int CountSourceUses(LOperand* operand);
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Execute a move by emitting a swap of two operands. The move from
+ // source to destination is removed from the move graph.
+ void EmitSwap(int index);
+
+ // Ensure that the given operand is not spilled.
+ void EnsureRestored(LOperand* operand);
+
+ // Return a register that can be used as a temp register, spilling
+ // something if necessary.
+ Register EnsureTempRegister();
+
+ // Return a known free register different from the given one (which could
+ // be no_reg, in which case any free register will do), or no_reg if there
+ // is no such register.
+ Register GetFreeRegisterNot(Register reg);
+
+ // Verify that the state is the initial one, ready to resolve a single
+ // parallel move.
+ bool HasBeenReset();
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ // Source and destination use counts for the general purpose registers.
+ int source_uses_[Register::kNumAllocatableRegisters];
+ int destination_uses_[Register::kNumAllocatableRegisters];
+
+ // If we had to spill on demand, the currently spilled register's
+ // allocation index.
+ int spilled_register_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
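
One EmitSwap detail from the implementation above deserves a note: when no temporary register is free, a register and a memory slot are exchanged with a three-XOR sequence (the memory-to-memory case uses the same trick through one temp). A small self-contained demonstration of why it works:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t reg = 0xDEADu;  // Stands in for the register operand.
  uint32_t mem = 0xBEEFu;  // Stands in for the stack-slot operand.
  reg ^= mem;  // reg == r ^ m
  mem ^= reg;  // mem == m ^ (r ^ m) == r
  reg ^= mem;  // reg == (r ^ m) ^ r == m
  std::printf("%x %x\n", reg, mem);  // Prints "beef dead".
  return 0;
}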
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 6355f16f..cca07c8c 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
#include "ia32/lithium-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
@@ -64,12 +68,12 @@ void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
}
-void LInstruction::PrintTo(StringStream* stream) const {
+void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
if (HasResult()) {
- result()->PrintTo(stream);
- stream->Add(" ");
+ PrintOutputOperandTo(stream);
}
+
PrintDataTo(stream);
if (HasEnvironment()) {
@@ -84,37 +88,33 @@ void LInstruction::PrintTo(StringStream* stream) const {
}
-void LLabel::PrintDataTo(StringStream* stream) const {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ inputs_.PrintOperandsTo(stream);
}
-bool LParallelMove::IsRedundant() const {
- for (int i = 0; i < move_operands_.length(); ++i) {
- if (!move_operands_[i].IsRedundant()) return false;
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+ results_.PrintOperandsTo(stream);
+}
+
+
+template<typename T, int N>
+void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
+ for (int i = 0; i < N; i++) {
+ if (i > 0) stream->Add(" ");
+ elems_[i]->PrintTo(stream);
}
- return true;
}
-void LParallelMove::PrintDataTo(StringStream* stream) const {
- for (int i = move_operands_.length() - 1; i >= 0; --i) {
- if (!move_operands_[i].IsEliminated()) {
- LOperand* from = move_operands_[i].from();
- LOperand* to = move_operands_[i].to();
- if (from->Equals(to)) {
- to->PrintTo(stream);
- } else {
- to->PrintTo(stream);
- stream->Add(" = ");
- from->PrintTo(stream);
- }
- stream->Add("; ");
- }
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
}
}
@@ -130,7 +130,7 @@ bool LGap::IsRedundant() const {
}
-void LGap::PrintDataTo(StringStream* stream) const {
+void LGap::PrintDataTo(StringStream* stream) {
for (int i = 0; i < 4; i++) {
stream->Add("(");
if (parallel_moves_[i] != NULL) {
@@ -169,74 +169,65 @@ const char* LArithmeticT::Mnemonic() const {
}
-
-void LBinaryOperation::PrintDataTo(StringStream* stream) const {
- stream->Add("= ");
- left()->PrintTo(stream);
- stream->Add(" ");
- right()->PrintTo(stream);
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) const {
+void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
-void LBranch::PrintDataTo(StringStream* stream) const {
+void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- left()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
+ InputAt(1)->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@@ -244,74 +235,74 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
}
-void LTypeofIs::PrintDataTo(StringStream* stream) const {
- input()->PrintTo(stream);
+void LTypeofIs::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
}
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ stream->Add("(%d, %d)", context_chain_length(), slot_index());
}
-void LCallKeyed::PrintDataTo(StringStream* stream) const {
+void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[ecx] #%d / ", arity());
}
-void LCallNamed::PrintDataTo(StringStream* stream) const {
+void LCallNamed::PrintDataTo(StringStream* stream) {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
-void LCallGlobal::PrintDataTo(StringStream* stream) const {
+void LCallGlobal::PrintDataTo(StringStream* stream) {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
-void LCallNew::PrintDataTo(StringStream* stream) const {
- LUnaryOperation::PrintDataTo(stream);
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
-void LClassOfTest::PrintDataTo(StringStream* stream) const {
+void LClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("= class_of_test(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\")", *hydrogen()->class_name());
}
-void LUnaryOperation::PrintDataTo(StringStream* stream) const {
- stream->Add("= ");
- input()->PrintTo(stream);
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
@@ -322,11 +313,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
}
-void LChunk::Verify() const {
- // TODO(twuerthinger): Implement verification for chunk.
-}
-
-
int LChunk::GetNextSpillIndex(bool is_double) {
  // Skip a slot for a double-width slot.
if (is_double) spill_slot_count_++;
@@ -334,7 +320,7 @@ int LChunk::GetNextSpillIndex(bool is_double) {
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double)  {
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index);
@@ -381,7 +367,7 @@ void LChunk::MarkEmptyBlocks() {
}
-void LStoreNamed::PrintDataTo(StringStream* stream) const {
+void LStoreNamed::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@@ -390,7 +376,7 @@ void LStoreNamed::PrintDataTo(StringStream* stream) const {
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) const {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -588,6 +574,13 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new LUnallocated(LUnallocated::ANY));
+}
+
+
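For orientation, UseAny is the least constrained of the builder's use helpers: constants stay constant operands, and everything else becomes an LUnallocated with the ANY policy. A quick gloss of the policies that appear in these hunks (the names are real; the one-line descriptions are my reading, not authoritative):

// LUnallocated policies used by the LChunkBuilder helpers:
//   NONE                - no constraint; the allocator picks freely.
//   ANY                 - any register or stack slot will do (UseAny).
//   MUST_HAVE_REGISTER  - must live in a register (UseRegister, TempRegister).
//   FIXED_SLOT          - a specific spill slot index (DefineAsSpilled).
//   SAME_AS_FIRST_INPUT - result reuses the first input (DefineSameAsFirst).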
LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
if (value->EmitAtUses()) {
HInstruction* instr = HInstruction::cast(value);
@@ -598,33 +591,54 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
}
-LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result) {
+ allocator_->RecordDefinition(current_instruction_, result);
+ instr->set_result(result);
+ return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::NONE));
}
-LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateInstruction<1, I, T>* instr,
+ int index) {
return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg) {
return Define(instr, ToUnallocated(reg));
}
-LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
- XMMRegister reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateInstruction<1, I, T>* instr,
+ XMMRegister reg) {
return Define(instr, ToUnallocated(reg));
}
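The point of threading LTemplateInstruction&lt;1, I, T&gt; through these helpers is that only instruction types whose result count is statically 1 can be passed to Define and friends, so "define a result on a result-less instruction" becomes a compile error. A toy standalone sketch of that idea (names are mine, not from the patch; compiles as-is):

// Encoding the result count in the type rejects result-less instructions.
template<int R> struct Instr { };

Instr<1>* Define(Instr<1>* instr) { return instr; }  // accepts results only

int main() {
  Instr<1> add_like;
  Define(&add_like);    // fine: one result
  // Instr<0> gap_like;
  // Define(&gap_like); // would not compile: Instr<0> is a different type
  return 0;
}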
@@ -692,20 +706,6 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
}
-LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
- instr->set_result(result);
- return instr;
-}
-
-
-LOperand* LChunkBuilder::Temp() {
- LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
- allocator_->RecordTemporary(operand);
- return operand;
-}
-
-
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
allocator_->RecordTemporary(operand);
@@ -781,10 +781,10 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
can_deopt = !can_truncate;
}
- LInstruction* result =
- DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
- if (can_deopt) AssignEnvironment(result);
- return result;
+ LShiftI* result = new LShiftI(op, left, right, can_deopt);
+ return can_deopt
+ ? AssignEnvironment(DefineSameAsFirst(result))
+ : DefineSameAsFirst(result);
}
@@ -813,7 +813,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, edx);
LOperand* right_operand = UseFixed(right, eax);
- LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+ LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -894,8 +894,17 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsBranch()) {
- instr->set_hydrogen_value(HBranch::cast(current)->value());
+ if (current->IsBranch() && !instr->IsGoto()) {
+ // TODO(fschneider): Handle branch instructions uniformly like
+ // other instructions. This requires us to generate the right
+ // branch instruction already at the HIR level.
+ ASSERT(instr->IsControl());
+ HBranch* branch = HBranch::cast(current);
+ instr->set_hydrogen_value(branch->value());
+ HBasicBlock* first = branch->FirstSuccessor();
+ HBasicBlock* second = branch->SecondSuccessor();
+ ASSERT(first != NULL && second != NULL);
+ instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
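This replaces the old scheme where every *AndBranch constructor carried its true/false block ids. The builder now reads the successors off the HBranch and pushes them onto the lithium instruction through SetBranchTargets, which the header half of this diff declares as a no-op on the base class and overrides in LControlInstruction:

// Base LInstruction: targets are ignored.
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }

// LControlInstruction: targets are recorded for codegen.
void SetBranchTargets(int true_block_id, int false_block_id) {
  true_block_id_ = true_block_id;
  false_block_id_ = false_block_id;
}

LGoto is excluded because its single target is fixed at construction time.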
@@ -910,59 +919,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-void LEnvironment::WriteTranslation(LCodeGen* cgen,
- Translation* translation) const {
- if (this == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - parameter_count();
-
- outer()->WriteTranslation(cgen, translation);
- int closure_id = cgen->DefineDeoptimizationLiteral(closure());
- translation->BeginFrame(ast_id(), closure_id, height);
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (spilled_registers_ != NULL && value != NULL) {
- if (value->IsRegister() &&
- spilled_registers_[value->index()] != NULL) {
- translation->MarkDuplicate();
- cgen->AddToTranslation(translation,
- spilled_registers_[value->index()],
- HasTaggedValueAt(i));
- } else if (value->IsDoubleRegister() &&
- spilled_double_registers_[value->index()] != NULL) {
- translation->MarkDuplicate();
- cgen->AddToTranslation(translation,
- spilled_double_registers_[value->index()],
- false);
- }
- }
-
- cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
- }
-}
-
-
-void LEnvironment::PrintTo(StringStream* stream) const {
- stream->Add("[id=%d|", ast_id());
- stream->Add("[parameters=%d|", parameter_count());
- stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
- for (int i = 0; i < values_.length(); ++i) {
- if (i != 0) stream->Add(";");
- if (values_[i] == NULL) {
- stream->Add("[hole]");
- } else {
- values_[i]->PrintTo(stream);
- }
- }
- stream->Add("]");
-}
-
-
LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (hydrogen_env == NULL) return NULL;
@@ -985,11 +941,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
} else if (value->IsPushArgument()) {
op = new LArgument(argument_index++);
} else {
- op = UseOrConstant(value);
- if (op->IsUnallocated()) {
- LUnallocated* unalloc = LUnallocated::cast(op);
- unalloc->set_policy(LUnallocated::ANY);
- }
+ op = UseAny(value);
}
result->AddValue(op, value->representation());
}
@@ -999,21 +951,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- if (instr->include_stack_check()) result = AssignPointerMap(result);
- return result;
+ LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
+ instr->include_stack_check());
+ return (instr->include_stack_check())
+ ? AssignPointerMap(result)
+ : result;
}
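A plausible reading of the special case (inference, not stated in the patch): a goto that includes a stack check may call into the runtime, so it needs a pointer map describing live tagged values for the GC, while a plain goto never calls and needs none. The same decorate-only-when-needed shape recurs throughout the file:

// Hypothetical flag name, for illustration only:
return needs_runtime_call ? AssignPointerMap(result) : result;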
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
- HBasicBlock* first = instr->FirstSuccessor();
- HBasicBlock* second = instr->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- int first_id = first->block_id();
- int second_id = second->block_id();
-
if (v->EmitAtUses()) {
if (v->IsClassOfTest()) {
HClassOfTest* compare = HClassOfTest::cast(v);
@@ -1021,63 +968,52 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
TempRegister(),
- TempRegister(),
- first_id,
- second_id);
+ TempRegister());
} else if (v->IsCompare()) {
HCompare* compare = HCompare::cast(v);
Token::Value op = compare->token();
HValue* left = compare->left();
HValue* right = compare->right();
- if (left->representation().IsInteger32()) {
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(op,
- UseRegisterAtStart(left),
- UseOrConstantAtStart(right),
- first_id,
- second_id,
- false);
- } else if (left->representation().IsDouble()) {
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseOrConstantAtStart(right));
+ } else if (r.IsDouble()) {
+ ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(op,
- UseRegisterAtStart(left),
- UseRegisterAtStart(right),
- first_id,
- second_id,
- true);
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
} else {
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
bool reversed = op == Token::GT || op == Token::LTE;
LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
- LInstruction* result = new LCmpTAndBranch(left_operand,
- right_operand,
- first_id,
- second_id);
+ LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
+ right_operand);
return MarkAsCall(result, instr);
}
} else if (v->IsIsSmi()) {
HIsSmi* compare = HIsSmi::cast(v);
ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()),
- first_id,
- second_id);
+ return new LIsSmiAndBranch(Use(compare->value()));
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister(),
- first_id,
- second_id);
+ TempRegister());
} else if (v->IsHasCachedArrayIndex()) {
HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()), first_id, second_id);
+ UseRegisterAtStart(compare->value()));
} else if (v->IsIsNull()) {
HIsNull* compare = HIsNull::cast(v);
ASSERT(compare->value()->representation().IsTagged());
@@ -1085,10 +1021,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
// We only need a temp register for non-strict compare.
LOperand* temp = compare->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- compare->is_strict(),
- temp,
- first_id,
- second_id);
+ temp);
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
@@ -1097,42 +1030,34 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LOperand* temp2 = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
temp1,
- temp2,
- first_id,
- second_id);
+ temp2);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()),
- first_id,
- second_id);
+ UseRegisterAtStart(compare->right()));
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstruction* result =
+ LInstanceOfAndBranch* result =
new LInstanceOfAndBranch(
UseFixed(instance_of->left(), InstanceofStub::left()),
- UseFixed(instance_of->right(), InstanceofStub::right()),
- first_id,
- second_id);
+ UseFixed(instance_of->right(), InstanceofStub::right()));
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
- first_id,
- second_id);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
} else {
if (v->IsConstant()) {
if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(first_id);
+ return new LGoto(instr->FirstSuccessor()->block_id());
} else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(second_id);
+ return new LGoto(instr->SecondSuccessor()->block_id());
}
}
Abort("Undefined compare before branch");
return NULL;
}
}
- return new LBranch(UseRegisterAtStart(v), first_id, second_id);
+ return new LBranch(UseRegisterAtStart(v));
}
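Summarizing the structure of DoBranch: when the controlling value is emitted at its sole use (EmitAtUses), the compare and the branch fuse into one of the *AndBranch control instructions; otherwise the value is materialized into a register and tested by a generic LBranch. In pseudocode (mine, not the patch's):

// value used only by the branch:
//   emit <Compare>AndBranch(inputs)                  // one fused instruction
// value has other uses:
//   emit <Compare>(inputs) into a register, then LBranch(register)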
@@ -1155,7 +1080,7 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LInstruction* result =
+ LInstanceOf* result =
new LInstanceOf(UseFixed(instr->left(), InstanceofStub::left()),
UseFixed(instr->right(), InstanceofStub::right()));
return MarkAsCall(DefineFixed(result, eax), instr);
@@ -1164,7 +1089,7 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
- LInstruction* result =
+ LInstanceOfKnownGlobal* result =
new LInstanceOfKnownGlobal(
UseFixed(instr->value(), InstanceofStub::left()),
FixedTemp(edi));
@@ -1178,10 +1103,10 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* receiver = UseFixed(instr->receiver(), eax);
LOperand* length = UseRegisterAtStart(instr->length());
LOperand* elements = UseRegisterAtStart(instr->elements());
- LInstruction* result = new LApplyArguments(function,
- receiver,
- length,
- elements);
+ LApplyArguments* result = new LApplyArguments(function,
+ receiver,
+ length,
+ elements);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1214,11 +1139,11 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
if (op == kMathLog || op == kMathSin || op == kMathCos) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LInstruction* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
- LInstruction* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1241,8 +1166,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
- UseFixed(instr->key(), ecx);
- return MarkAsCall(DefineFixed(new LCallKeyed, eax), instr);
+ LOperand* key = UseFixed(instr->key(), ecx);
+ return MarkAsCall(DefineFixed(new LCallKeyed(key), eax), instr);
}
@@ -1267,7 +1192,7 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* constructor = UseFixed(instr->constructor(), edi);
argument_count_ -= instr->argument_count();
- LInstruction* result = new LCallNew(constructor);
+ LCallNew* result = new LCallNew(constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1307,7 +1232,9 @@ LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
- return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LBitNotI* result = new LBitNotI(input);
+ return DefineSameAsFirst(result);
}
@@ -1327,10 +1254,11 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
} else if (instr->representation().IsInteger32()) {
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
- FixedTemp(edx);
+ LOperand* temp = FixedTemp(edx);
LOperand* value = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
- return AssignEnvironment(DefineFixed(new LDivI(value, divisor), eax));
+ LDivI* result = new LDivI(value, divisor, temp);
+ return AssignEnvironment(DefineFixed(result, eax));
} else {
ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
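Background on FixedTemp(edx), here and in DoMod just below: the ia32 idiv instruction takes its dividend in edx:eax and writes the quotient to eax and the remainder to edx, so the divisor must stay out of both registers. Reserving edx as a fixed temp is how the builder communicates that to the allocator:

// ia32 idiv register contract:
//   input : edx:eax = 64-bit dividend, r/m32 = divisor
//   output: eax = quotient (LDivI's result), edx = remainder (LModI's result)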
@@ -1344,15 +1272,15 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
ASSERT(instr->right()->representation().IsInteger32());
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
- FixedTemp(edx);
+ LOperand* temp = FixedTemp(edx);
LOperand* value = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
- LInstruction* result = DefineFixed(new LModI(value, divisor), edx);
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
- result = AssignEnvironment(result);
- }
- return result;
+ LModI* mod = new LModI(value, divisor, temp);
+ LInstruction* result = DefineFixed(mod, edx);
+ return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanBeDivByZero))
+ ? AssignEnvironment(result)
+ : result;
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
@@ -1449,21 +1377,26 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
Token::Value op = instr->token();
- if (instr->left()->representation().IsInteger32()) {
+ Representation r = instr->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(op, left, right, false));
- } else if (instr->left()->representation().IsDouble()) {
+ return DefineAsRegister(new LCmpID(left, right));
+ } else if (r.IsDouble()) {
+ ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(op, left, right, true));
+ return DefineAsRegister(new LCmpID(left, right));
} else {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
- LInstruction* result = new LCmpT(left, right);
+ LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
}
@@ -1473,7 +1406,7 @@ LInstruction* LChunkBuilder::DoCompareJSObjectEq(
HCompareJSObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LInstruction* result = new LCmpJSObjectEq(left, right);
+ LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
return DefineAsRegister(result);
}
@@ -1482,8 +1415,7 @@ LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LIsNull(value,
- instr->is_strict()));
+ return DefineAsRegister(new LIsNull(value));
}
@@ -1542,7 +1474,7 @@ LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
- LInstruction* result = new LValueOf(object, TempRegister());
+ LValueOf* result = new LValueOf(object, TempRegister());
return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1565,7 +1497,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LInstruction* res = new LNumberUntagD(value);
+ LNumberUntagD* res = new LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
@@ -1576,7 +1508,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
(instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
- LInstruction* res = new LTaggedToI(value, xmm_temp);
+ LTaggedToI* res = new LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
return DefineSameAsFirst(new LSmiUntag(value, needs_check));
@@ -1589,12 +1521,16 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
- LInstruction* result = new LNumberTagD(value, temp);
+ LNumberTagD* result = new LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
+ bool needs_temp = instr->CanTruncateToInt32() &&
+ !CpuFeatures::IsSupported(SSE3);
+ LOperand* value = needs_temp ?
+ UseTempRegister(instr->value()) : UseRegister(instr->value());
+ LOperand* temp = needs_temp ? TempRegister() : NULL;
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
}
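On needs_temp above: with SSE3 the truncating conversion has a direct instruction path and needs no scratch register; without it, the fallback evidently works on the double's bit pattern by hand, which is why the input is placed in a temp-usable register and an extra TempRegister is reserved. The "why" is my inference; the condition itself is exactly as written:

// Decision table implied by the hunk:
//   CanTruncateToInt32 && SSE3  -> fast truncation, no temp
//   CanTruncateToInt32 && !SSE3 -> manual truncation, temp needed
//   !CanTruncateToInt32         -> exact conversion + deopt check, no temp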
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
@@ -1603,7 +1539,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new LSmiTag(value));
} else {
- LInstruction* result = new LNumberTagI(value);
+ LNumberTagI* result = new LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
@@ -1625,17 +1561,14 @@ LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
- LInstruction* result = new LCheckInstanceType(value, temp);
+ LCheckInstanceType* result = new LCheckInstanceType(value, temp);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp = TempRegister();
- LInstruction* result =
- new LCheckPrototypeMaps(temp,
- instr->holder(),
- instr->receiver_map());
+ LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
return AssignEnvironment(result);
}
@@ -1654,7 +1587,7 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new LCheckMap(value);
+ LCheckMap* result = new LCheckMap(value);
return AssignEnvironment(result);
}
@@ -1675,14 +1608,14 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT(instr->handle()));
} else {
- Abort("unsupported constant of type double");
+ UNREACHABLE();
return NULL;
}
}
LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LInstruction* result = new LLoadGlobal;
+ LLoadGlobal* result = new LLoadGlobal;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
@@ -1694,16 +1627,22 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
}
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ return DefineAsRegister(new LLoadContextSlot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new LLoadNamedField(UseRegisterAtStart(instr->object())));
+ ASSERT(instr->representation().IsTagged());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new LLoadNamedField(obj));
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), eax);
- LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), eax);
- return MarkAsCall(result, instr);
+ LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1723,23 +1662,12 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
- Representation r = instr->representation();
- LOperand* obj = UseRegisterAtStart(instr->object());
+ ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* load_result = NULL;
- // Double needs an extra temp, because the result is converted from heap
- // number to a double register.
- if (r.IsDouble()) load_result = TempRegister();
- LInstruction* result = new LLoadKeyedFastElement(obj,
- key,
- load_result);
- if (r.IsDouble()) {
- result = DefineAsRegister(result);
- } else {
- result = DefineSameAsFirst(result);
- }
- return AssignEnvironment(result);
+ LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+ return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1747,9 +1675,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), edx);
LOperand* key = UseFixed(instr->key(), eax);
- LInstruction* result =
- DefineFixed(new LLoadKeyedGeneric(object, key), eax);
- return MarkAsCall(result, instr);
+ LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
+ return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1801,14 +1728,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
? TempRegister() : NULL;
- return new LStoreNamedField(obj,
- instr->name(),
- val,
- instr->is_in_object(),
- instr->offset(),
- temp,
- needs_write_barrier,
- instr->transition());
+ return new LStoreNamedField(obj, val, temp);
}
@@ -1816,7 +1736,7 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), edx);
LOperand* val = UseFixed(instr->value(), eax);
- LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+ LStoreNamedGeneric* result = new LStoreNamedGeneric(obj, val);
return MarkAsCall(result, instr);
}
@@ -1842,8 +1762,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LInstruction* result = new LDeleteProperty(Use(instr->object()),
- UseOrConstant(instr->key()));
+ LDeleteProperty* result = new LDeleteProperty(Use(instr->object()),
+ UseOrConstant(instr->key()));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1884,13 +1804,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
- return DefineAsRegister(AssignEnvironment(result));
+ LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LInstruction* result = new LTypeof(UseAtStart(instr->value()));
+ LTypeof* result = new LTypeof(UseAtStart(instr->value()));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1919,8 +1839,8 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new LLazyBailout;
- result = AssignEnvironment(result);
+ LLazyBailout* lazy_bailout = new LLazyBailout;
+ LInstruction* result = AssignEnvironment(lazy_bailout);
instructions_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
ClearInstructionPendingDeoptimizationEnvironment();
@@ -1956,21 +1876,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-void LPointerMap::RecordPointer(LOperand* op) {
- // Do not record arguments as pointers.
- if (op->IsStackSlot() && op->index() < 0) return;
- ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
- pointer_operands_.Add(op);
-}
-
-
-void LPointerMap::PrintTo(StringStream* stream) const {
- stream->Add("{");
- for (int i = 0; i < pointer_operands_.length(); ++i) {
- if (i != 0) stream->Add(";");
- pointer_operands_[i]->PrintTo(stream);
- }
- stream->Add("} @%d", position());
-}
-
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 4b0db40e..67f87518 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -38,17 +38,27 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-class LEnvironment;
-class Translation;
// Type hierarchy:
//
// LInstruction
-//   LAccessArgumentsAt
-//   LArgumentsElements
-//   LArgumentsLength
-//   LBinaryOperation
+//   LTemplateInstruction
+//     LControlInstruction
+//       LBranch
+//       LClassOfTestAndBranch
+//       LCmpJSObjectEqAndBranch
+//       LCmpIDAndBranch
+//       LHasCachedArrayIndexAndBranch
+//       LHasInstanceTypeAndBranch
+//       LInstanceOfAndBranch
+//       LIsNullAndBranch
+//       LIsObjectAndBranch
+//       LIsSmiAndBranch
+//       LTypeofIsAndBranch
+//     LAccessArgumentsAt
+//     LArgumentsElements
+//     LArgumentsLength
//     LAddI
//     LApplyArguments
//     LArithmeticD
@@ -56,13 +66,10 @@ class Translation;
//     LBitI
//     LBoundsCheck
//     LCmpID
-//     LCmpIDAndBranch
//     LCmpJSObjectEq
-//     LCmpJSObjectEqAndBranch
//     LCmpT
//     LDivI
//     LInstanceOf
-//     LInstanceOfAndBranch
//     LInstanceOfKnownGlobal
//     LLoadKeyedFastElement
//     LLoadKeyedGeneric
@@ -71,64 +78,59 @@ class Translation;
//     LPower
//     LShiftI
//     LSubI
-//   LCallConstantFunction
-//   LCallFunction
-//   LCallGlobal
-//   LCallKeyed
-//   LCallKnownGlobal
-//   LCallNamed
-//   LCallRuntime
-//   LCallStub
-//   LConstant
-//     LConstantD
-//     LConstantI
-//     LConstantT
-//   LDeoptimize
-//   LFunctionLiteral
-//   LGlobalObject
-//   LGlobalReceiver
-//   LLabel
-//   LLayzBailout
-//   LLoadGlobal
-//   LMaterializedLiteral
+//     LCallConstantFunction
+//     LCallFunction
+//     LCallGlobal
+//     LCallKeyed
+//     LCallKnownGlobal
+//     LCallNamed
+//     LCallRuntime
+//     LCallStub
+//     LConstant
+//       LConstantD
+//       LConstantI
+//       LConstantT
+//     LDeoptimize
+//     LFunctionLiteral
+//     LGap
+//       LLabel
+//     LGlobalObject
+//     LGlobalReceiver
+//     LGoto
+//     LLazyBailout
+//     LLoadGlobal
+//     LCheckPrototypeMaps
+//     LLoadContextSlot
//     LArrayLiteral
//     LObjectLiteral
//     LRegExpLiteral
-//   LOsrEntry
-//   LParameter
-//   LRegExpConstructResult
-//   LStackCheck
-//   LStoreKeyed
-//     LStoreKeyedFastElement
-//     LStoreKeyedGeneric
-//   LStoreNamed
-//     LStoreNamedField
-//     LStoreNamedGeneric
-//   LUnaryOperation
+//     LOsrEntry
+//     LParameter
+//     LRegExpConstructResult
+//     LStackCheck
+//     LStoreKeyed
+//       LStoreKeyedFastElement
+//       LStoreKeyedGeneric
+//     LStoreNamed
+//       LStoreNamedField
+//       LStoreNamedGeneric
//     LBitNotI
-//     LBranch
//     LCallNew
//     LCheckFunction
+//     LCheckPrototypeMaps
//     LCheckInstanceType
//     LCheckMap
-//     LCheckPrototypeMaps
//     LCheckSmi
//     LClassOfTest
-//     LClassOfTestAndBranch
//     LDeleteProperty
//     LDoubleToI
//     LFixedArrayLength
//     LHasCachedArrayIndex
-//     LHasCachedArrayIndexAndBranch
//     LHasInstanceType
-//     LHasInstanceTypeAndBranch
//     LInteger32ToDouble
//     LIsNull
-//     LIsNullAndBranch
//     LIsObject
-//     LIsObjectAndBranch
//     LIsSmi
-//     LIsSmiAndBranch
//     LJSArrayLength
//     LLoadNamedField
//     LLoadNamedGeneric
@@ -143,19 +145,16 @@ class Translation;
//     LThrow
//     LTypeof
//     LTypeofIs
-//     LTypeofIsAndBranch
//     LUnaryMathOperation
//     LValueOf
-//   LUnknownOSRValue
+//     LUnknownOSRValue
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(BinaryOperation) \
+ V(ControlInstruction) \
V(Constant) \
V(Call) \
- V(MaterializedLiteral) \
V(StoreKeyed) \
V(StoreNamed) \
- V(UnaryOperation) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@@ -225,6 +224,7 @@ class Translation;
V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
@@ -292,14 +292,17 @@ class LInstruction: public ZoneObject {
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream) const;
- virtual void PrintDataTo(StringStream* stream) const { }
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) = 0;
+ virtual void PrintOutputOperandTo(StringStream* stream) = 0;
// Declare virtual type testers.
#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+
virtual bool IsControl() const { return false; }
+ virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_.set(env); }
LEnvironment* environment() const { return environment_.get(); }
@@ -309,9 +312,7 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- void set_result(LOperand* operand) { result_.set(operand); }
- LOperand* result() const { return result_.get(); }
- bool HasResult() const { return result_.is_set(); }
+ virtual bool HasResult() const = 0;
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -329,34 +330,63 @@ class LInstruction: public ZoneObject {
private:
SetOncePointer<LEnvironment> environment_;
SetOncePointer<LPointerMap> pointer_map_;
- SetOncePointer<LOperand> result_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
};
-class LParallelMove : public ZoneObject {
+template<typename T, int N>
+class OperandContainer {
public:
- LParallelMove() : move_operands_(4) { }
-
- void AddMove(LOperand* from, LOperand* to) {
- move_operands_.Add(LMoveOperands(from, to));
+ OperandContainer() {
+ for (int i = 0; i < N; i++) elems_[i] = NULL;
}
+ int length() { return N; }
+ T& operator[](int i) {
+ ASSERT(i < length());
+ return elems_[i];
+ }
+ void PrintOperandsTo(StringStream* stream);
- bool IsRedundant() const;
+ private:
+ T elems_[N];
+};
- const ZoneList<LMoveOperands>* move_operands() const {
- return &move_operands_;
- }
- void PrintDataTo(StringStream* stream) const;
+template<typename T>
+class OperandContainer<T, 0> {
+ public:
+ int length() { return 0; }
+ void PrintOperandsTo(StringStream* stream) { }
+};
- private:
- ZoneList<LMoveOperands> move_operands_;
+
+template<int R, int I, int T = 0>
+class LTemplateInstruction: public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const { return R != 0; }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() { return results_[0]; }
+
+ int InputCount() { return I; }
+ LOperand* InputAt(int i) { return inputs_[i]; }
+
+ int TempCount() { return T; }
+ LOperand* TempAt(int i) { return temps_[i]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ protected:
+ OperandContainer<LOperand*, R> results_;
+ OperandContainer<LOperand*, I> inputs_;
+ OperandContainer<LOperand*, T> temps_;
};
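A self-contained illustration of the container trick above, for readers wondering why the zero-size specialization of OperandContainer exists: standard C++ forbids zero-length arrays, so the &lt;T, 0&gt; case must drop the array member entirely, keeping instructions with no results or temps legal. Toy names, compiles stand-alone:

template<typename T, int N>
struct Box {
  T elems_[N];           // legal only for N >= 1
};

template<typename T>
struct Box<T, 0> { };    // zero case: no array member at all

template<int R, int I, int T>
struct Node {
  Box<int, R> results_;
  Box<int, I> inputs_;
  Box<int, T> temps_;
};

Node<1, 2, 0> add_like;  // one result, two inputs, no temps
Node<0, 0, 0> gap_like;  // marker instruction: every container is empty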
-class LGap: public LInstruction {
+class LGap: public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
@@ -367,7 +397,7 @@ class LGap: public LInstruction {
}
DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
bool IsRedundant() const;
@@ -397,13 +427,13 @@ class LGap: public LInstruction {
};
-class LGoto: public LInstruction {
+class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
LGoto(int block_id, bool include_stack_check = false)
: block_id_(block_id), include_stack_check_(include_stack_check) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
@@ -415,7 +445,7 @@ class LGoto: public LInstruction {
};
-class LLazyBailout: public LInstruction {
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -431,7 +461,7 @@ class LLazyBailout: public LInstruction {
};
-class LDeoptimize: public LInstruction {
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
};
@@ -444,7 +474,7 @@ class LLabel: public LGap {
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -459,13 +489,13 @@ class LLabel: public LGap {
};
-class LParameter: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LInstruction {
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
@@ -476,96 +506,81 @@ class LCallStub: public LInstruction {
};
-class LUnknownOSRValue: public LInstruction {
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
-class LUnaryOperation: public LInstruction {
+template<int I, int T = 0>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
- explicit LUnaryOperation(LOperand* input) : input_(input) { }
-
- DECLARE_INSTRUCTION(UnaryOperation)
-
- LOperand* input() const { return input_; }
-
- virtual void PrintDataTo(StringStream* stream) const;
-
- private:
- LOperand* input_;
-};
-
-
-class LBinaryOperation: public LInstruction {
- public:
- LBinaryOperation(LOperand* left, LOperand* right)
- : left_(left), right_(right) { }
-
- DECLARE_INSTRUCTION(BinaryOperation)
+ DECLARE_INSTRUCTION(ControlInstruction)
+ virtual bool IsControl() const { return true; }
- LOperand* left() const { return left_; }
- LOperand* right() const { return right_; }
- virtual void PrintDataTo(StringStream* stream) const;
+ int true_block_id() const { return true_block_id_; }
+ int false_block_id() const { return false_block_id_; }
+ void SetBranchTargets(int true_block_id, int false_block_id) {
+ true_block_id_ = true_block_id;
+ false_block_id_ = false_block_id;
+ }
private:
- LOperand* left_;
- LOperand* right_;
+ int true_block_id_;
+ int false_block_id_;
};
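Note the default argument in template&lt;int I, int T = 0&gt;: it lets the concrete branch classes below state their shapes concisely.

// Shapes used by the branch classes in this header:
//   LControlInstruction<2>    - two inputs, no temps  (LCmpIDAndBranch)
//   LControlInstruction<1, 1> - one input, one temp   (LIsNullAndBranch)
//   LControlInstruction<1, 2> - one input, two temps  (LIsObjectAndBranch)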
-class LApplyArguments: public LBinaryOperation {
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
- LOperand* elements)
- : LBinaryOperation(function, receiver),
- length_(length),
- elements_(elements) { }
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- LOperand* function() const { return left(); }
- LOperand* receiver() const { return right(); }
- LOperand* length() const { return length_; }
- LOperand* elements() const { return elements_; }
-
- private:
- LOperand* length_;
- LOperand* elements_;
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
};
-class LAccessArgumentsAt: public LInstruction {
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
- : arguments_(arguments), length_(length), index_(index) { }
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- LOperand* arguments() const { return arguments_; }
- LOperand* length() const { return length_; }
- LOperand* index() const { return index_; }
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream) const;
-
- private:
- LOperand* arguments_;
- LOperand* length_;
- LOperand* index_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LArgumentsLength: public LUnaryOperation {
+class LArgumentsLength: public LTemplateInstruction<1, 1> {
public:
- explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
-class LArgumentsElements: public LInstruction {
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
LArgumentsElements() { }
@@ -573,361 +588,280 @@ class LArgumentsElements: public LInstruction {
};
-class LModI: public LBinaryOperation {
+class LModI: public LTemplateInstruction<1, 2, 1> {
public:
- LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+ LModI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
-class LDivI: public LBinaryOperation {
+class LDivI: public LTemplateInstruction<1, 2, 1> {
public:
- LDivI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LDivI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
-class LMulI: public LBinaryOperation {
+class LMulI: public LTemplateInstruction<1, 2, 1> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp)
- : LBinaryOperation(left, right), temp_(temp) { }
+ LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LCmpID: public LBinaryOperation {
+class LCmpID: public LTemplateInstruction<1, 2> {
public:
- LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
- : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
-
- Token::Value op() const { return op_; }
- bool is_double() const { return is_double_; }
+ LCmpID(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
- private:
- Token::Value op_;
- bool is_double_;
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
+ }
};
-class LCmpIDAndBranch: public LCmpID {
+class LCmpIDAndBranch: public LControlInstruction<2> {
public:
- LCmpIDAndBranch(Token::Value op,
- LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id,
- bool is_double)
- : LCmpID(op, left, right, is_double),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
+ }
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LUnaryMathOperation: public LUnaryOperation {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
public:
- explicit LUnaryMathOperation(LOperand* value)
- : LUnaryOperation(value) { }
+ explicit LUnaryMathOperation(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
BuiltinFunctionId op() const { return hydrogen()->op(); }
};
-class LCmpJSObjectEq: public LBinaryOperation {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) {}
+ LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
};
-class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LCmpJSObjectEq(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
"cmp-jsobject-eq-and-branch")
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
};
-class LIsNull: public LUnaryOperation {
+class LIsNull: public LTemplateInstruction<1, 1> {
public:
- LIsNull(LOperand* value, bool is_strict)
- : LUnaryOperation(value), is_strict_(is_strict) {}
+ explicit LIsNull(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+ DECLARE_HYDROGEN_ACCESSOR(IsNull)
- bool is_strict() const { return is_strict_; }
-
- private:
- bool is_strict_;
+ bool is_strict() const { return hydrogen()->is_strict(); }
};
-class LIsNullAndBranch: public LIsNull {
+class LIsNullAndBranch: public LControlInstruction<1, 1> {
public:
- LIsNullAndBranch(LOperand* value,
- bool is_strict,
- LOperand* temp,
- int true_block_id,
- int false_block_id)
- : LIsNull(value, is_strict),
- temp_(temp),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LIsNullAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
+ DECLARE_HYDROGEN_ACCESSOR(IsNull)
- LOperand* temp() const { return temp_; }
+ bool is_strict() const { return hydrogen()->is_strict(); }
- private:
- LOperand* temp_;
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LIsObject: public LUnaryOperation {
+class LIsObject: public LTemplateInstruction<1, 1, 1> {
public:
- LIsObject(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) {}
+ LIsObject(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LIsObjectAndBranch: public LIsObject {
+class LIsObjectAndBranch: public LControlInstruction<1, 2> {
public:
- LIsObjectAndBranch(LOperand* value,
- LOperand* temp,
- LOperand* temp2,
- int true_block_id,
- int false_block_id)
- : LIsObject(value, temp),
- temp2_(temp2),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- LOperand* temp2() const { return temp2_; }
- private:
- LOperand* temp2_;
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LUnaryOperation {
+class LIsSmi: public LTemplateInstruction<1, 1> {
public:
- explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+ explicit LIsSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
DECLARE_HYDROGEN_ACCESSOR(IsSmi)
};
-class LIsSmiAndBranch: public LIsSmi {
+class LIsSmiAndBranch: public LControlInstruction<1> {
public:
- LIsSmiAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LIsSmi(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LHasInstanceType: public LUnaryOperation {
+class LHasInstanceType: public LTemplateInstruction<1, 1> {
public:
- explicit LHasInstanceType(LOperand* value)
- : LUnaryOperation(value) { }
+ explicit LHasInstanceType(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-
- InstanceType TestType(); // The type to test against when generating code.
- Condition BranchCondition(); // The branch condition for 'true'.
};
-class LHasInstanceTypeAndBranch: public LHasInstanceType {
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
public:
- LHasInstanceTypeAndBranch(LOperand* value,
- LOperand* temporary,
- int true_block_id,
- int false_block_id)
- : LHasInstanceType(value),
- temp_(temporary),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- LOperand* temp() { return temp_; }
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
- private:
- LOperand* temp_;
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LHasCachedArrayIndex: public LUnaryOperation {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
public:
- explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+ explicit LHasCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
};
-class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> {
public:
- LHasCachedArrayIndexAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LHasCachedArrayIndex(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LClassOfTest: public LUnaryOperation {
+class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
public:
- LClassOfTest(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temporary_(temp) {}
+ LClassOfTest(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
- virtual void PrintDataTo(StringStream* stream) const;
-
- LOperand* temporary() { return temporary_; }
-
- private:
- LOperand *temporary_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LClassOfTestAndBranch: public LClassOfTest {
+class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
public:
- LClassOfTestAndBranch(LOperand* value,
- LOperand* temporary,
- LOperand* temporary2,
- int true_block_id,
- int false_block_id)
- : LClassOfTest(value, temporary),
- temporary2_(temporary2),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- LOperand* temporary2() { return temporary2_; }
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
- private:
- LOperand* temporary2_;
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LCmpT: public LBinaryOperation {
+class LCmpT: public LTemplateInstruction<1, 2> {
public:
- LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+ LCmpT(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(Compare)
@@ -936,90 +870,78 @@ class LCmpT: public LBinaryOperation {
};
-class LCmpTAndBranch: public LCmpT {
+class LCmpTAndBranch: public LControlInstruction<2> {
public:
- LCmpTAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LCmpT(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LCmpTAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ Token::Value op() const { return hydrogen()->token(); }
};
-class LInstanceOf: public LBinaryOperation {
+class LInstanceOf: public LTemplateInstruction<1, 2> {
public:
- LInstanceOf(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LInstanceOf(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfAndBranch: public LInstanceOf {
+class LInstanceOfAndBranch: public LControlInstruction<2> {
public:
- LInstanceOfAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LInstanceOf(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LInstanceOfAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
};
-class LInstanceOfKnownGlobal: public LUnaryOperation {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* left, LOperand* temp)
- : LUnaryOperation(left), temp_(temp) { }
+ LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
Handle<JSFunction> function() const { return hydrogen()->function(); }
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LBoundsCheck: public LBinaryOperation {
+class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
- LBoundsCheck(LOperand* index, LOperand* length)
- : LBinaryOperation(index, length) { }
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
- LOperand* index() const { return left(); }
- LOperand* length() const { return right(); }
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
};
-class LBitI: public LBinaryOperation {
+class LBitI: public LTemplateInstruction<1, 2> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
Token::Value op() const { return op_; }
@@ -1030,10 +952,13 @@ class LBitI: public LBinaryOperation {
};
-class LShiftI: public LBinaryOperation {
+class LShiftI: public LTemplateInstruction<1, 2> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
Token::Value op() const { return op_; }
@@ -1047,17 +972,19 @@ class LShiftI: public LBinaryOperation {
};
-class LSubI: public LBinaryOperation {
+class LSubI: public LTemplateInstruction<1, 2> {
public:
- LSubI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
-class LConstant: public LInstruction {
+class LConstant: public LTemplateInstruction<1, 0, 0> {
DECLARE_INSTRUCTION(Constant)
};
@@ -1098,31 +1025,24 @@ class LConstantT: public LConstant {
};
-class LBranch: public LUnaryOperation {
+class LBranch: public LControlInstruction<1> {
public:
- LBranch(LOperand* input, int true_block_id, int false_block_id)
- : LUnaryOperation(input),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Value)
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LCmpMapAndBranch: public LUnaryOperation {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1> {
public:
- explicit LCmpMapAndBranch(LOperand* value) : LUnaryOperation(value) { }
+ explicit LCmpMapAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
@@ -1139,79 +1059,91 @@ class LCmpMapAndBranch: public LUnaryOperation {
};
-class LJSArrayLength: public LUnaryOperation {
+class LJSArrayLength: public LTemplateInstruction<1, 1> {
public:
- explicit LJSArrayLength(LOperand* input) : LUnaryOperation(input) { }
+ explicit LJSArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
-class LFixedArrayLength: public LUnaryOperation {
+class LFixedArrayLength: public LTemplateInstruction<1, 1> {
public:
- explicit LFixedArrayLength(LOperand* input) : LUnaryOperation(input) { }
+ explicit LFixedArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
};
-class LValueOf: public LUnaryOperation {
+class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
- LValueOf(LOperand* input, LOperand* temporary)
- : LUnaryOperation(input), temporary_(temporary) { }
-
- LOperand* temporary() const { return temporary_; }
+ LValueOf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-
- private:
- LOperand* temporary_;
};
-class LThrow: public LUnaryOperation {
+class LThrow: public LTemplateInstruction<0, 1> {
public:
- explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+ explicit LThrow(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
-class LBitNotI: public LUnaryOperation {
+class LBitNotI: public LTemplateInstruction<1, 1> {
public:
- explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+ explicit LBitNotI(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
-class LAddI: public LBinaryOperation {
+class LAddI: public LTemplateInstruction<1, 2> {
public:
- LAddI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
-class LPower: public LBinaryOperation {
+class LPower: public LTemplateInstruction<1, 2> {
public:
- LPower(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
-class LArithmeticD: public LBinaryOperation {
+class LArithmeticD: public LTemplateInstruction<1, 2> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
Token::Value op() const { return op_; }
@@ -1223,10 +1155,13 @@ class LArithmeticD: public LBinaryOperation {
};
-class LArithmeticT: public LBinaryOperation {
+class LArithmeticT: public LTemplateInstruction<1, 2> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@@ -1238,163 +1173,186 @@ class LArithmeticT: public LBinaryOperation {
};
-class LReturn: public LUnaryOperation {
+class LReturn: public LTemplateInstruction<0, 1> {
public:
- explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+ explicit LReturn(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
-class LLoadNamedField: public LUnaryOperation {
+class LLoadNamedField: public LTemplateInstruction<1, 1> {
public:
- explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
-class LLoadNamedGeneric: public LUnaryOperation {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
public:
- explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+ explicit LLoadNamedGeneric(LOperand* object) {
+ inputs_[0] = object;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() const { return input(); }
+ LOperand* object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
-class LLoadFunctionPrototype: public LUnaryOperation {
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temporary)
- : LUnaryOperation(function), temporary_(temporary) { }
+ LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ inputs_[0] = function;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
- LOperand* function() const { return input(); }
- LOperand* temporary() const { return temporary_; }
-
- private:
- LOperand* temporary_;
+ LOperand* function() { return inputs_[0]; }
};
-class LLoadElements: public LUnaryOperation {
+class LLoadElements: public LTemplateInstruction<1, 1> {
public:
- explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+ explicit LLoadElements(LOperand* object) {
+ inputs_[0] = object;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
-class LLoadKeyedFastElement: public LBinaryOperation {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
public:
- LLoadKeyedFastElement(LOperand* elements,
- LOperand* key,
- LOperand* load_result)
- : LBinaryOperation(elements, key),
- load_result_(load_result) { }
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
- LOperand* elements() const { return left(); }
- LOperand* key() const { return right(); }
- LOperand* load_result() const { return load_result_; }
-
- private:
- LOperand* load_result_;
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
};
-class LLoadKeyedGeneric: public LBinaryOperation {
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> {
public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key)
- : LBinaryOperation(obj, key) { }
+ LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- LOperand* object() const { return left(); }
- LOperand* key() const { return right(); }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
};
-class LLoadGlobal: public LInstruction {
+class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
};
-class LStoreGlobal: public LUnaryOperation {
+class LStoreGlobal: public LTemplateInstruction<0, 1> {
public:
- explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+ explicit LStoreGlobal(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
};
-class LPushArgument: public LUnaryOperation {
+class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
public:
- explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int context_chain_length() { return hydrogen()->context_chain_length(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LTemplateInstruction<0, 1> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
-class LGlobalObject: public LInstruction {
+class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
};
-class LGlobalReceiver: public LInstruction {
+class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
-class LCallConstantFunction: public LInstruction {
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- Handle<JSFunction> function() const { return hydrogen()->function(); }
+ Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LInstruction {
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallKeyed(LOperand* key) {
+ inputs_[0] = key;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LInstruction {
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LInstruction {
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1403,44 +1361,46 @@ class LCallFunction: public LInstruction {
};
-class LCallGlobal: public LInstruction {
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LInstruction {
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> target() const { return hydrogen()->target(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LUnaryOperation {
+class LCallNew: public LTemplateInstruction<1, 1> {
public:
- explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+ explicit LCallNew(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LInstruction {
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
@@ -1450,40 +1410,44 @@ class LCallRuntime: public LInstruction {
};
-class LInteger32ToDouble: public LUnaryOperation {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
public:
- explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
-class LNumberTagI: public LUnaryOperation {
+class LNumberTagI: public LTemplateInstruction<1, 1> {
public:
- explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+ explicit LNumberTagI(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagD: public LUnaryOperation {
+class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LNumberTagD(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ explicit LNumberTagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
// Sometimes truncating conversion from a double to an int32.
-class LDoubleToI: public LUnaryOperation {
+class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+ LDoubleToI(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
@@ -1493,42 +1457,46 @@ class LDoubleToI: public LUnaryOperation {
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LUnaryOperation {
+class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
public:
- LTaggedToI(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ LTaggedToI(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LSmiTag: public LUnaryOperation {
+class LSmiTag: public LTemplateInstruction<1, 1> {
public:
- explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
-class LNumberUntagD: public LUnaryOperation {
+class LNumberUntagD: public LTemplateInstruction<1, 1> {
public:
- explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+ explicit LNumberUntagD(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
};
-class LSmiUntag: public LUnaryOperation {
+class LSmiUntag: public LTemplateInstruction<1, 1> {
public:
- LSmiUntag(LOperand* use, bool needs_check)
- : LUnaryOperation(use), needs_check_(needs_check) { }
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
@@ -1539,89 +1507,66 @@ class LSmiUntag: public LUnaryOperation {
};
-class LStoreNamed: public LInstruction {
+class LStoreNamed: public LTemplateInstruction<0, 2, 1> {
public:
- LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
- : object_(obj), name_(name), value_(val) { }
+ LStoreNamed(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
DECLARE_INSTRUCTION(StoreNamed)
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- LOperand* object() const { return object_; }
- Handle<Object> name() const { return name_; }
- LOperand* value() const { return value_; }
-
- private:
- LOperand* object_;
- Handle<Object> name_;
- LOperand* value_;
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
};
class LStoreNamedField: public LStoreNamed {
public:
- LStoreNamedField(LOperand* obj,
- Handle<Object> name,
- LOperand* val,
- bool in_object,
- int offset,
- LOperand* temp,
- bool needs_write_barrier,
- Handle<Map> transition)
- : LStoreNamed(obj, name, val),
- is_in_object_(in_object),
- offset_(offset),
- temp_(temp),
- needs_write_barrier_(needs_write_barrier),
- transition_(transition) { }
+ LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
+ : LStoreNamed(obj, val) {
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
- bool is_in_object() { return is_in_object_; }
- int offset() { return offset_; }
- LOperand* temp() { return temp_; }
- bool needs_write_barrier() { return needs_write_barrier_; }
- Handle<Map> transition() const { return transition_; }
- void set_transition(Handle<Map> map) { transition_ = map; }
-
- private:
- bool is_in_object_;
- int offset_;
- LOperand* temp_;
- bool needs_write_barrier_;
- Handle<Map> transition_;
+ bool is_in_object() { return hydrogen()->is_in_object(); }
+ int offset() { return hydrogen()->offset(); }
+ bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
};
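+
+// [Editorial note, not part of this commit] The accessors above can drop
+// their backing fields because DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+// supplies a typed bridge to the hydrogen instruction, roughly:
+//   HStoreNamedField* hydrogen() const {
+//     return HStoreNamedField::cast(hydrogen_value());
+//   }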
class LStoreNamedGeneric: public LStoreNamed {
public:
- LStoreNamedGeneric(LOperand* obj,
- Handle<Object> name,
- LOperand* val)
- : LStoreNamed(obj, name, val) { }
+ LStoreNamedGeneric(LOperand* obj, LOperand* val)
+ : LStoreNamed(obj, val) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
};
-class LStoreKeyed: public LInstruction {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
- : object_(obj), key_(key), value_(val) { }
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
DECLARE_INSTRUCTION(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- LOperand* object() const { return object_; }
- LOperand* key() const { return key_; }
- LOperand* value() const { return value_; }
-
- private:
- LOperand* object_;
- LOperand* key_;
- LOperand* value_;
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
};
@@ -1645,65 +1590,60 @@ class LStoreKeyedGeneric: public LStoreKeyed {
};
-class LCheckFunction: public LUnaryOperation {
+class LCheckFunction: public LTemplateInstruction<0, 1> {
public:
- explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+ explicit LCheckFunction(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
-class LCheckInstanceType: public LUnaryOperation {
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
public:
- LCheckInstanceType(LOperand* use, LOperand* temp)
- : LUnaryOperation(use), temp_(temp) { }
+ LCheckInstanceType(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LCheckMap: public LUnaryOperation {
+class LCheckMap: public LTemplateInstruction<0, 1> {
public:
- explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+ explicit LCheckMap(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
DECLARE_HYDROGEN_ACCESSOR(CheckMap)
};
-class LCheckPrototypeMaps: public LInstruction {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
public:
- LCheckPrototypeMaps(LOperand* temp,
- Handle<JSObject> holder,
- Handle<Map> receiver_map)
- : temp_(temp),
- holder_(holder),
- receiver_map_(receiver_map) { }
+ explicit LCheckPrototypeMaps(LOperand* temp) {
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
- LOperand* temp() const { return temp_; }
- Handle<JSObject> holder() const { return holder_; }
- Handle<Map> receiver_map() const { return receiver_map_; }
-
- private:
- LOperand* temp_;
- Handle<JSObject> holder_;
- Handle<Map> receiver_map_;
+ Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+ Handle<JSObject> holder() const { return hydrogen()->holder(); }
};
-class LCheckSmi: public LUnaryOperation {
+class LCheckSmi: public LTemplateInstruction<0, 1> {
public:
- LCheckSmi(LOperand* use, Condition condition)
- : LUnaryOperation(use), condition_(condition) { }
+ LCheckSmi(LOperand* value, Condition condition)
+ : condition_(condition) {
+ inputs_[0] = value;
+ }
Condition condition() const { return condition_; }
@@ -1717,34 +1657,28 @@ class LCheckSmi: public LUnaryOperation {
};
-class LMaterializedLiteral: public LInstruction {
- public:
- DECLARE_INSTRUCTION(MaterializedLiteral)
-};
-
-
-class LArrayLiteral: public LMaterializedLiteral {
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};
-class LObjectLiteral: public LMaterializedLiteral {
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
};
-class LRegExpLiteral: public LMaterializedLiteral {
+class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LInstruction {
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
@@ -1753,61 +1687,61 @@ class LFunctionLiteral: public LInstruction {
};
-class LTypeof: public LUnaryOperation {
+class LTypeof: public LTemplateInstruction<1, 1> {
public:
- explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+ explicit LTypeof(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
-class LTypeofIs: public LUnaryOperation {
+class LTypeofIs: public LTemplateInstruction<1, 1> {
public:
- explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
- virtual void PrintDataTo(StringStream* stream) const;
+ explicit LTypeofIs(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream);
};
-class LTypeofIsAndBranch: public LTypeofIs {
+class LTypeofIsAndBranch: public LControlInstruction<1> {
public:
- LTypeofIsAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LTypeofIs(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LTypeofIsAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LDeleteProperty: public LBinaryOperation {
+class LDeleteProperty: public LTemplateInstruction<1, 2> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+ LDeleteProperty(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
- LOperand* object() const { return left(); }
- LOperand* key() const { return right(); }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
};
-class LOsrEntry: public LInstruction {
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry();
@@ -1830,114 +1764,12 @@ class LOsrEntry: public LInstruction {
};
-class LStackCheck: public LInstruction {
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
};
-class LPointerMap: public ZoneObject {
- public:
- explicit LPointerMap(int position)
- : pointer_operands_(8), position_(position), lithium_position_(-1) { }
-
- const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
- int position() const { return position_; }
- int lithium_position() const { return lithium_position_; }
-
- void set_lithium_position(int pos) {
- ASSERT(lithium_position_ == -1);
- lithium_position_ = pos;
- }
-
- void RecordPointer(LOperand* op);
- void PrintTo(StringStream* stream) const;
-
- private:
- ZoneList<LOperand*> pointer_operands_;
- int position_;
- int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
- LEnvironment(Handle<JSFunction> closure,
- int ast_id,
- int parameter_count,
- int argument_count,
- int value_count,
- LEnvironment* outer)
- : closure_(closure),
- arguments_stack_height_(argument_count),
- deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
- translation_index_(-1),
- ast_id_(ast_id),
- parameter_count_(parameter_count),
- values_(value_count),
- representations_(value_count),
- spilled_registers_(NULL),
- spilled_double_registers_(NULL),
- outer_(outer) {
- }
-
- Handle<JSFunction> closure() const { return closure_; }
- int arguments_stack_height() const { return arguments_stack_height_; }
- int deoptimization_index() const { return deoptimization_index_; }
- int translation_index() const { return translation_index_; }
- int ast_id() const { return ast_id_; }
- int parameter_count() const { return parameter_count_; }
- const ZoneList<LOperand*>* values() const { return &values_; }
- LEnvironment* outer() const { return outer_; }
-
- void AddValue(LOperand* operand, Representation representation) {
- values_.Add(operand);
- representations_.Add(representation);
- }
-
- bool HasTaggedValueAt(int index) const {
- return representations_[index].IsTagged();
- }
-
- void Register(int deoptimization_index, int translation_index) {
- ASSERT(!HasBeenRegistered());
- deoptimization_index_ = deoptimization_index;
- translation_index_ = translation_index;
- }
- bool HasBeenRegistered() const {
- return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
- }
-
- void SetSpilledRegisters(LOperand** registers,
- LOperand** double_registers) {
- spilled_registers_ = registers;
- spilled_double_registers_ = double_registers;
- }
-
- // Emit frame translation commands for this environment.
- void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
-
- void PrintTo(StringStream* stream) const;
-
- private:
- Handle<JSFunction> closure_;
- int arguments_stack_height_;
- int deoptimization_index_;
- int translation_index_;
- int ast_id_;
- int parameter_count_;
- ZoneList<LOperand*> values_;
- ZoneList<Representation> representations_;
-
- // Allocation index indexed arrays of spill slot operands for registers
- // that are also in spill slots at an OSR entry. NULL for environments
- // that do not correspond to an OSR entry.
- LOperand** spilled_registers_;
- LOperand** spilled_double_registers_;
-
- LEnvironment* outer_;
-};
-
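+// [Editorial note, not part of this commit] LPointerMap and LEnvironment are
+// not deleted outright: they appear to move to the platform-independent
+// src/lithium.h and src/lithium.cc elsewhere in this same change.
+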
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
@@ -1993,8 +1825,6 @@ class LChunk: public ZoneObject {
inlined_closures_.Add(closure);
}
- void Verify() const;
-
private:
int spill_slot_count_;
HGraph* const graph_;
@@ -2051,9 +1881,10 @@ class LChunkBuilder BASE_EMBEDDED {
LUnallocated* ToUnallocated(XMMRegister reg);
// Methods for setting up define-use relationships.
- LOperand* Use(HValue* value, LUnallocated* operand);
- LOperand* UseFixed(HValue* value, Register fixed_register);
- LOperand* UseFixedDouble(HValue* value, XMMRegister fixed_register);
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ XMMRegister fixed_register);
// A value that is guaranteed to be allocated to a register.
// Operand created by UseRegister is guaranteed to be live until the end of
@@ -2063,27 +1894,53 @@ class LChunkBuilder BASE_EMBEDDED {
// instruction start. Register allocator is free to assign the same register
// to some other operand used inside instruction (i.e. temporary or
// output).
- LOperand* UseRegister(HValue* value);
- LOperand* UseRegisterAtStart(HValue* value);
-
- // A value in a register that may be trashed.
- LOperand* UseTempRegister(HValue* value);
- LOperand* Use(HValue* value);
- LOperand* UseAtStart(HValue* value);
- LOperand* UseOrConstant(HValue* value);
- LOperand* UseOrConstantAtStart(HValue* value);
- LOperand* UseRegisterOrConstant(HValue* value);
- LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+ // An input operand in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An input operand in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An input operand in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // An input operand in a register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- LInstruction* Define(LInstruction* instr, LUnallocated* result);
- LInstruction* Define(LInstruction* instr);
- LInstruction* DefineAsRegister(LInstruction* instr);
- LInstruction* DefineAsSpilled(LInstruction* instr, int index);
- LInstruction* DefineSameAsFirst(LInstruction* instr);
- LInstruction* DefineFixed(LInstruction* instr, Register reg);
- LInstruction* DefineFixedDouble(LInstruction* instr, XMMRegister reg);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+ int index);
+ template<int I, int T>
+ LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg);
+ template<int I, int T>
+ LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+ XMMRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
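+
+ // [Editorial sketch, not part of this commit] A typical pairing of the
+ // Use* and Define* helpers above inside a LChunkBuilder::DoXxx method;
+ // the exact operand policies chosen per instruction are assumptions here:
+ //
+ //   LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ //     LOperand* left = UseRegisterAtStart(instr->left());
+ //     LOperand* right = UseOrConstantAtStart(instr->right());
+ //     LInstruction* result = DefineSameAsFirst(new LAddI(left, right));
+ //     return instr->CheckFlag(HValue::kCanOverflow)
+ //         ? AssignEnvironment(result)
+ //         : result;
+ //   }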
@@ -2104,13 +1961,6 @@ class LChunkBuilder BASE_EMBEDDED {
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
- // Temporary operand that may be a memory location.
- LOperand* Temp();
- // Temporary operand that must be in a register.
- LUnallocated* TempRegister();
- LOperand* FixedTemp(Register reg);
- LOperand* FixedTemp(XMMRegister reg);
-
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index a6f46790..10c942a5 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -877,55 +877,53 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Immediate(Factory::cons_ascii_string_map()));
}
-// All registers must be distinct. Only current_string needs valid contents
-// on entry. All registers may be invalid on exit. result_operand is
-// unchanged, padding_chars is updated correctly.
-void MacroAssembler::AppendStringToTopOfNewSpace(
- Register current_string, // Tagged pointer to string to copy.
- Register current_string_length,
- Register result_pos,
- Register scratch,
- Register new_padding_chars,
- Operand operand_result,
- Operand operand_padding_chars,
- Label* bailout) {
- mov(current_string_length,
- FieldOperand(current_string, String::kLengthOffset));
- shr(current_string_length, 1);
- sub(current_string_length, operand_padding_chars);
- mov(new_padding_chars, current_string_length);
- add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
- and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
- sub(new_padding_chars, Operand(current_string_length));
- neg(new_padding_chars);
- // We need an allocation even if current_string_length is 0, to fetch
- // result_pos. Consider using a faster fetch of result_pos in that case.
- AllocateInNewSpace(current_string_length, result_pos, scratch, no_reg,
- bailout, NO_ALLOCATION_FLAGS);
- sub(result_pos, operand_padding_chars);
- mov(operand_padding_chars, new_padding_chars);
-
- Register scratch_2 = new_padding_chars; // Used to compute total length.
- // Copy string to the end of result.
- mov(current_string_length,
- FieldOperand(current_string, String::kLengthOffset));
- mov(scratch, operand_result);
- mov(scratch_2, current_string_length);
- add(scratch_2, FieldOperand(scratch, String::kLengthOffset));
- mov(FieldOperand(scratch, String::kLengthOffset), scratch_2);
- shr(current_string_length, 1);
- lea(current_string,
- FieldOperand(current_string, SeqAsciiString::kHeaderSize));
- // Loop condition: while (--current_string_length >= 0).
- Label copy_loop;
- Label copy_loop_entry;
- jmp(&copy_loop_entry);
- bind(&copy_loop);
- mov_b(scratch, Operand(current_string, current_string_length, times_1, 0));
- mov_b(Operand(result_pos, current_string_length, times_1, 0), scratch);
- bind(&copy_loop_entry);
- sub(Operand(current_string_length), Immediate(1));
- j(greater_equal, &copy_loop);
+
+// Copy memory, byte-by-byte, from source to destination. Not optimized for
+// long or aligned copies. The contents of scratch and length are destroyed.
+// Source and destination are incremented by length.
+// Many variants of movsb, loop unrolling, word moves, and indexed operands
+// have been tried here already, and this is fastest.
+// A simpler loop is faster on small copies, but 30% slower on large ones.
+// The cld() instruction must have been emitted, to clear the direction flag,
+// before calling this function.
+void MacroAssembler::CopyBytes(Register source,
+ Register destination,
+ Register length,
+ Register scratch) {
+ Label loop, done, short_string, short_loop;
+ // Experimentation shows that the short string loop is faster if length <= 10.
+ cmp(Operand(length), Immediate(10));
+ j(less_equal, &short_string);
+
+ ASSERT(source.is(esi));
+ ASSERT(destination.is(edi));
+ ASSERT(length.is(ecx));
+
+ // Because source is 4-byte aligned in our uses of this function,
+ // we keep source aligned for the rep_movs call by copying the odd bytes
+ // at the end of the ranges.
+ mov(scratch, Operand(source, length, times_1, -4));
+ mov(Operand(destination, length, times_1, -4), scratch);
+ mov(scratch, ecx);
+ shr(ecx, 2);
+ rep_movs();
+ and_(Operand(scratch), Immediate(0x3));
+ add(destination, Operand(scratch));
+ jmp(&done);
+
+ bind(&short_string);
+ test(length, Operand(length));
+ j(zero, &done);
+
+ bind(&short_loop);
+ mov_b(scratch, Operand(source, 0));
+ mov_b(Operand(destination, 0), scratch);
+ inc(source);
+ inc(destination);
+ dec(length);
+ j(not_zero, &short_loop);
+
+ bind(&done);
}
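+
+// [Editorial sketch, not part of this commit] A call site would look roughly
+// like this, with the fixed register assignments required by the ASSERTs
+// above (the scratch register, ebx here, is an arbitrary choice):
+//   __ cld();  // rep_movs copies forward only while the direction flag is 0.
+//   __ CopyBytes(esi, edi, ecx, ebx);  // source, destination, length, scratch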
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 6f5fa872..6f180c6c 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -386,22 +386,13 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
- // All registers must be distinct. Only current_string needs valid contents
- // on entry. All registers may be invalid on exit. result_operand is
- // unchanged, padding_chars is updated correctly.
- // The top of new space must contain a sequential ascii string with
- // padding_chars bytes free in its top word. The sequential ascii string
- // current_string is concatenated to it, allocating the necessary amount
- // of new memory.
- void AppendStringToTopOfNewSpace(
- Register current_string, // Tagged pointer to string to copy.
- Register current_string_length,
- Register result_pos,
- Register scratch,
- Register new_padding_chars,
- Operand operand_result,
- Operand operand_padding_chars,
- Label* bailout);
+ // Copy memory, byte-by-byte, from source to destination. Not optimized for
+ // long or aligned copies.
+ // The contents of length and scratch are destroyed.
+ void CopyBytes(Register source,
+ Register destination,
+ Register length,
+ Register scratch);
// ---------------------------------------------------------------------------
// Support functions.
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index bcb02ed7..45d63c5a 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1362,11 +1362,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
MaybeObject* CallStubCompiler::GenerateMissBranch() {
+ MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
+ kind_);
Object* obj;
- { MaybeObject* maybe_obj =
- StubCache::ComputeCallMiss(arguments().immediate(), kind_);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
return obj;
}
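+
+// [Editorial note, not part of this commit] MaybeObject* is V8's
+// failure-or-object return type: ToObject() returns false for a failure
+// value (typically a retry-after-GC allocation failure), in which case the
+// failure is propagated to the caller instead of being used as a code object.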
@@ -1685,9 +1684,15 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
const int argc = arguments().immediate();
Label miss;
+ Label name_miss;
Label index_out_of_range;
+ Label* index_out_of_range_label = &index_out_of_range;
- GenerateNameCheck(name, &miss);
+ if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1715,7 +1720,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_code_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
@@ -1723,11 +1728,16 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
StubRuntimeCallHelper call_helper;
char_code_at_generator.GenerateSlow(masm(), call_helper);
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(Factory::nan_value()));
- __ ret((argc + 1) * kPointerSize);
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ Set(eax, Immediate(Factory::nan_value()));
+ __ ret((argc + 1) * kPointerSize);
+ }
__ bind(&miss);
+ // Restore function name in ecx.
+ __ Set(ecx, Immediate(Handle<String>(name)));
+ __ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -1758,9 +1768,15 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
const int argc = arguments().immediate();
Label miss;
+ Label name_miss;
Label index_out_of_range;
+ Label* index_out_of_range_label = &index_out_of_range;
- GenerateNameCheck(name, &miss);
+ if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1790,7 +1806,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
@@ -1798,11 +1814,16 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm(), call_helper);
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(Factory::empty_string()));
- __ ret((argc + 1) * kPointerSize);
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ Set(eax, Immediate(Factory::empty_string()));
+ __ ret((argc + 1) * kPointerSize);
+ }
__ bind(&miss);
+ // Restore function name in ecx.
+ __ Set(ecx, Immediate(Handle<String>(name)));
+ __ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
diff --git a/src/ic.cc b/src/ic.cc
index 645c6fdc..afae3235 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -154,24 +154,20 @@ static bool HasNormalObjectsInPrototypeChain(LookupResult* lookup,
}
-IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
- IC::State state = target->ic_state();
-
- if (state != MONOMORPHIC || !name->IsString()) return state;
- if (receiver->IsUndefined() || receiver->IsNull()) return state;
-
+static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
+ Object* receiver,
+ Object* name) {
InlineCacheHolderFlag cache_holder =
Code::ExtractCacheHolderFromFlags(target->flags());
-
if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
// The stub was generated for JSObject but called for non-JSObject.
// IC::GetCodeCacheHolder is not applicable.
- return MONOMORPHIC;
+ return false;
} else if (cache_holder == PROTOTYPE_MAP &&
receiver->GetPrototype()->IsNull()) {
// IC::GetCodeCacheHolder is not applicable.
- return MONOMORPHIC;
+ return false;
}
Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map();
@@ -185,20 +181,37 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
// to prototype check failure.
int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
- // For keyed load/store/call, the most likely cause of cache failure is
- // that the key has changed. We do not distinguish between
- // prototype and non-prototype failures for keyed access.
- Code::Kind kind = target->kind();
- if (kind == Code::KEYED_LOAD_IC ||
- kind == Code::KEYED_STORE_IC ||
- kind == Code::KEYED_CALL_IC) {
- return MONOMORPHIC;
- }
-
- // Remove the target from the code cache to avoid hitting the same
- // invalid stub again.
map->RemoveFromCodeCache(String::cast(name), target, index);
+ return true;
+ }
+
+ return false;
+}
+
+
+IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
+ IC::State state = target->ic_state();
+
+ if (state != MONOMORPHIC || !name->IsString()) return state;
+ if (receiver->IsUndefined() || receiver->IsNull()) return state;
+
+ // For keyed load/store/call, the most likely cause of cache failure is
+ // that the key has changed. We do not distinguish between
+ // prototype and non-prototype failures for keyed access.
+ Code::Kind kind = target->kind();
+ if (kind == Code::KEYED_LOAD_IC ||
+ kind == Code::KEYED_STORE_IC ||
+ kind == Code::KEYED_CALL_IC) {
+ return MONOMORPHIC;
+ }
+ // Remove the target from the code cache if it became invalid
+ // because of changes in the prototype chain to avoid hitting it
+ // again.
+ // Call stubs handle this later to allow extra IC state
+ // transitions.
+ if (kind != Code::CALL_IC &&
+ TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
return MONOMORPHIC_PROTOTYPE_FAILURE;
}
@@ -482,6 +495,7 @@ void CallICBase::ReceiverToObject(Handle<Object> object) {
MaybeObject* CallICBase::LoadFunction(State state,
+ Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
@@ -527,7 +541,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
// Lookup is valid: Update inline cache and stub cache.
if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, object, name);
+ UpdateCaches(&lookup, state, extra_ic_state, object, name);
}
// Get the property.
@@ -576,8 +590,142 @@ MaybeObject* CallICBase::LoadFunction(State state,
}
+bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
+ Handle<Object> object,
+ Code::ExtraICState* extra_ic_state) {
+ ASSERT(kind_ == Code::CALL_IC);
+ if (lookup->type() != CONSTANT_FUNCTION) return false;
+ JSFunction* function = lookup->GetConstantFunction();
+ if (!function->shared()->HasBuiltinFunctionId()) return false;
+
+ // Fetch the arguments passed to the called function.
+ const int argc = target()->arguments_count();
+ Address entry = Top::c_entry_fp(Top::GetCurrentThread());
+ Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+ Arguments args(argc + 1,
+ &Memory::Object_at(fp +
+ StandardFrameConstants::kCallerSPOffset +
+ argc * kPointerSize));
+ switch (function->shared()->builtin_function_id()) {
+ case kStringCharCodeAt:
+ case kStringCharAt:
+ if (object->IsString()) {
+ String* string = String::cast(*object);
+ // Check that there's the right wrapper in the receiver slot.
+ ASSERT(string == JSValue::cast(args[0])->value());
+ // If we're in the default (fastest) state and the index is
+ // out of bounds, update the state to record this fact.
+ if (*extra_ic_state == DEFAULT_STRING_STUB &&
+ argc >= 1 && args[1]->IsNumber()) {
+ double index;
+ if (args[1]->IsSmi()) {
+ index = Smi::cast(args[1])->value();
+ } else {
+ ASSERT(args[1]->IsHeapNumber());
+ index = DoubleToInteger(HeapNumber::cast(args[1])->value());
+ }
+ if (index < 0 || index >= string->length()) {
+ *extra_ic_state = STRING_INDEX_OUT_OF_BOUNDS;
+ return true;
+ }
+ }
+ }
+ break;
+ default:
+ return false;
+ }
+ return false;
+}
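+
+// [Editorial note, not part of this commit] The assumed state transition:
+// while the extra state is DEFAULT_STRING_STUB, the string stubs route an
+// out-of-range index to the miss handler (see index_out_of_range_label in
+// stub-cache-ia32.cc above); the miss flips the state to
+// STRING_INDEX_OUT_OF_BOUNDS and the recompiled stub then handles the
+// out-of-range case inline (returning NaN or the empty string).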
+
+
+MaybeObject* CallICBase::ComputeMonomorphicStub(
+ LookupResult* lookup,
+ State state,
+ Code::ExtraICState extra_ic_state,
+ Handle<Object> object,
+ Handle<String> name) {
+ int argc = target()->arguments_count();
+ InLoopFlag in_loop = target()->ic_in_loop();
+ MaybeObject* maybe_code = NULL;
+ switch (lookup->type()) {
+ case FIELD: {
+ int index = lookup->GetFieldIndex();
+ maybe_code = StubCache::ComputeCallField(argc,
+ in_loop,
+ kind_,
+ *name,
+ *object,
+ lookup->holder(),
+ index);
+ break;
+ }
+ case CONSTANT_FUNCTION: {
+ // Get the constant function and compute the code stub for this
+ // call; used for rewriting to monomorphic state and making sure
+ // that the code stub is in the stub cache.
+ JSFunction* function = lookup->GetConstantFunction();
+ maybe_code = StubCache::ComputeCallConstant(argc,
+ in_loop,
+ kind_,
+ extra_ic_state,
+ *name,
+ *object,
+ lookup->holder(),
+ function);
+ break;
+ }
+ case NORMAL: {
+ if (!object->IsJSObject()) return NULL;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+ if (lookup->holder()->IsGlobalObject()) {
+ GlobalObject* global = GlobalObject::cast(lookup->holder());
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+ if (!cell->value()->IsJSFunction()) return NULL;
+ JSFunction* function = JSFunction::cast(cell->value());
+ maybe_code = StubCache::ComputeCallGlobal(argc,
+ in_loop,
+ kind_,
+ *name,
+ *receiver,
+ global,
+ cell,
+ function);
+ } else {
+ // There is only one shared stub for calling normalized
+ // properties. It does not traverse the prototype chain, so the
+ // property must be found in the receiver for the stub to be
+ // applicable.
+ if (lookup->holder() != *receiver) return NULL;
+ maybe_code = StubCache::ComputeCallNormal(argc,
+ in_loop,
+ kind_,
+ *name,
+ *receiver);
+ }
+ break;
+ }
+ case INTERCEPTOR: {
+ ASSERT(HasInterceptorGetter(lookup->holder()));
+ maybe_code = StubCache::ComputeCallInterceptor(argc,
+ kind_,
+ *name,
+ *object,
+ lookup->holder());
+ break;
+ }
+ default:
+ maybe_code = NULL;
+ break;
+ }
+ return maybe_code;
+}
+
+
void CallICBase::UpdateCaches(LookupResult* lookup,
State state,
+ Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
// Bail out if we didn't find a result.
@@ -594,90 +742,44 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
MaybeObject* maybe_code = NULL;
- Object* code;
+ bool had_proto_failure = false;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
maybe_code = StubCache::ComputeCallPreMonomorphic(argc, in_loop, kind_);
} else if (state == MONOMORPHIC) {
- maybe_code = StubCache::ComputeCallMegamorphic(argc, in_loop, kind_);
- } else {
- // Compute monomorphic stub.
- switch (lookup->type()) {
- case FIELD: {
- int index = lookup->GetFieldIndex();
- maybe_code = StubCache::ComputeCallField(argc,
- in_loop,
- kind_,
- *name,
- *object,
- lookup->holder(),
- index);
- break;
- }
- case CONSTANT_FUNCTION: {
- // Get the constant function and compute the code stub for this
- // call; used for rewriting to monomorphic state and making sure
- // that the code stub is in the stub cache.
- JSFunction* function = lookup->GetConstantFunction();
- maybe_code = StubCache::ComputeCallConstant(argc,
- in_loop,
- kind_,
- *name,
- *object,
- lookup->holder(),
- function);
- break;
- }
- case NORMAL: {
- if (!object->IsJSObject()) return;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (lookup->holder()->IsGlobalObject()) {
- GlobalObject* global = GlobalObject::cast(lookup->holder());
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- if (!cell->value()->IsJSFunction()) return;
- JSFunction* function = JSFunction::cast(cell->value());
- maybe_code = StubCache::ComputeCallGlobal(argc,
- in_loop,
- kind_,
- *name,
- *receiver,
- global,
- cell,
- function);
- } else {
- // There is only one shared stub for calling normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (lookup->holder() != *receiver) return;
- maybe_code = StubCache::ComputeCallNormal(argc,
- in_loop,
- kind_,
- *name,
- *receiver);
- }
- break;
- }
- case INTERCEPTOR: {
- ASSERT(HasInterceptorGetter(lookup->holder()));
- maybe_code = StubCache::ComputeCallInterceptor(argc,
- kind_,
- *name,
- *object,
- lookup->holder());
- break;
- }
- default:
- return;
+ if (kind_ == Code::CALL_IC &&
+ TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
+ maybe_code = ComputeMonomorphicStub(lookup,
+ state,
+ extra_ic_state,
+ object,
+ name);
+ } else if (kind_ == Code::CALL_IC &&
+ TryRemoveInvalidPrototypeDependentStub(target(),
+ *object,
+ *name)) {
+ had_proto_failure = true;
+ maybe_code = ComputeMonomorphicStub(lookup,
+ state,
+ extra_ic_state,
+ object,
+ name);
+ } else {
+ maybe_code = StubCache::ComputeCallMegamorphic(argc, in_loop, kind_);
}
+ } else {
+ maybe_code = ComputeMonomorphicStub(lookup,
+ state,
+ extra_ic_state,
+ object,
+ name);
}
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
+ Object* code;
if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
// Patch the call site depending on the state of the cache.
@@ -696,7 +798,9 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
StubCache::Set(*name, map, Code::cast(code));
}
+ USE(had_proto_failure);
#ifdef DEBUG
+ if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
name, state, target(), in_loop ? " (in-loop)" : "");
#endif
@@ -707,7 +811,10 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
Handle<Object> object,
Handle<Object> key) {
if (key->IsSymbol()) {
- return CallICBase::LoadFunction(state, object, Handle<String>::cast(key));
+ return CallICBase::LoadFunction(state,
+ Code::kNoExtraICState,
+ object,
+ Handle<String>::cast(key));
}
if (object->IsUndefined() || object->IsNull()) {
@@ -1641,11 +1748,13 @@ MUST_USE_RESULT MaybeObject* CallIC_Miss(Arguments args) {
ASSERT(args.length() == 2);
CallIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+ Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ MaybeObject* maybe_result = ic.LoadFunction(state,
+ extra_ic_state,
+ args.at<Object>(0),
+ args.at<String>(1));
Object* result;
- { MaybeObject* maybe_result =
- ic.LoadFunction(state, args.at<Object>(0), args.at<String>(1));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ if (!maybe_result->ToObject(&result)) return maybe_result;
// The first time the inline cache is updated may be the first time the
// function it references gets called. If the function was lazily compiled
diff --git a/src/ic.h b/src/ic.h
index 8562bcc2..9996affa 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -193,16 +193,29 @@ class CallICBase: public IC {
public:
MUST_USE_RESULT MaybeObject* LoadFunction(State state,
+ Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
protected:
Code::Kind kind_;
+ bool TryUpdateExtraICState(LookupResult* lookup,
+ Handle<Object> object,
+ Code::ExtraICState* extra_ic_state);
+
+ MUST_USE_RESULT MaybeObject* ComputeMonomorphicStub(
+ LookupResult* lookup,
+ State state,
+ Code::ExtraICState extra_ic_state,
+ Handle<Object> object,
+ Handle<String> name);
+
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
State state,
+ Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
diff --git a/src/inspector.cc b/src/inspector.cc
new file mode 100644
index 00000000..8fb80f1a
--- /dev/null
+++ b/src/inspector.cc
@@ -0,0 +1,63 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+#include "inspector.h"
+
+
+namespace v8 {
+namespace internal {
+
+#ifdef INSPECTOR
+
+//============================================================================
+// The Inspector.
+
+void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) {
+ // Dump the object pointer.
+ OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
+ if (obj->IsHeapObject()) {
+ HeapObject *hobj = HeapObject::cast(obj);
+ OS::FPrint(out, " size %d :", hobj->Size());
+ }
+
+ // Dump each object classification that matches this object.
+#define FOR_EACH_TYPE(type) \
+ if (obj->Is##type()) { \
+ OS::FPrint(out, " %s", #type); \
+ }
+ OBJECT_TYPE_LIST(FOR_EACH_TYPE)
+ HEAP_OBJECT_TYPE_LIST(FOR_EACH_TYPE)
+#undef FOR_EACH_TYPE
+}
+
+
+#endif // INSPECTOR
+
+} } // namespace v8::internal
+
diff --git a/src/inspector.h b/src/inspector.h
new file mode 100644
index 00000000..f8b30428
--- /dev/null
+++ b/src/inspector.h
@@ -0,0 +1,62 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_INSPECTOR_H_
+#define V8_INSPECTOR_H_
+
+// Only build this code if we're configured with the INSPECTOR.
+#ifdef INSPECTOR
+
+#include "v8.h"
+
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+class Inspector {
+ public:
+
+ static void DumpObjectType(FILE* out, Object *obj, bool print_more);
+ static void DumpObjectType(FILE* out, Object *obj) {
+ DumpObjectType(out, obj, false);
+ }
+ static void DumpObjectType(Object *obj, bool print_more) {
+ DumpObjectType(stdout, obj, print_more);
+ }
+ static void DumpObjectType(Object *obj) {
+ DumpObjectType(stdout, obj, false);
+ }
+};
+
+} } // namespace v8::internal
+
+#endif // INSPECTOR
+
+#endif // V8_INSPECTOR_H_
+
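Usage is as simple as the overloads above suggest; a fragment, assuming a build
with INSPECTOR defined and an Object* obj in scope (the sample output line is
illustrative, and note that the dump in inspector.cc does not yet use print_more):

    Inspector::DumpObjectType(obj);                // one line to stdout, e.g.
    //   0x2f2a8091: size 24 : HeapObject HeapNumber Number
    Inspector::DumpObjectType(stderr, obj, true);  // explicit stream, print_more
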
diff --git a/src/json.js b/src/json.js
index 0034176b..e90d5d1d 100644
--- a/src/json.js
+++ b/src/json.js
@@ -179,24 +179,60 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
function BasicSerializeArray(value, stack, builder) {
+ var len = value.length;
+ if (len == 0) {
+ builder.push("[]");
+ return;
+ }
if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
builder.push("[");
- var len = value.length;
- for (var i = 0; i < len; i++) {
+ var val = value[0];
+ if (IS_STRING(val)) {
+ // First entry is a string. Remaining entries are likely to be strings too.
+ builder.push(%QuoteJSONString(val));
+ for (var i = 1; i < len; i++) {
+ val = value[i];
+ if (IS_STRING(val)) {
+ builder.push(%QuoteJSONStringComma(val));
+ } else {
+ builder.push(",");
+ var before = builder.length;
+ BasicJSONSerialize(i, value[i], stack, builder);
+ if (before == builder.length) builder[before - 1] = ",null";
+ }
+ }
+ } else if (IS_NUMBER(val)) {
+ // First entry is a number. Remaining entries are likely to be numbers too.
+ builder.push(NUMBER_IS_FINITE(val) ? %_NumberToString(val) : "null");
+ for (var i = 1; i < len; i++) {
+ builder.push(",");
+ val = value[i];
+ if (IS_NUMBER(val)) {
+ builder.push(NUMBER_IS_FINITE(val)
+ ? %_NumberToString(val)
+ : "null");
+ } else {
+ var before = builder.length;
+ BasicJSONSerialize(i, value[i], stack, builder);
+ if (before == builder.length) builder[before - 1] = ",null";
+ }
+ }
+ } else {
var before = builder.length;
- BasicJSONSerialize(i, value, stack, builder);
+ BasicJSONSerialize(0, val, stack, builder);
if (before == builder.length) builder.push("null");
- builder.push(",");
+ for (var i = 1; i < len; i++) {
+ builder.push(",");
+ before = builder.length;
+ val = value[i];
+ BasicJSONSerialize(i, val, stack, builder);
+ if (before == builder.length) builder[before - 1] = ",null";
+ }
}
stack.pop();
- if (builder.pop() != ",") {
- builder.push("[]"); // Zero length array. Push "[" back on.
- } else {
- builder.push("]");
- }
-
+ builder.push("]");
}
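
The ",null" patching above relies on BasicJSONSerialize appending nothing for
values JSON skips (undefined, functions): the caller pushes a separator, records
the builder length, and if the length is unchanged afterwards it rewrites the
separator it just pushed into ",null". A self-contained C++ analogue of the
pattern, illustrative only, with SerializeMaybe standing in for BasicJSONSerialize:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Appends a serialization of value, or nothing if it is unserializable,
    // mirroring BasicJSONSerialize's contract. nullptr plays "undefined".
    static void SerializeMaybe(const char* value,
                               std::vector<std::string>* builder) {
      if (value != nullptr) builder->push_back(value);
    }

    int main() {
      std::vector<std::string> builder = {"["};
      const char* elements[] = {"1", nullptr, "3"};  // like [1, undefined, 3]
      SerializeMaybe(elements[0], &builder);
      for (int i = 1; i < 3; i++) {
        builder.push_back(",");
        size_t before = builder.size();
        SerializeMaybe(elements[i], &builder);
        // Nothing was emitted: turn the trailing "," into ",null" in place.
        if (builder.size() == before) builder[before - 1] = ",null";
      }
      builder.push_back("]");
      for (const std::string& s : builder) fputs(s.c_str(), stdout);
      putchar('\n');  // prints [1,null,3]
      return 0;
    }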
@@ -205,31 +241,31 @@ function BasicSerializeObject(value, stack, builder) {
throw MakeTypeError('circular_structure', []);
}
builder.push("{");
+ var first = true;
for (var p in value) {
if (%HasLocalProperty(value, p)) {
- builder.push(%QuoteJSONString(p));
+ if (!first) {
+ builder.push(%QuoteJSONStringComma(p));
+ } else {
+ builder.push(%QuoteJSONString(p));
+ }
builder.push(":");
var before = builder.length;
- BasicJSONSerialize(p, value, stack, builder);
+ BasicJSONSerialize(p, value[p], stack, builder);
if (before == builder.length) {
builder.pop();
builder.pop();
} else {
- builder.push(",");
+ first = false;
}
}
}
stack.pop();
- if (builder.pop() != ",") {
- builder.push("{}"); // Object has no own properties. Push "{" back on.
- } else {
- builder.push("}");
- }
+ builder.push("}");
}
-function BasicJSONSerialize(key, holder, stack, builder) {
- var value = holder[key];
+function BasicJSONSerialize(key, value, stack, builder) {
if (IS_SPEC_OBJECT(value)) {
var toJSON = value.toJSON;
if (IS_FUNCTION(toJSON)) {
@@ -266,7 +302,7 @@ function BasicJSONSerialize(key, holder, stack, builder) {
function JSONStringify(value, replacer, space) {
if (%_ArgumentsLength() == 1) {
var builder = [];
- BasicJSONSerialize('', {'': value}, [], builder);
+ BasicJSONSerialize('', value, [], builder);
if (builder.length == 0) return;
var result = %_FastAsciiArrayJoin(builder, "");
if (!IS_UNDEFINED(result)) return result;
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index e0f2e621..8e7c35f5 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -425,7 +425,7 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
- Vector<int32_t> output) {
+ Vector<int> output) {
Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
ASSERT(index >= 0);
@@ -521,8 +521,8 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
OffsetsVector registers(required_registers);
IrregexpResult res = RegExpImpl::IrregexpExecOnce(
- jsregexp, subject, previous_index, Vector<int32_t>(registers.vector(),
- registers.length()));
+ jsregexp, subject, previous_index, Vector<int>(registers.vector(),
+ registers.length()));
if (res == RE_SUCCESS) {
int capture_register_count =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 6f04be36..af28a872 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -114,7 +114,7 @@ class RegExpImpl {
static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
- Vector<int32_t> registers);
+ Vector<int> registers);
// Execute an Irregexp bytecode pattern.
// On a successful match, the result is a JSArray containing
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index abdef093..2bbc6b65 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -745,10 +745,10 @@ void LAllocator::AddConstraintsGapMove(int index,
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands cur = move_operands->at(i);
- LOperand* cur_to = cur.to();
+ LOperand* cur_to = cur.destination();
if (cur_to->IsUnallocated()) {
if (cur_to->VirtualRegister() == from->VirtualRegister()) {
- move->AddMove(cur.from(), to);
+ move->AddMove(cur.source(), to);
return;
}
}
@@ -828,6 +828,10 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
AllocateFixed(cur_input, gap_index + 1, is_tagged);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
} else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
+ // The live range of writable input registers always goes until the end
+ // of the instruction.
+ ASSERT(!cur_input->IsUsedAtStart());
+
LUnallocated* input_copy = cur_input->CopyUnconstrained();
cur_input->set_virtual_register(next_virtual_register_++);
@@ -837,7 +841,6 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
cur_input->virtual_register() - first_artificial_register_);
}
- second->AddTemp(cur_input);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
}
}
@@ -893,8 +896,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands* cur = &move_operands->at(i);
if (cur->IsIgnored()) continue;
- LOperand* from = cur->from();
- LOperand* to = cur->to();
+ LOperand* from = cur->source();
+ LOperand* to = cur->destination();
HPhi* phi = LookupPhi(to);
LOperand* hint = to;
if (phi != NULL) {
@@ -1214,9 +1217,9 @@ void LAllocator::BuildLiveRanges() {
LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
for (int j = 0; j < move->move_operands()->length(); ++j) {
- LOperand* to = move->move_operands()->at(j).to();
+ LOperand* to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
- hint = move->move_operands()->at(j).from();
+ hint = move->move_operands()->at(j).source();
phi_operand = to;
break;
}
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 454e3024..48c65631 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -50,7 +50,6 @@ class LArgument;
class LChunk;
class LConstantOperand;
class LGap;
-class LInstruction;
class LParallelMove;
class LPointerMap;
class LStackSlot;
@@ -322,27 +321,49 @@ class LUnallocated: public LOperand {
class LMoveOperands BASE_EMBEDDED {
public:
- LMoveOperands(LOperand* from, LOperand* to) : from_(from), to_(to) { }
+ LMoveOperands(LOperand* source, LOperand* destination)
+ : source_(source), destination_(destination) {
+ }
+
+ LOperand* source() const { return source_; }
+ void set_source(LOperand* operand) { source_ = operand; }
+
+ LOperand* destination() const { return destination_; }
+ void set_destination(LOperand* operand) { destination_ = operand; }
+
+ // The gap resolver marks moves as "in-progress" by clearing the
+ // destination (but not the source).
+ bool IsPending() const {
+ return destination_ == NULL && source_ != NULL;
+ }
+
+ // True if this move blocks a move into the given destination operand.
+ bool Blocks(LOperand* operand) const {
+ return !IsEliminated() && source()->Equals(operand);
+ }
- LOperand* from() const { return from_; }
- LOperand* to() const { return to_; }
+ // A move is redundant if it has been eliminated, if its source and
+ // destination are the same, or if its destination is ignored.
bool IsRedundant() const {
- return IsEliminated() || from_->Equals(to_) || IsIgnored();
+ return IsEliminated() || source_->Equals(destination_) || IsIgnored();
}
- bool IsEliminated() const { return from_ == NULL; }
+
bool IsIgnored() const {
- if (to_ != NULL && to_->IsUnallocated() &&
- LUnallocated::cast(to_)->HasIgnorePolicy()) {
- return true;
- }
- return false;
+ return destination_ != NULL &&
+ destination_->IsUnallocated() &&
+ LUnallocated::cast(destination_)->HasIgnorePolicy();
}
- void Eliminate() { from_ = to_ = NULL; }
+ // We clear both operands to indicate a move that has been eliminated.
+ void Eliminate() { source_ = destination_ = NULL; }
+ bool IsEliminated() const {
+ ASSERT(source_ != NULL || destination_ == NULL);
+ return source_ == NULL;
+ }
private:
- LOperand* from_;
- LOperand* to_;
+ LOperand* source_;
+ LOperand* destination_;
};
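
A move therefore has three observable states: live (both operands set), pending
(destination cleared while the gap resolver performs it), and eliminated (both
operands cleared). A standalone sketch of those invariants, with Operand as an
illustrative stand-in for LOperand:

    #include <cassert>

    struct Operand {
      int id;
      bool Equals(const Operand* other) const { return id == other->id; }
    };

    struct MoveOperands {
      Operand* source;
      Operand* destination;
      bool IsPending() const { return destination == nullptr && source != nullptr; }
      bool IsEliminated() const { return source == nullptr; }
      // This move blocks writes into operand while it still reads from it.
      bool Blocks(const Operand* operand) const {
        return !IsEliminated() && source->Equals(operand);
      }
      void Eliminate() { source = destination = nullptr; }
    };

    int main() {
      Operand a = {1}, b = {2};
      MoveOperands m = {&a, &b};     // the move b = a
      assert(m.Blocks(&a));          // a may not be clobbered yet
      m.destination = nullptr;       // resolver marks the move in-progress
      assert(m.IsPending());
      m.Eliminate();                 // move performed; both operands cleared
      assert(m.IsEliminated() && !m.Blocks(&a));
      return 0;
    }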
@@ -706,6 +727,7 @@ class LiveRange: public ZoneObject {
bool HasAllocatedSpillOperand() const {
return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
}
+
LOperand* GetSpillOperand() const { return spill_operand_; }
void SetSpillOperand(LOperand* operand) {
ASSERT(!operand->IsUnallocated());
@@ -723,7 +745,6 @@ class LiveRange: public ZoneObject {
bool Covers(LifetimePosition position);
LifetimePosition FirstIntersection(LiveRange* other);
-
// Add a new interval or a new use position to this live range.
void EnsureInterval(LifetimePosition start, LifetimePosition end);
void AddUseInterval(LifetimePosition start, LifetimePosition end);
diff --git a/src/lithium.cc b/src/lithium.cc
index 92e81d32..d6cff256 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -30,149 +30,66 @@
namespace v8 {
namespace internal {
-
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand)
- : nodes_(4),
- identified_cycles_(4),
- result_(4),
- marker_operand_(marker_operand),
- next_visited_id_(0) {
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
+bool LParallelMove::IsRedundant() const {
+ for (int i = 0; i < move_operands_.length(); ++i) {
+ if (!move_operands_[i].IsRedundant()) return false;
}
+ return true;
}
-const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i]);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
+void LParallelMove::PrintDataTo(StringStream* stream) const {
+ bool first = true;
+ for (int i = 0; i < move_operands_.length(); ++i) {
+ if (!move_operands_[i].IsEliminated()) {
+ LOperand* source = move_operands_[i].source();
+ LOperand* destination = move_operands_[i].destination();
+ if (!first) stream->Add(" ");
+ first = false;
+ if (source->Equals(destination)) {
+ destination->PrintTo(stream);
+ } else {
+ destination->PrintTo(stream);
+ stream->Add(" = ");
+ source->PrintTo(stream);
}
- if (!node->IsResolved()) ++unresolved_nodes;
+ stream->Add(";");
}
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start) {
- ZoneList<LOperand*> circle_operands(8);
- circle_operands.Add(marker_operand_);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- circle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- circle_operands.Add(marker_operand_);
-
- for (int i = circle_operands.length() - 1; i > 0; --i) {
- LOperand* from = circle_operands[i];
- LOperand* to = circle_operands[i - 1];
- AddResultMove(from, to);
}
}
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
+void LEnvironment::PrintTo(StringStream* stream) {
+ stream->Add("[id=%d|", ast_id());
+ stream->Add("[parameters=%d|", parameter_count());
+ stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
+ for (int i = 0; i < values_.length(); ++i) {
+ if (i != 0) stream->Add(";");
+ if (values_[i] == NULL) {
+ stream->Add("[hole]");
+ } else {
+ values_[i]->PrintTo(stream);
+ }
}
-
- return cur == b;
+ stream->Add("]");
}
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
+void LPointerMap::RecordPointer(LOperand* op) {
+ // Do not record arguments as pointers.
+ if (op->IsStackSlot() && op->index() < 0) return;
+ ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ pointer_operands_.Add(op);
}
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.from()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.from(), move.to());
- } else {
- LGapNode* from = LookupNode(move.from());
- LGapNode* to = LookupNode(move.to());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a circle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
+void LPointerMap::PrintTo(StringStream* stream) {
+ stream->Add("{");
+ for (int i = 0; i < pointer_operands_.length(); ++i) {
+ if (i != 0) stream->Add(";");
+ pointer_operands_[i]->PrintTo(stream);
}
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
+ stream->Add("} @%d", position());
}
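
Per the comment in RecordPointer, stack slots with negative indices denote
arguments, which the pointer map need not visit. A reduced sketch of the filter,
with Slot as an illustrative stand-in for LOperand:

    #include <vector>

    struct Slot { bool is_stack_slot; int index; };

    // Keep only operands the safepoint's pointer map must visit.
    void RecordPointer(const Slot& op, std::vector<Slot>* pointer_operands) {
      // Argument slots carry negative indices; skip them.
      if (op.is_stack_slot && op.index < 0) return;
      pointer_operands->push_back(op);
    }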
diff --git a/src/lithium.h b/src/lithium.h
index 0ea37699..5f7c92fc 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -28,36 +28,139 @@
#ifndef V8_LITHIUM_H_
#define V8_LITHIUM_H_
+#include "hydrogen.h"
#include "lithium-allocator.h"
+#include "safepoint-table.h"
namespace v8 {
namespace internal {
-class LGapNode;
+class LParallelMove : public ZoneObject {
+ public:
+ LParallelMove() : move_operands_(4) { }
+
+ void AddMove(LOperand* from, LOperand* to) {
+ move_operands_.Add(LMoveOperands(from, to));
+ }
+
+ bool IsRedundant() const;
+
+ const ZoneList<LMoveOperands>* move_operands() const {
+ return &move_operands_;
+ }
-class LGapResolver BASE_EMBEDDED {
+ void PrintDataTo(StringStream* stream) const;
+
+ private:
+ ZoneList<LMoveOperands> move_operands_;
+};
+
+
+class LPointerMap: public ZoneObject {
public:
- LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
- const ZoneList<LMoveOperands>* ResolveInReverseOrder();
+ explicit LPointerMap(int position)
+ : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+
+ const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+ int position() const { return position_; }
+ int lithium_position() const { return lithium_position_; }
+
+ void set_lithium_position(int pos) {
+ ASSERT(lithium_position_ == -1);
+ lithium_position_ = pos;
+ }
+
+ void RecordPointer(LOperand* op);
+ void PrintTo(StringStream* stream);
private:
- LGapNode* LookupNode(LOperand* operand);
- bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
- bool CanReach(LGapNode* a, LGapNode* b);
- void RegisterMove(LMoveOperands move);
- void AddResultMove(LOperand* from, LOperand* to);
- void AddResultMove(LGapNode* from, LGapNode* to);
- void ResolveCycle(LGapNode* start);
-
- ZoneList<LGapNode*> nodes_;
- ZoneList<LGapNode*> identified_cycles_;
- ZoneList<LMoveOperands> result_;
- LOperand* marker_operand_;
- int next_visited_id_;
- int bailout_after_ast_id_;
+ ZoneList<LOperand*> pointer_operands_;
+ int position_;
+ int lithium_position_;
};
+class LEnvironment: public ZoneObject {
+ public:
+ LEnvironment(Handle<JSFunction> closure,
+ int ast_id,
+ int parameter_count,
+ int argument_count,
+ int value_count,
+ LEnvironment* outer)
+ : closure_(closure),
+ arguments_stack_height_(argument_count),
+ deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
+ translation_index_(-1),
+ ast_id_(ast_id),
+ parameter_count_(parameter_count),
+ values_(value_count),
+ representations_(value_count),
+ spilled_registers_(NULL),
+ spilled_double_registers_(NULL),
+ outer_(outer) {
+ }
+
+ Handle<JSFunction> closure() const { return closure_; }
+ int arguments_stack_height() const { return arguments_stack_height_; }
+ int deoptimization_index() const { return deoptimization_index_; }
+ int translation_index() const { return translation_index_; }
+ int ast_id() const { return ast_id_; }
+ int parameter_count() const { return parameter_count_; }
+ LOperand** spilled_registers() const { return spilled_registers_; }
+ LOperand** spilled_double_registers() const {
+ return spilled_double_registers_;
+ }
+ const ZoneList<LOperand*>* values() const { return &values_; }
+ LEnvironment* outer() const { return outer_; }
+
+ void AddValue(LOperand* operand, Representation representation) {
+ values_.Add(operand);
+ representations_.Add(representation);
+ }
+
+ bool HasTaggedValueAt(int index) const {
+ return representations_[index].IsTagged();
+ }
+
+ void Register(int deoptimization_index, int translation_index) {
+ ASSERT(!HasBeenRegistered());
+ deoptimization_index_ = deoptimization_index;
+ translation_index_ = translation_index;
+ }
+ bool HasBeenRegistered() const {
+ return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
+ }
+
+ void SetSpilledRegisters(LOperand** registers,
+ LOperand** double_registers) {
+ spilled_registers_ = registers;
+ spilled_double_registers_ = double_registers;
+ }
+
+ void PrintTo(StringStream* stream);
+
+ private:
+ Handle<JSFunction> closure_;
+ int arguments_stack_height_;
+ int deoptimization_index_;
+ int translation_index_;
+ int ast_id_;
+ int parameter_count_;
+ ZoneList<LOperand*> values_;
+ ZoneList<Representation> representations_;
+
+ // Arrays of spill slot operands, indexed by allocation index, for
+ // registers that are also in spill slots at an OSR entry. NULL for
+ // environments that do not correspond to an OSR entry.
+ LOperand** spilled_registers_;
+ LOperand** spilled_double_registers_;
+
+ LEnvironment* outer_;
+
+ friend class LCodegen;
+};
+
} } // namespace v8::internal
#endif // V8_LITHIUM_H_
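
An environment is registered exactly once, when its deoptimization data is
emitted, and HasBeenRegistered just tests the sentinel index. A reduced sketch
of that contract; the sentinel value here is illustrative, not the actual
Safepoint::kNoDeoptimizationIndex:

    #include <cassert>

    const int kNoDeoptimizationIndex = -1;  // illustrative sentinel

    struct Environment {
      int deoptimization_index = kNoDeoptimizationIndex;
      int translation_index = -1;
      bool HasBeenRegistered() const {
        return deoptimization_index != kNoDeoptimizationIndex;
      }
      void Register(int deopt_index, int trans_index) {
        assert(!HasBeenRegistered());  // one-shot registration
        deoptimization_index = deopt_index;
        translation_index = trans_index;
      }
    };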
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index 7ed22c84..e05c53ce 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -980,15 +980,15 @@ Debug.LiveEdit = new function() {
// LiveEdit main entry point: changes a script text to a new string.
function SetScriptSource(script, new_source, preview_only, change_log) {
var old_source = script.source;
- var diff = CompareStringsLinewise(old_source, new_source);
+ var diff = CompareStrings(old_source, new_source);
return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
change_log);
}
// Function is public.
this.SetScriptSource = SetScriptSource;
- function CompareStringsLinewise(s1, s2) {
- return %LiveEditCompareStringsLinewise(s1, s2);
+ function CompareStrings(s1, s2) {
+ return %LiveEditCompareStrings(s1, s2);
}
// Applies the change to the script.
@@ -1076,7 +1076,7 @@ Debug.LiveEdit = new function() {
// Functions are public for tests.
this.TestApi = {
PosTranslator: PosTranslator,
- CompareStringsLinewise: CompareStringsLinewise,
+ CompareStrings: CompareStrings,
ApplySingleChunkPatch: ApplySingleChunkPatch
}
}
diff --git a/src/liveedit.cc b/src/liveedit.cc
index c4cb68e7..b6ad4cf5 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -275,6 +275,82 @@ static bool CompareSubstrings(Handle<String> s1, int pos1,
}
+// A helper class that writes chunk numbers into a JSArray.
+// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
+class CompareOutputArrayWriter {
+ public:
+ CompareOutputArrayWriter()
+ : array_(Factory::NewJSArray(10)), current_size_(0) {}
+
+ Handle<JSArray> GetResult() {
+ return array_;
+ }
+
+ void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
+ SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
+ SetElement(array_, current_size_ + 1,
+ Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
+ SetElement(array_, current_size_ + 2,
+ Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
+ current_size_ += 3;
+ }
+
+ private:
+ Handle<JSArray> array_;
+ int current_size_;
+};
+
+
+// Represents 2 strings as 2 arrays of tokens.
+// TODO(LiveEdit): Currently it's actually an array of characters.
+// Make array of tokens instead.
+class TokensCompareInput : public Comparator::Input {
+ public:
+ TokensCompareInput(Handle<String> s1, int offset1, int len1,
+ Handle<String> s2, int offset2, int len2)
+ : s1_(s1), offset1_(offset1), len1_(len1),
+ s2_(s2), offset2_(offset2), len2_(len2) {
+ }
+ virtual int getLength1() {
+ return len1_;
+ }
+ virtual int getLength2() {
+ return len2_;
+ }
+ bool equals(int index1, int index2) {
+ return s1_->Get(offset1_ + index1) == s2_->Get(offset2_ + index2);
+ }
+
+ private:
+ Handle<String> s1_;
+ int offset1_;
+ int len1_;
+ Handle<String> s2_;
+ int offset2_;
+ int len2_;
+};
+
+
+// Stores compare result in JSArray. Converts substring positions
+// to absolute positions.
+class TokensCompareOutput : public Comparator::Output {
+ public:
+ TokensCompareOutput(CompareOutputArrayWriter* array_writer,
+ int offset1, int offset2)
+ : array_writer_(array_writer), offset1_(offset1), offset2_(offset2) {
+ }
+
+ void AddChunk(int pos1, int pos2, int len1, int len2) {
+ array_writer_->WriteChunk(pos1 + offset1_, pos2 + offset2_, len1, len2);
+ }
+
+ private:
+ CompareOutputArrayWriter* array_writer_;
+ int offset1_;
+ int offset2_;
+};
+
+
// Wraps raw n-elements line_ends array as a list of n+1 lines. The last line
// never has terminating new line character.
class LineEndsWrapper {
@@ -350,13 +426,14 @@ class LineArrayCompareInput : public Comparator::Input {
};
-// Stores compare result in JSArray. Each chunk is stored as 3 array elements:
-// (pos1_begin, pos1_end, pos2_end).
-class LineArrayCompareOutput : public Comparator::Output {
+// Stores compare result in JSArray. For each chunk it tries to conduct
+// a fine-grained, token-wise nested diff.
+class TokenizingLineArrayCompareOutput : public Comparator::Output {
public:
- LineArrayCompareOutput(LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
- : array_(Factory::NewJSArray(10)), current_size_(0),
- line_ends1_(line_ends1), line_ends2_(line_ends2) {
+ TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
+ LineEndsWrapper line_ends2,
+ Handle<String> s1, Handle<String> s2)
+ : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2) {
}
void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
@@ -365,33 +442,43 @@ class LineArrayCompareOutput : public Comparator::Output {
int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
int char_len2 = line_ends2_.GetLineStart(line_pos2 + line_len2) - char_pos2;
- SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
- SetElement(array_, current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
- SetElement(array_, current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
- current_size_ += 3;
+ if (char_len1 < CHUNK_LEN_LIMIT && char_len2 < CHUNK_LEN_LIMIT) {
+ // Chunk is small enough to conduct a nested token-level diff.
+ HandleScope subTaskScope;
+
+ TokensCompareInput tokens_input(s1_, char_pos1, char_len1,
+ s2_, char_pos2, char_len2);
+ TokensCompareOutput tokens_output(&array_writer_, char_pos1,
+ char_pos2);
+
+ Comparator::CalculateDifference(&tokens_input, &tokens_output);
+ } else {
+ array_writer_.WriteChunk(char_pos1, char_pos2, char_len1, char_len2);
+ }
}
Handle<JSArray> GetResult() {
- return array_;
+ return array_writer_.GetResult();
}
private:
- Handle<JSArray> array_;
- int current_size_;
+ static const int CHUNK_LEN_LIMIT = 800;
+
+ CompareOutputArrayWriter array_writer_;
LineEndsWrapper line_ends1_;
LineEndsWrapper line_ends2_;
+ Handle<String> s1_;
+ Handle<String> s2_;
};
-Handle<JSArray> LiveEdit::CompareStringsLinewise(Handle<String> s1,
- Handle<String> s2) {
+Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
+ Handle<String> s2) {
LineEndsWrapper line_ends1(s1);
LineEndsWrapper line_ends2(s2);
LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
- LineArrayCompareOutput output(line_ends1, line_ends2);
+ TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
Comparator::CalculateDifference(&input, &output);
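
The result stays a flat array of triplets (pos1_begin, pos1_end, pos2_end); the
begin position in the second string is implied, since the strings match between
chunks and the running length delta carries it forward. A sketch of decoding the
triplets, in plain C++ rather than the V8 API:

    #include <cstdio>

    // Chunk i occupies result[3*i .. 3*i+2] = {pos1_begin, pos1_end, pos2_end}.
    void PrintChunks(const int* result, int length) {
      int delta = 0;  // cumulative (len2 - len1) over preceding chunks
      for (int i = 0; i + 2 < length; i += 3) {
        int pos1_begin = result[i];
        int pos1_end = result[i + 1];
        int pos2_begin = pos1_begin + delta;  // implied by matching gaps
        int pos2_end = result[i + 2];
        printf("old [%d,%d) -> new [%d,%d)\n",
               pos1_begin, pos1_end, pos2_begin, pos2_end);
        delta += (pos2_end - pos2_begin) - (pos1_end - pos1_begin);
      }
    }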
diff --git a/src/liveedit.h b/src/liveedit.h
index 3632180f..5f2c99c3 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -126,10 +126,11 @@ class LiveEdit : AllStatic {
FUNCTION_REPLACED_ON_ACTIVE_STACK = 5
};
- // Compares 2 strings line-by-line and returns diff in form of array of
- // triplets (pos1, pos1_end, pos2_end) describing list of diff chunks.
- static Handle<JSArray> CompareStringsLinewise(Handle<String> s1,
- Handle<String> s2);
+ // Compares 2 strings line-by-line, then token-wise and returns diff in form
+ // of array of triplets (pos1, pos1_end, pos2_end) describing list
+ // of diff chunks.
+ static Handle<JSArray> CompareStrings(Handle<String> s1,
+ Handle<String> s2);
};
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 8ade41cd..a946ffa2 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -30,6 +30,7 @@
#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
+#include "gdb-jit.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
@@ -125,6 +126,12 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
if (!Heap::map_space()->MapPointersEncodable())
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (FLAG_gdbjit) {
+ // If the GDBJIT interface is active, disable compaction.
+ compacting_collection_ = false;
+ }
+#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
@@ -2906,6 +2913,11 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
+#ifdef ENABLE_GDB_JIT_INTERFACE
+ if (obj->IsCode()) {
+ GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
+ }
+#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
PROFILE(CodeDeleteEvent(obj->address()));
diff --git a/src/messages.js b/src/messages.js
index c19f4a9a..a30ef8a9 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -97,6 +97,12 @@ function ToDetailString(obj) {
var constructorName = constructor.name;
if (!constructorName) return ToString(obj);
return "#<" + GetInstanceName(constructorName) + ">";
+ } else if (obj instanceof $Error) {
+ // When formatting internally created error messages, do not
+ // invoke overwritten error toString methods but explicitly use
+ // the error toString method defined below. This avoids leaking error
+ // objects between script tags in a browser setting.
+ return %_CallFunction(obj, errorToString);
} else {
return ToString(obj);
}
@@ -943,15 +949,28 @@ function DefineError(f) {
}
%FunctionSetInstanceClassName(f, 'Error');
%SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
- f.prototype.name = name;
+ // The name property on the prototype of error objects is not
+ // specified as being read-only and dont-delete. However, allowing
+ // it to be overwritten can leak error objects between script blocks
+ // in the same context in a browser setting. Therefore we fix the
+ // name.
+ %SetProperty(f.prototype, "name", name, READ_ONLY | DONT_DELETE);
%SetCode(f, function(m) {
if (%_IsConstructCall()) {
+ // Define all the expected properties directly on the error
+ // object. This avoids going through getters and setters defined
+ // on prototype objects.
+ %IgnoreAttributesAndSetProperty(this, 'stack', void 0);
+ %IgnoreAttributesAndSetProperty(this, 'arguments', void 0);
+ %IgnoreAttributesAndSetProperty(this, 'type', void 0);
if (m === kAddMessageAccessorsMarker) {
+ // DefineOneShotAccessor always inserts a message property and
+ // ignores setters.
DefineOneShotAccessor(this, 'message', function (obj) {
return FormatMessage({type: obj.type, args: obj.arguments});
});
} else if (!IS_UNDEFINED(m)) {
- this.message = ToString(m);
+ %IgnoreAttributesAndSetProperty(this, 'message', ToString(m));
}
captureStackTrace(this, f);
} else {
@@ -987,14 +1006,17 @@ $Error.captureStackTrace = captureStackTrace;
// Setup extra properties of the Error.prototype object.
$Error.prototype.message = '';
-%SetProperty($Error.prototype, 'toString', function toString() {
+function errorToString() {
var type = this.type;
if (type && !this.hasOwnProperty("message")) {
return this.name + ": " + FormatMessage({ type: type, args: this.arguments });
}
- var message = this.message;
- return this.name + (message ? (": " + message) : "");
-}, DONT_ENUM);
+ var message = this.hasOwnProperty("message") ? (": " + this.message) : "";
+ return this.name + message;
+}
+
+%FunctionSetName(errorToString, 'toString');
+%SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM);
// Boilerplate for exceptions for stack overflows. Used from
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index a3552c71..f9c57e69 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -670,7 +670,7 @@ void JSFunctionResultCache::JSFunctionResultCacheVerify() {
int finger = Smi::cast(get(kFingerIndex))->value();
ASSERT(kEntriesIndex <= finger);
- ASSERT(finger < size || finger == kEntriesIndex);
+ ASSERT((finger < size) || (finger == kEntriesIndex && finger == size));
ASSERT_EQ(0, finger % kEntrySize);
if (FLAG_enable_slow_asserts) {
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 3c9dc823..df44674a 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1978,13 +1978,13 @@ void ExternalTwoByteString::set_resource(
void JSFunctionResultCache::MakeZeroSize() {
- set(kFingerIndex, Smi::FromInt(kEntriesIndex));
- set(kCacheSizeIndex, Smi::FromInt(kEntriesIndex));
+ set_finger_index(kEntriesIndex);
+ set_size(kEntriesIndex);
}
void JSFunctionResultCache::Clear() {
- int cache_size = Smi::cast(get(kCacheSizeIndex))->value();
+ int cache_size = size();
Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
MemsetPointer(entries_start,
Heap::the_hole_value(),
@@ -1993,6 +1993,26 @@ void JSFunctionResultCache::Clear() {
}
+int JSFunctionResultCache::size() {
+ return Smi::cast(get(kCacheSizeIndex))->value();
+}
+
+
+void JSFunctionResultCache::set_size(int size) {
+ set(kCacheSizeIndex, Smi::FromInt(size));
+}
+
+
+int JSFunctionResultCache::finger_index() {
+ return Smi::cast(get(kFingerIndex))->value();
+}
+
+
+void JSFunctionResultCache::set_finger_index(int finger_index) {
+ set(kFingerIndex, Smi::FromInt(finger_index));
+}
+
+
byte ByteArray::get(int index) {
ASSERT(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -2238,7 +2258,6 @@ InstanceType Map::instance_type() {
void Map::set_instance_type(InstanceType value) {
- ASSERT(0 <= value && value < 256);
WRITE_BYTE_FIELD(this, kInstanceTypeOffset, value);
}
@@ -2397,6 +2416,12 @@ InlineCacheState Code::ic_state() {
}
+Code::ExtraICState Code::extra_ic_state() {
+ ASSERT(is_inline_cache_stub());
+ return ExtractExtraICStateFromFlags(flags());
+}
+
+
PropertyType Code::type() {
ASSERT(ic_state() == MONOMORPHIC);
return ExtractTypeFromFlags(flags());
@@ -2573,14 +2598,20 @@ bool Code::is_inline_cache_stub() {
Code::Flags Code::ComputeFlags(Kind kind,
InLoopFlag in_loop,
InlineCacheState ic_state,
+ ExtraICState extra_ic_state,
PropertyType type,
int argc,
InlineCacheHolderFlag holder) {
+ // Extra IC state is only allowed for monomorphic call IC stubs.
+ ASSERT(extra_ic_state == kNoExtraICState ||
+ (kind == CALL_IC && (ic_state == MONOMORPHIC ||
+ ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)));
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
if (in_loop) bits |= kFlagsICInLoopMask;
bits |= ic_state << kFlagsICStateShift;
bits |= type << kFlagsTypeShift;
+ bits |= extra_ic_state << kFlagsExtraICStateShift;
bits |= argc << kFlagsArgumentsCountShift;
if (holder == PROTOTYPE_MAP) bits |= kFlagsCacheInPrototypeMapMask;
// Cast to flags and validate result before returning it.
@@ -2589,6 +2620,7 @@ Code::Flags Code::ComputeFlags(Kind kind,
ASSERT(ExtractICStateFromFlags(result) == ic_state);
ASSERT(ExtractICInLoopFromFlags(result) == in_loop);
ASSERT(ExtractTypeFromFlags(result) == type);
+ ASSERT(ExtractExtraICStateFromFlags(result) == extra_ic_state);
ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
return result;
}
@@ -2596,10 +2628,12 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
PropertyType type,
+ ExtraICState extra_ic_state,
InlineCacheHolderFlag holder,
InLoopFlag in_loop,
int argc) {
- return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc, holder);
+ return ComputeFlags(
+ kind, in_loop, MONOMORPHIC, extra_ic_state, type, argc, holder);
}
@@ -2615,6 +2649,12 @@ InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
}
+Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
+ int bits = (flags & kFlagsExtraICStateMask) >> kFlagsExtraICStateShift;
+ return static_cast<ExtraICState>(bits);
+}
+
+
InLoopFlag Code::ExtractICInLoopFromFlags(Flags flags) {
int bits = (flags & kFlagsICInLoopMask);
return bits != 0 ? IN_LOOP : NOT_IN_LOOP;
@@ -2990,13 +3030,6 @@ Code* SharedFunctionInfo::unchecked_code() {
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
- // If optimization has been disabled for the shared function info,
- // reflect that in the code object so it will not be counted as
- // optimizable code.
- ASSERT(value->kind() != Code::FUNCTION ||
- !value->optimizable() ||
- this->code() == Builtins::builtin(Builtins::Illegal) ||
- this->allows_lazy_compilation());
WRITE_FIELD(this, kCodeOffset, value);
CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode);
}
@@ -3216,28 +3249,28 @@ int JSFunction::NumberOfLiterals() {
Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
- ASSERT(0 <= id && id < kJSBuiltinsCount);
+ ASSERT(id < kJSBuiltinsCount); // id is unsigned.
return READ_FIELD(this, OffsetOfFunctionWithId(id));
}
void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
Object* value) {
- ASSERT(0 <= id && id < kJSBuiltinsCount);
+ ASSERT(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
}
Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) {
- ASSERT(0 <= id && id < kJSBuiltinsCount);
+ ASSERT(id < kJSBuiltinsCount); // id is unsigned.
return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id)));
}
void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
Code* value) {
- ASSERT(0 <= id && id < kJSBuiltinsCount);
+ ASSERT(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
ASSERT(!Heap::InNewSpace(value));
}
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 6510ca80..ea6d7954 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -146,7 +146,7 @@ class VisitorDispatchTable {
}
void Register(StaticVisitorBase::VisitorId id, Callback callback) {
- ASSERT((0 <= id) && (id < StaticVisitorBase::kVisitorIdCount));
+ ASSERT(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned.
callbacks_[id] = callback;
}
diff --git a/src/objects.cc b/src/objects.cc
index f3f80032..36a8e5c2 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -5399,7 +5399,8 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
void JSFunction::MarkForLazyRecompilation() {
ASSERT(is_compiled() && !IsOptimized());
- ASSERT(shared()->allows_lazy_compilation());
+ ASSERT(shared()->allows_lazy_compilation() ||
+ code()->optimizable());
ReplaceCode(Builtins::builtin(Builtins::LazyRecompile));
}
@@ -5987,14 +5988,9 @@ int Code::SourceStatementPosition(Address pc) {
}
-uint8_t* Code::GetSafepointEntry(Address pc) {
+SafepointEntry Code::GetSafepointEntry(Address pc) {
SafepointTable table(this);
- unsigned pc_offset = static_cast<unsigned>(pc - instruction_start());
- for (unsigned i = 0; i < table.length(); i++) {
- // TODO(kasperl): Replace the linear search with binary search.
- if (table.GetPcOffset(i) == pc_offset) return table.GetEntry(i);
- }
- return NULL;
+ return table.FindEntry(pc);
}
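
FindEntry encapsulates the per-pc lookup that used to be inlined here. Since the
pc offsets in the table are sorted, the linear scan the removed TODO complained
about can become a binary search; a minimal sketch under that assumption, not
necessarily how SafepointTable::FindEntry is actually implemented:

    #include <cstdint>

    struct SafepointTableSketch {
      const uint32_t* pc_offsets_;  // sorted ascending, one per safepoint
      unsigned length_;

      // Returns the entry index for pc_offset, or -1 for non-safepoint pcs.
      int FindEntry(uint32_t pc_offset) const {
        unsigned lo = 0, hi = length_;
        while (lo < hi) {  // lower-bound binary search
          unsigned mid = lo + (hi - lo) / 2;
          if (pc_offsets_[mid] < pc_offset) lo = mid + 1; else hi = mid;
        }
        if (lo < length_ && pc_offsets_[lo] == pc_offset) {
          return static_cast<int>(lo);
        }
        return -1;
      }
    };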
@@ -6265,12 +6261,15 @@ void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "%p %4d ", (instruction_start() + pc_offset), pc_offset);
table.PrintEntry(i);
PrintF(out, " (sp -> fp)");
- int deoptimization_index = table.GetDeoptimizationIndex(i);
- if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- PrintF(out, " %6d", deoptimization_index);
+ SafepointEntry entry = table.GetEntry(i);
+ if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
+ PrintF(out, " %6d", entry.deoptimization_index());
} else {
PrintF(out, " <none>");
}
+ if (entry.argument_count() > 0) {
+ PrintF(out, " argc: %d", entry.argument_count());
+ }
PrintF(out, "\n");
}
PrintF(out, "\n");
diff --git a/src/objects.h b/src/objects.h
index 063555e0..f9cab45f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -624,6 +624,71 @@ class MaybeObject BASE_EMBEDDED {
#endif
};
+
+#define OBJECT_TYPE_LIST(V) \
+ V(Smi) \
+ V(HeapObject) \
+ V(Number) \
+
+#define HEAP_OBJECT_TYPE_LIST(V) \
+ V(HeapNumber) \
+ V(String) \
+ V(Symbol) \
+ V(SeqString) \
+ V(ExternalString) \
+ V(ConsString) \
+ V(ExternalTwoByteString) \
+ V(ExternalAsciiString) \
+ V(SeqTwoByteString) \
+ V(SeqAsciiString) \
+ \
+ V(PixelArray) \
+ V(ExternalArray) \
+ V(ExternalByteArray) \
+ V(ExternalUnsignedByteArray) \
+ V(ExternalShortArray) \
+ V(ExternalUnsignedShortArray) \
+ V(ExternalIntArray) \
+ V(ExternalUnsignedIntArray) \
+ V(ExternalFloatArray) \
+ V(ByteArray) \
+ V(JSObject) \
+ V(JSContextExtensionObject) \
+ V(Map) \
+ V(DescriptorArray) \
+ V(DeoptimizationInputData) \
+ V(DeoptimizationOutputData) \
+ V(FixedArray) \
+ V(Context) \
+ V(CatchContext) \
+ V(GlobalContext) \
+ V(JSFunction) \
+ V(Code) \
+ V(Oddball) \
+ V(SharedFunctionInfo) \
+ V(JSValue) \
+ V(StringWrapper) \
+ V(Proxy) \
+ V(Boolean) \
+ V(JSArray) \
+ V(JSRegExp) \
+ V(HashTable) \
+ V(Dictionary) \
+ V(SymbolTable) \
+ V(JSFunctionResultCache) \
+ V(NormalizedMapCache) \
+ V(CompilationCacheTable) \
+ V(CodeCacheHashTable) \
+ V(MapCache) \
+ V(Primitive) \
+ V(GlobalObject) \
+ V(JSGlobalObject) \
+ V(JSBuiltinsObject) \
+ V(JSGlobalProxy) \
+ V(UndetectableObject) \
+ V(AccessCheckNeeded) \
+ V(JSGlobalPropertyCell) \
+
// Object is the abstract superclass for all classes in the
// object hierarchy.
// Object does not use any virtual functions to avoid the
@@ -633,67 +698,10 @@ class MaybeObject BASE_EMBEDDED {
class Object : public MaybeObject {
public:
// Type testing.
- inline bool IsSmi();
- inline bool IsHeapObject();
- inline bool IsHeapNumber();
- inline bool IsString();
- inline bool IsSymbol();
- // See objects-inl.h for more details
- inline bool IsSeqString();
- inline bool IsExternalString();
- inline bool IsExternalTwoByteString();
- inline bool IsExternalAsciiString();
- inline bool IsSeqTwoByteString();
- inline bool IsSeqAsciiString();
- inline bool IsConsString();
-
- inline bool IsNumber();
- inline bool IsByteArray();
- inline bool IsPixelArray();
- inline bool IsExternalArray();
- inline bool IsExternalByteArray();
- inline bool IsExternalUnsignedByteArray();
- inline bool IsExternalShortArray();
- inline bool IsExternalUnsignedShortArray();
- inline bool IsExternalIntArray();
- inline bool IsExternalUnsignedIntArray();
- inline bool IsExternalFloatArray();
- inline bool IsJSObject();
- inline bool IsJSContextExtensionObject();
- inline bool IsMap();
- inline bool IsFixedArray();
- inline bool IsDescriptorArray();
- inline bool IsDeoptimizationInputData();
- inline bool IsDeoptimizationOutputData();
- inline bool IsContext();
- inline bool IsCatchContext();
- inline bool IsGlobalContext();
- inline bool IsJSFunction();
- inline bool IsCode();
- inline bool IsOddball();
- inline bool IsSharedFunctionInfo();
- inline bool IsJSValue();
- inline bool IsStringWrapper();
- inline bool IsProxy();
- inline bool IsBoolean();
- inline bool IsJSArray();
- inline bool IsJSRegExp();
- inline bool IsHashTable();
- inline bool IsDictionary();
- inline bool IsSymbolTable();
- inline bool IsJSFunctionResultCache();
- inline bool IsNormalizedMapCache();
- inline bool IsCompilationCacheTable();
- inline bool IsCodeCacheHashTable();
- inline bool IsMapCache();
- inline bool IsPrimitive();
- inline bool IsGlobalObject();
- inline bool IsJSGlobalObject();
- inline bool IsJSBuiltinsObject();
- inline bool IsJSGlobalProxy();
- inline bool IsUndetectableObject();
- inline bool IsAccessCheckNeeded();
- inline bool IsJSGlobalPropertyCell();
+#define IS_TYPE_FUNCTION_DECL(type_) inline bool Is##type_();
+ OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+ HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
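
Inside class Object the macro pair expands to exactly the declarations it
replaces, one per list entry; for example:

    // IS_TYPE_FUNCTION_DECL(Smi), IS_TYPE_FUNCTION_DECL(HeapNumber) become:
    inline bool IsSmi();
    inline bool IsHeapNumber();
    // ...and so on for every entry in OBJECT_TYPE_LIST / HEAP_OBJECT_TYPE_LIST.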
// Returns true if this object is an instance of the specified
// function template.
@@ -2613,6 +2621,11 @@ class JSFunctionResultCache: public FixedArray {
inline void MakeZeroSize();
inline void Clear();
+ inline int size();
+ inline void set_size(int size);
+ inline int finger_index();
+ inline void set_finger_index(int finger_index);
+
// Casting
static inline JSFunctionResultCache* cast(Object* obj);
@@ -3121,6 +3134,9 @@ class DeoptimizationOutputData: public FixedArray {
};
+class SafepointEntry;
+
+
// Code describes objects with on-the-fly generated machine code.
class Code: public HeapObject {
public:
@@ -3160,6 +3176,10 @@ class Code: public HeapObject {
NUMBER_OF_KINDS = LAST_IC_KIND + 1
};
+ typedef int ExtraICState;
+
+ static const ExtraICState kNoExtraICState = 0;
+
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* Kind2String(Kind kind);
@@ -3195,6 +3215,7 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
inline InlineCacheState ic_state(); // Only valid for IC stubs.
+ inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
inline InLoopFlag ic_in_loop(); // Only valid for IC stubs.
inline PropertyType type(); // Only valid for monomorphic IC stubs.
inline int arguments_count(); // Only valid for call IC stubs.
@@ -3268,9 +3289,8 @@ class Code: public HeapObject {
inline byte compare_state();
inline void set_compare_state(byte value);
- // Get the safepoint entry for the given pc. Returns NULL for
- // non-safepoint pcs.
- uint8_t* GetSafepointEntry(Address pc);
+ // Get the safepoint entry for the given pc.
+ SafepointEntry GetSafepointEntry(Address pc);
// Mark this code object as not having a stack check table. Assumes kind
// is FUNCTION.
@@ -3280,22 +3300,26 @@ class Code: public HeapObject {
Map* FindFirstMap();
// Flags operations.
- static inline Flags ComputeFlags(Kind kind,
- InLoopFlag in_loop = NOT_IN_LOOP,
- InlineCacheState ic_state = UNINITIALIZED,
- PropertyType type = NORMAL,
- int argc = -1,
- InlineCacheHolderFlag holder = OWN_MAP);
+ static inline Flags ComputeFlags(
+ Kind kind,
+ InLoopFlag in_loop = NOT_IN_LOOP,
+ InlineCacheState ic_state = UNINITIALIZED,
+ ExtraICState extra_ic_state = kNoExtraICState,
+ PropertyType type = NORMAL,
+ int argc = -1,
+ InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
PropertyType type,
+ ExtraICState extra_ic_state = kNoExtraICState,
InlineCacheHolderFlag holder = OWN_MAP,
InLoopFlag in_loop = NOT_IN_LOOP,
int argc = -1);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
+ static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
static inline PropertyType ExtractTypeFromFlags(Flags flags);
static inline int ExtractArgumentsCountFromFlags(Flags flags);
@@ -3416,14 +3440,16 @@ class Code: public HeapObject {
static const int kFlagsTypeShift = 4;
static const int kFlagsKindShift = 7;
static const int kFlagsICHolderShift = 11;
- static const int kFlagsArgumentsCountShift = 12;
+ static const int kFlagsExtraICStateShift = 12;
+ static const int kFlagsArgumentsCountShift = 14;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
static const int kFlagsTypeMask = 0x00000070; // 00001110000
static const int kFlagsKindMask = 0x00000780; // 11110000000
static const int kFlagsCacheInPrototypeMapMask = 0x00000800;
- static const int kFlagsArgumentsCountMask = 0xFFFFF000;
+ static const int kFlagsExtraICStateMask = 0x00003000;
+ static const int kFlagsArgumentsCountMask = 0xFFFFC000;
static const int kFlagsNotUsedInLookup =
(kFlagsICInLoopMask | kFlagsTypeMask | kFlagsCacheInPrototypeMapMask);
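
The two extra-IC-state bits are carved out of the bottom of the argument-count
field, which is why the count shift moves from 12 to 14 and its mask loses its
two low bits. A self-contained check of the new layout; the constants are copied
from above, the rest is illustrative:

    #include <cassert>

    const int kFlagsExtraICStateShift = 12;
    const int kFlagsExtraICStateMask = 0x00003000;  // two bits: states 0..3
    const int kFlagsArgumentsCountShift = 14;
    const unsigned kFlagsArgumentsCountMask = 0xFFFFC000;

    int ExtractExtraICState(unsigned flags) {
      return (flags & kFlagsExtraICStateMask) >> kFlagsExtraICStateShift;
    }

    int main() {
      unsigned flags = (2u << kFlagsArgumentsCountShift)  // argc == 2
                     | (1u << kFlagsExtraICStateShift);   // some extra state
      assert(ExtractExtraICState(flags) == 1);
      assert(((flags & kFlagsArgumentsCountMask) >> kFlagsArgumentsCountShift) == 2);
      return 0;
    }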
diff --git a/src/parser.cc b/src/parser.cc
index 5ea1c5e0..6ad9ab31 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -600,7 +600,8 @@ Parser::Parser(Handle<Script> script,
extension_(extension),
pre_data_(pre_data),
fni_(NULL),
- stack_overflow_(false) {
+ stack_overflow_(false),
+ parenthesized_function_(false) {
AstNode::ResetIds();
}
@@ -777,7 +778,8 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
const char* type,
Vector<const char*> args) {
MessageLocation location(script_,
- source_location.beg_pos, source_location.end_pos);
+ source_location.beg_pos,
+ source_location.end_pos);
Handle<JSArray> array = Factory::NewJSArray(args.length());
for (int i = 0; i < args.length(); i++) {
SetElement(array, i, Factory::NewStringFromUtf8(CStrVector(args[i])));
@@ -787,6 +789,21 @@ void Parser::ReportMessageAt(Scanner::Location source_location,
}
+void Parser::ReportMessageAt(Scanner::Location source_location,
+ const char* type,
+ Vector<Handle<String> > args) {
+ MessageLocation location(script_,
+ source_location.beg_pos,
+ source_location.end_pos);
+ Handle<JSArray> array = Factory::NewJSArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ SetElement(array, i, args[i]);
+ }
+ Handle<Object> result = Factory::NewSyntaxError(type, array);
+ Top::Throw(*result, &location);
+}
+
+
// Base class containing common code for the different finder classes used by
// the parser.
class ParserFinder {
@@ -1692,12 +1709,16 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
IterationStatement* target = NULL;
target = LookupContinueTarget(label, CHECK_OK);
if (target == NULL) {
- // Illegal continue statement. To be consistent with KJS we delay
- // reporting of the syntax error until runtime.
- Handle<String> error_type = Factory::illegal_continue_symbol();
- if (!label.is_null()) error_type = Factory::unknown_label_symbol();
- Expression* throw_error = NewThrowSyntaxError(error_type, label);
- return new ExpressionStatement(throw_error);
+ // Illegal continue statement.
+ const char* message = "illegal_continue";
+ Vector<Handle<String> > args;
+ if (!label.is_null()) {
+ message = "unknown_label";
+ args = Vector<Handle<String> >(&label, 1);
+ }
+ ReportMessageAt(scanner().location(), message, args);
+ *ok = false;
+ return NULL;
}
ExpectSemicolon(CHECK_OK);
return new ContinueStatement(target);
@@ -1723,12 +1744,16 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
BreakableStatement* target = NULL;
target = LookupBreakTarget(label, CHECK_OK);
if (target == NULL) {
- // Illegal break statement. To be consistent with KJS we delay
- // reporting of the syntax error until runtime.
- Handle<String> error_type = Factory::illegal_break_symbol();
- if (!label.is_null()) error_type = Factory::unknown_label_symbol();
- Expression* throw_error = NewThrowSyntaxError(error_type, label);
- return new ExpressionStatement(throw_error);
+ // Illegal break statement.
+ const char* message = "illegal_break";
+ Vector<Handle<String> > args;
+ if (!label.is_null()) {
+ message = "unknown_label";
+ args = Vector<Handle<String> >(&label, 1);
+ }
+ ReportMessageAt(scanner().location(), message, args);
+ *ok = false;
+ return NULL;
}
ExpectSemicolon(CHECK_OK);
return new BreakStatement(target);
@@ -2482,9 +2507,13 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// The calls that need special treatment are the
// direct (i.e. not aliased) eval calls. These calls are all of the
// form eval(...) with no explicit receiver object where eval is not
- // declared in the current scope chain. These calls are marked as
- // potentially direct eval calls. Whether they are actually direct calls
- // to eval is determined at run time.
+ // declared in the current scope chain.
+ // These calls are marked as potentially direct eval calls. Whether
+ // they are actually direct calls to eval is determined at run time.
+ // TODO(994): In ES5, it doesn't matter if the "eval" var is declared
+ // in the local scope chain. It only matters that it's called "eval",
+ // is called without an explicit receiver, and refers to the original
+ // eval function.
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL && callee->IsVariable(Factory::eval_symbol())) {
Handle<String> name = callee->name();
@@ -2734,6 +2763,9 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::LPAREN:
Consume(Token::LPAREN);
+ // Heuristically try to detect immediately called functions before
+ // seeing the call parentheses.
+ parenthesized_function_ = (peek() == Token::FUNCTION);
result = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
break;
@@ -3225,8 +3257,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
// Determine if the function will be lazily compiled. The mode can
// only be PARSE_LAZILY if the --lazy flag is true.
- bool is_lazily_compiled =
- mode() == PARSE_LAZILY && top_scope_->HasTrivialOuterContext();
+ bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
+ top_scope_->outer_scope()->is_global_scope() &&
+ top_scope_->HasTrivialOuterContext() &&
+ !parenthesized_function_);
+ parenthesized_function_ = false; // The bit was set for this function only.
int function_block_pos = scanner().location().beg_pos;
int materialized_literal_count;
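
The effect of the new heuristic: in a source like (function() { ... })() the
parser sees LPAREN immediately followed by FUNCTION, sets
parenthesized_function_, and compiles that literal eagerly, since lazily
skipping a body that is called right away would only force a second parse. A
reduced sketch of the decision, with the parser state passed in explicitly
(names here are illustrative):

    // Sketch: a parenthesized function literal is assumed to be invoked
    // immediately, so it is excluded from lazy compilation.
    bool ShouldCompileLazily(bool parse_lazily,
                             bool outer_scope_is_global,
                             bool trivial_outer_context,
                             bool parenthesized_function) {
      return parse_lazily &&
             outer_scope_is_global &&
             trivial_outer_context &&
             !parenthesized_function;
    }
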
diff --git a/src/parser.h b/src/parser.h
index 1dfc1533..0613a8de 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -430,6 +430,9 @@ class Parser {
void ReportMessageAt(Scanner::Location loc,
const char* message,
Vector<const char*> args);
+ void ReportMessageAt(Scanner::Location loc,
+ const char* message,
+ Vector<Handle<String> > args);
protected:
FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info,
@@ -682,6 +685,11 @@ class Parser {
ScriptDataImpl* pre_data_;
FuncNameInferrer* fni_;
bool stack_overflow_;
+ // If true, the next (and immediately following) function literal is
+ // preceded by a parenthesis. Heuristically, that means the function
+ // will be called immediately, so it is never compiled lazily.
+ bool parenthesized_function_;
};
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 04c25a90..dc4493aa 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -45,6 +45,7 @@
#include <errno.h>
#include <ieeefp.h> // finite()
#include <signal.h> // sigemptyset(), etc
+#include <sys/kdi_regs.h>
#undef MAP_TYPE
@@ -493,6 +494,16 @@ class SolarisMutex : public Mutex {
int Unlock() { return pthread_mutex_unlock(&mutex_); }
+ virtual bool TryLock() {
+ int result = pthread_mutex_trylock(&mutex_);
+ // Return false if the lock is busy and locking failed.
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT(result == 0); // Verify no other errors.
+ return true;
+ }
+
private:
pthread_mutex_t mutex_;
};
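
A minimal usage sketch for the new TryLock, assuming a caller that skips its
critical section instead of blocking when the mutex is contended:

    // Hypothetical caller: proceed only if the lock is free; the EBUSY
    // case inside TryLock maps to the early return here.
    void MaybeDoWork(Mutex* mutex) {
      if (!mutex->TryLock()) return;  // contended: do not block
      // ... critical section ...
      mutex->Unlock();
    }
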
@@ -584,21 +595,37 @@ Semaphore* OS::CreateSemaphore(int count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
static Sampler* active_sampler_ = NULL;
+static pthread_t vm_tid_ = 0;
+
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
-
- TickSample sample;
- sample.pc = 0;
- sample.sp = 0;
- sample.fp = 0;
-
- // We always sample the VM state.
- sample.state = VMState::current_state();
-
- active_sampler_->Tick(&sample);
+ if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
+ if (vm_tid_ != pthread_self()) return;
+
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent();
+ if (sample == NULL) sample = &sample_obj;
+
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+ sample->state = Top::current_vm_state();
+
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_ESP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EBP]);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RSP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RBP]);
+#else
+ UNIMPLEMENTED();
+#endif
+ active_sampler_->SampleStack(sample);
+ active_sampler_->Tick(sample);
}
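
The handler depends on being installed with SA_SIGINFO so that its third
argument carries the interrupted thread's ucontext_t; only the gregs indices
(KDIREG_*) are Solaris-specific. A reduced sketch of the installation side,
under that assumption:

    #include <signal.h>

    // Sketch: register a SIGPROF handler that receives the interrupted
    // context; SA_SIGINFO selects the three-argument handler form.
    static void InstallProfilerHandler(
        void (*handler)(int, siginfo_t*, void*)) {
      struct sigaction sa;
      sa.sa_sigaction = handler;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART | SA_SIGINFO;
      sigaction(SIGPROF, &sa, NULL);
    }
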
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 81216e19..bf1737a5 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1474,7 +1474,7 @@ Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
void Thread::set_name(const char* name) {
- strncpy_s(name_, name, sizeof(name_));
+ OS::StrNCpy(Vector<char>(name_, sizeof(name_)), name, strlen(name));
name_[sizeof(name_) - 1] = '\0';
}
diff --git a/src/preparse-data.h b/src/preparse-data.h
index cc82bcc6..bb5707b6 100644
--- a/src/preparse-data.h
+++ b/src/preparse-data.h
@@ -39,7 +39,7 @@ class PreparseDataConstants : public AllStatic {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 5;
+ static const unsigned kCurrentVersion = 6;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index dba30265..3817935f 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -69,8 +69,12 @@ class InputStreamUTF16Buffer : public UC16CharacterStream {
}
}
- virtual void PushBack(uc16 ch) {
+ virtual void PushBack(uc32 ch) {
ASSERT(pos_ > 0);
+ if (ch == kEndOfInput) {
+ pos_--;
+ return;
+ }
if (buffer_cursor_ <= pushback_buffer_) {
// No more room in the current buffer to do pushbacks.
if (pushback_buffer_end_cache_ == NULL) {
@@ -98,7 +102,8 @@ class InputStreamUTF16Buffer : public UC16CharacterStream {
buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
}
}
- pushback_buffer_[buffer_cursor_ - pushback_buffer_- 1] = ch;
+ pushback_buffer_[buffer_cursor_ - pushback_buffer_ - 1] =
+ static_cast<uc16>(ch);
pos_--;
}
diff --git a/src/preparser.cc b/src/preparser.cc
index e05f9037..c0dcc0b4 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -1,3 +1,4 @@
+
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
@@ -894,6 +895,7 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
case i::Token::LPAREN:
Consume(i::Token::LPAREN);
+ parenthesized_function_ = (peek() == i::Token::FUNCTION);
result = ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, CHECK_OK);
if (result == kIdentifierExpression) result = kUnknownExpression;
@@ -1071,8 +1073,10 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
// Determine if the function will be lazily compiled.
// Currently only happens to top-level functions.
// Optimistically assume that all top-level functions are lazily compiled.
- bool is_lazily_compiled =
- (outer_scope_type == kTopLevelScope && !inside_with && allow_lazy_);
+ bool is_lazily_compiled = (outer_scope_type == kTopLevelScope &&
+ !inside_with && allow_lazy_ &&
+ !parenthesized_function_);
+ parenthesized_function_ = false;
if (is_lazily_compiled) {
log_->PauseRecording();
diff --git a/src/preparser.h b/src/preparser.h
index 536e6d4f..66fad3bc 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -144,7 +144,8 @@ class PreParser {
scope_(NULL),
stack_limit_(stack_limit),
stack_overflow_(false),
- allow_lazy_(true) { }
+ allow_lazy_(true),
+ parenthesized_function_(false) { }
// Preparse the program. Only called in PreParseProgram after creating
// the instance.
@@ -268,6 +269,7 @@ class PreParser {
uintptr_t stack_limit_;
bool stack_overflow_;
bool allow_lazy_;
+ bool parenthesized_function_;
};
} } // v8::preparser
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 34d18771..4476cb87 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -2385,7 +2385,7 @@ bool HeapSnapshotGenerator::IterateAndExtractReferences() {
if (interrupted) return false;
SetRootGcRootsReference();
RootsReferencesExtractor extractor(this);
- Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ Heap::IterateRoots(&extractor, VISIT_ALL);
return ReportProgress();
}
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 6c9c2eb0..9deea86f 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -76,18 +76,18 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c,
+ unsigned mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c,
+ unsigned mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index 463c1a81..fa2c6579 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -213,7 +213,7 @@ void RegExpMacroAssemblerTracer::CheckCharacterGT(uc16 limit,
}
-void RegExpMacroAssemblerTracer::CheckCharacter(uint32_t c, Label* on_equal) {
+void RegExpMacroAssemblerTracer::CheckCharacter(unsigned c, Label* on_equal) {
PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n",
c, LabelToInt(on_equal));
assembler_->CheckCharacter(c, on_equal);
@@ -232,7 +232,7 @@ void RegExpMacroAssemblerTracer::CheckNotAtStart(Label* on_not_at_start) {
}
-void RegExpMacroAssemblerTracer::CheckNotCharacter(uint32_t c,
+void RegExpMacroAssemblerTracer::CheckNotCharacter(unsigned c,
Label* on_not_equal) {
PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n",
c, LabelToInt(on_not_equal));
@@ -241,8 +241,8 @@ void RegExpMacroAssemblerTracer::CheckNotCharacter(uint32_t c,
void RegExpMacroAssemblerTracer::CheckCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
+ unsigned c,
+ unsigned mask,
Label* on_equal) {
PrintF(" CheckCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
c,
@@ -253,8 +253,8 @@ void RegExpMacroAssemblerTracer::CheckCharacterAfterAnd(
void RegExpMacroAssemblerTracer::CheckNotCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
+ unsigned c,
+ unsigned mask,
Label* on_not_equal) {
PrintF(" CheckNotCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
c,
diff --git a/src/regexp-macro-assembler-tracer.h b/src/regexp-macro-assembler-tracer.h
index 6a8f4d47..1fb6d544 100644
--- a/src/regexp-macro-assembler-tracer.h
+++ b/src/regexp-macro-assembler-tracer.h
@@ -43,9 +43,9 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t and_with,
+ virtual void CheckCharacter(unsigned c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(unsigned c,
+ unsigned and_with,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
@@ -60,9 +60,9 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t and_with,
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(unsigned c,
+ unsigned and_with,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index dc3bd824..ef85d27e 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -73,11 +73,11 @@ class RegExpMacroAssembler {
virtual void CheckAtStart(Label* on_at_start) = 0;
// Dispatch after looking the current character up in a 2-bits-per-entry
// map. The destinations vector has up to 4 labels.
- virtual void CheckCharacter(uint32_t c, Label* on_equal) = 0;
+ virtual void CheckCharacter(unsigned c, Label* on_equal) = 0;
// Bitwise and the current character with the given constant and then
// check for a match with c.
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t and_with,
+ virtual void CheckCharacterAfterAnd(unsigned c,
+ unsigned and_with,
Label* on_equal) = 0;
virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
@@ -101,9 +101,9 @@ class RegExpMacroAssembler {
// fail to match then goto the on_failure label. End of input always
// matches. If the label is NULL then we should pop a backtrack address off
// the stack and go to that.
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal) = 0;
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t and_with,
+ virtual void CheckNotCharacter(unsigned c, Label* on_not_equal) = 0;
+ virtual void CheckNotCharacterAfterAnd(unsigned c,
+ unsigned and_with,
Label* on_not_equal) = 0;
// Subtract a constant from the current character, then or with the given
// constant and then check for a match with c.
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 3d737a49..fd40cdc3 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -978,7 +978,7 @@ void Processor::VisitThisFunction(ThisFunction* node) {
}
-// Assumes code has been parsed and scopes hve been analyzed. Mutates the
+// Assumes code has been parsed and scopes have been analyzed. Mutates the
// AST, so the AST should not continue to be used in the case of failure.
bool Rewriter::Rewrite(CompilationInfo* info) {
FunctionLiteral* function = info->function();
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index c53ddd2b..d7792ace 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -134,6 +134,7 @@ void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
static bool IsOptimizable(JSFunction* function) {
+ if (Heap::InNewSpace(function)) return false;
Code* code = function->code();
return code->kind() == Code::FUNCTION && code->optimizable();
}
@@ -165,8 +166,10 @@ static void AttemptOnStackReplacement(JSFunction* function) {
}
SharedFunctionInfo* shared = function->shared();
- // If the code is not optimizable, don't try OSR.
- if (!shared->code()->optimizable()) return;
+ // If the code is not optimizable or references context slots, don't try OSR.
+ if (!shared->code()->optimizable() || !shared->allows_lazy_compilation()) {
+ return;
+ }
// We are not prepared to do OSR for a function that already has an
// allocated arguments object. The optimized code would bypass it for
diff --git a/src/runtime.cc b/src/runtime.cc
index 2aa44312..2f1f54c6 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1749,6 +1749,7 @@ static MaybeObject* Runtime_SetCode(Arguments args) {
// Array, and Object, and some web code
// doesn't like seeing source code for constructors.
target->shared()->set_script(Heap::undefined_value());
+ target->shared()->code()->set_optimizable(false);
// Clear the optimization hints related to the compiled code as these are no
// longer valid when the code is overwritten.
target->shared()->ClearThisPropertyAssignmentsInfo();
@@ -4621,12 +4622,12 @@ MaybeObject* AllocateRawString<SeqAsciiString>(int length) {
}
-template <typename Char, typename StringType>
+template <typename Char, typename StringType, bool comma>
static MaybeObject* SlowQuoteJsonString(Vector<const Char> characters) {
int length = characters.length();
const Char* read_cursor = characters.start();
const Char* end = read_cursor + length;
- const int kSpaceForQuotes = 2;
+ const int kSpaceForQuotes = 2 + (comma ? 1 : 0);
int quoted_length = kSpaceForQuotes;
while (read_cursor < end) {
Char c = *(read_cursor++);
@@ -4645,6 +4646,7 @@ static MaybeObject* SlowQuoteJsonString(Vector<const Char> characters) {
Char* write_cursor = reinterpret_cast<Char*>(
new_string->address() + SeqAsciiString::kHeaderSize);
+ if (comma) *(write_cursor++) = ',';
*(write_cursor++) = '"';
read_cursor = characters.start();
@@ -4666,14 +4668,14 @@ static MaybeObject* SlowQuoteJsonString(Vector<const Char> characters) {
}
-template <typename Char, typename StringType>
+template <typename Char, typename StringType, bool comma>
static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
int length = characters.length();
Counters::quote_json_char_count.Increment(length);
- const int kSpaceForQuotes = 2;
+ const int kSpaceForQuotes = 2 + (comma ? 1 : 0);
int worst_case_length = length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
if (worst_case_length > kMaxGuaranteedNewSpaceString) {
- return SlowQuoteJsonString<Char, StringType>(characters);
+ return SlowQuoteJsonString<Char, StringType, comma>(characters);
}
MaybeObject* new_alloc = AllocateRawString<StringType>(worst_case_length);
@@ -4686,7 +4688,7 @@ static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
// handle it being allocated in old space as may happen in the third
// attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
// CEntryStub::GenerateCore.
- return SlowQuoteJsonString<Char, StringType>(characters);
+ return SlowQuoteJsonString<Char, StringType, comma>(characters);
}
StringType* new_string = StringType::cast(new_object);
ASSERT(Heap::new_space()->Contains(new_string));
@@ -4694,6 +4696,7 @@ static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
Char* write_cursor = reinterpret_cast<Char*>(
new_string->address() + SeqAsciiString::kHeaderSize);
+ if (comma) *(write_cursor++) = ',';
*(write_cursor++) = '"';
const Char* read_cursor = characters.start();
@@ -4744,13 +4747,32 @@ static MaybeObject* Runtime_QuoteJSONString(Arguments args) {
ASSERT(str->IsFlat());
}
if (str->IsTwoByteRepresentation()) {
- return QuoteJsonString<uc16, SeqTwoByteString>(str->ToUC16Vector());
+ return QuoteJsonString<uc16, SeqTwoByteString, false>(str->ToUC16Vector());
} else {
- return QuoteJsonString<char, SeqAsciiString>(str->ToAsciiVector());
+ return QuoteJsonString<char, SeqAsciiString, false>(str->ToAsciiVector());
}
}
+static MaybeObject* Runtime_QuoteJSONStringComma(Arguments args) {
+ NoHandleAllocation ha;
+ CONVERT_CHECKED(String, str, args[0]);
+ if (!str->IsFlat()) {
+ MaybeObject* try_flatten = str->TryFlatten();
+ Object* flat;
+ if (!try_flatten->ToObject(&flat)) {
+ return try_flatten;
+ }
+ str = String::cast(flat);
+ ASSERT(str->IsFlat());
+ }
+ if (str->IsTwoByteRepresentation()) {
+ return QuoteJsonString<uc16, SeqTwoByteString, true>(str->ToUC16Vector());
+ } else {
+ return QuoteJsonString<char, SeqAsciiString, true>(str->ToAsciiVector());
+ }
+}
+
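
Making the comma a template parameter bakes the choice into two
specializations, so the extra byte and the leading ',' write cost nothing in
the plain QuoteJSONString path, while the Comma variant lets a caller emit
separator and quoted string in one pass. A sketch of the pattern in isolation
(illustrative names; escape expansion ignored):

    // Sketch: the bool is resolved at compile time, so each instantiation
    // carries only the branch it actually needs.
    template <bool comma>
    int QuotedLength(int raw_length) {
      const int kSpaceForQuotes = 2 + (comma ? 1 : 0);  // '"' '"' and ','
      return raw_length + kSpaceForQuotes;
    }

    // QuotedLength<false>(n) and QuotedLength<true>(n) compile to n + 2
    // and n + 3 respectively, with no runtime flag.
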
static MaybeObject* Runtime_StringParseInt(Arguments args) {
NoHandleAllocation ha;
@@ -6714,12 +6736,24 @@ static MaybeObject* Runtime_LazyRecompile(Arguments args) {
// code from the full compiler.
if (!function->shared()->code()->optimizable() ||
Debug::has_break_points()) {
+ if (FLAG_trace_opt) {
+ PrintF("[failed to optimize ");
+ function->PrintName();
+ PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
+ function->shared()->code()->optimizable() ? "T" : "F",
+ Debug::has_break_points() ? "T" : "F");
+ }
function->ReplaceCode(function->shared()->code());
return function->code();
}
if (CompileOptimized(function, AstNode::kNoNumber)) {
return function->code();
}
+ if (FLAG_trace_opt) {
+ PrintF("[failed to optimize ");
+ function->PrintName();
+ PrintF(": optimized compilation failed]\n");
+ }
function->ReplaceCode(function->shared()->code());
return Failure::Exception();
}
@@ -10334,15 +10368,16 @@ static MaybeObject* Runtime_LiveEditCheckAndDropActivations(Arguments args) {
return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
}
-// Compares 2 strings line-by-line and returns diff in form of JSArray of
-// triplets (pos1, pos1_end, pos2_end) describing list of diff chunks.
-static MaybeObject* Runtime_LiveEditCompareStringsLinewise(Arguments args) {
+// Compares 2 strings line-by-line, then token-wise, and returns the diff
+// as a JSArray of triplets (pos1, pos1_end, pos2_end) describing the list
+// of diff chunks.
+static MaybeObject* Runtime_LiveEditCompareStrings(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;
CONVERT_ARG_CHECKED(String, s1, 0);
CONVERT_ARG_CHECKED(String, s2, 1);
- return *LiveEdit::CompareStringsLinewise(s1, s2);
+ return *LiveEdit::CompareStrings(s1, s2);
}
@@ -10619,51 +10654,12 @@ static MaybeObject* Runtime_Abort(Arguments args) {
}
-MUST_USE_RESULT static MaybeObject* CacheMiss(FixedArray* cache_obj,
- int index,
- Object* key_obj) {
- ASSERT(index % 2 == 0); // index of the key
- ASSERT(index >= JSFunctionResultCache::kEntriesIndex);
- ASSERT(index < cache_obj->length());
-
- HandleScope scope;
-
- Handle<FixedArray> cache(cache_obj);
- Handle<Object> key(key_obj);
- Handle<JSFunction> factory(JSFunction::cast(
- cache->get(JSFunctionResultCache::kFactoryIndex)));
- // TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<Object> receiver(Top::global_context()->global());
-
- Handle<Object> value;
- {
- // This handle is nor shared, nor used later, so it's safe.
- Object** argv[] = { key.location() };
- bool pending_exception = false;
- value = Execution::Call(factory,
- receiver,
- 1,
- argv,
- &pending_exception);
- if (pending_exception) return Failure::Exception();
- }
-
- cache->set(index, *key);
- cache->set(index + 1, *value);
- cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(index));
-
- return *value;
-}
-
-
static MaybeObject* Runtime_GetFromCache(Arguments args) {
// This is only called from codegen, so checks might be more lax.
- CONVERT_CHECKED(FixedArray, cache, args[0]);
+ CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]);
Object* key = args[1];
- const int finger_index =
- Smi::cast(cache->get(JSFunctionResultCache::kFingerIndex))->value();
-
+ int finger_index = cache->finger_index();
Object* o = cache->get(finger_index);
if (o == key) {
// The fastest case: hit the same place again.
@@ -10675,35 +10671,78 @@ static MaybeObject* Runtime_GetFromCache(Arguments args) {
i -= 2) {
o = cache->get(i);
if (o == key) {
- cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(i));
+ cache->set_finger_index(i);
return cache->get(i + 1);
}
}
- const int size =
- Smi::cast(cache->get(JSFunctionResultCache::kCacheSizeIndex))->value();
+ int size = cache->size();
ASSERT(size <= cache->length());
for (int i = size - 2; i > finger_index; i -= 2) {
o = cache->get(i);
if (o == key) {
- cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(i));
+ cache->set_finger_index(i);
return cache->get(i + 1);
}
}
- // Cache miss. If we have spare room, put new data into it, otherwise
- // evict post finger entry which must be least recently used.
- if (size < cache->length()) {
- cache->set(JSFunctionResultCache::kCacheSizeIndex, Smi::FromInt(size + 2));
- return CacheMiss(cache, size, key);
+ // No value in the cache. Invoke the factory function and cache the result.
+ HandleScope scope;
+
+ Handle<JSFunctionResultCache> cache_handle(cache);
+ Handle<Object> key_handle(key);
+ Handle<Object> value;
+ {
+ Handle<JSFunction> factory(JSFunction::cast(
+ cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
+ // TODO(antonm): consider passing a receiver when constructing a cache.
+ Handle<Object> receiver(Top::global_context()->global());
+ // This handle is neither shared nor used later, so it's safe.
+ Object** argv[] = { key_handle.location() };
+ bool pending_exception = false;
+ value = Execution::Call(factory,
+ receiver,
+ 1,
+ argv,
+ &pending_exception);
+ if (pending_exception) return Failure::Exception();
+ }
+
+#ifdef DEBUG
+ cache_handle->JSFunctionResultCacheVerify();
+#endif
+
+ // Function invocation may have cleared the cache. Reread all the data.
+ finger_index = cache_handle->finger_index();
+ size = cache_handle->size();
+
+ // If we have spare room, put new data into it, otherwise evict post finger
+ // entry which is likely to be the least recently used.
+ int index = -1;
+ if (size < cache_handle->length()) {
+ cache_handle->set_size(size + JSFunctionResultCache::kEntrySize);
+ index = size;
} else {
- int target_index = finger_index + JSFunctionResultCache::kEntrySize;
- if (target_index == cache->length()) {
- target_index = JSFunctionResultCache::kEntriesIndex;
+ index = finger_index + JSFunctionResultCache::kEntrySize;
+ if (index == cache_handle->length()) {
+ index = JSFunctionResultCache::kEntriesIndex;
}
- return CacheMiss(cache, target_index, key);
}
+
+ ASSERT(index % 2 == 0);
+ ASSERT(index >= JSFunctionResultCache::kEntriesIndex);
+ ASSERT(index < cache_handle->length());
+
+ cache_handle->set(index, *key_handle);
+ cache_handle->set(index + 1, *value);
+ cache_handle->set_finger_index(index);
+
+#ifdef DEBUG
+ cache_handle->JSFunctionResultCacheVerify();
+#endif
+
+ return *value;
}
#ifdef DEBUG
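
For orientation: the cache is a flat FixedArray with a few header slots
(factory, finger index, cache size) followed by (key, value) pairs of
kEntrySize == 2. The lookup above probes the finger first, then walks down
toward the first entry, then down from the end toward the finger. A reduced
sketch of that probe order over a plain array, with the V8 handle and type
plumbing stripped out:

    // Sketch: 'cache' stands in for the FixedArray backing store,
    // entries_start for kEntriesIndex.
    int FindKey(void** cache, int entries_start, int size, int finger,
                void* key) {
      if (cache[finger] == key) return finger;        // fastest: finger hit
      for (int i = finger - 2; i >= entries_start; i -= 2) {
        if (cache[i] == key) return i;                // below the finger
      }
      for (int i = size - 2; i > finger; i -= 2) {
        if (cache[i] == key) return i;                // above the finger
      }
      return -1;  // miss: invoke the factory, store, and move the finger
    }
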
diff --git a/src/runtime.h b/src/runtime.h
index 2fa74386..dbd8d644 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -106,6 +106,7 @@ namespace internal {
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
F(QuoteJSONString, 1, 1) \
+ F(QuoteJSONStringComma, 1, 1) \
\
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
@@ -361,7 +362,7 @@ namespace internal {
F(LiveEditReplaceRefToNestedFunction, 3, 1) \
F(LiveEditPatchFunctionPositions, 2, 1) \
F(LiveEditCheckAndDropActivations, 2, 1) \
- F(LiveEditCompareStringsLinewise, 2, 1) \
+ F(LiveEditCompareStrings, 2, 1) \
F(GetFunctionCodePositionFromSource, 2, 1) \
F(ExecuteInDebugContext, 2, 1) \
\
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index b9468a50..e79dcff0 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -26,11 +26,34 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "safepoint-table.h"
+
#include "disasm.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
+
+bool SafepointEntry::HasRegisters() const {
+ ASSERT(is_valid());
+ ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+ const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
+ for (int i = 0; i < num_reg_bytes; i++) {
+ if (bits_[i] != SafepointTable::kNoRegisters) return true;
+ }
+ return false;
+}
+
+
+bool SafepointEntry::HasRegisterAt(int reg_index) const {
+ ASSERT(is_valid());
+ ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
+ int byte_index = reg_index >> kBitsPerByteLog2;
+ int bit_index = reg_index & (kBitsPerByte - 1);
+ return (bits_[byte_index] & (1 << bit_index)) != 0;
+}
+
+
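
A worked instance of the bit addressing above: register 10 lives in byte
10 >> 3 == 1 at bit 10 & 7 == 2, i.e. mask 1 << 2 == 0x04. The same logic as a
free function:

    // Generic bit probe over a byte array, identical in shape to
    // HasRegisterAt above.
    static bool HasBit(const uint8_t* bits, int index) {
      return (bits[index >> 3] & (1 << (index & 7))) != 0;
    }
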
SafepointTable::SafepointTable(Code* code) {
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
code_ = code;
@@ -41,45 +64,39 @@ SafepointTable::SafepointTable(Code* code) {
entries_ = pc_and_deoptimization_indexes_ +
(length_ * kPcAndDeoptimizationIndexSize);
ASSERT(entry_size_ > 0);
- ASSERT_EQ(DeoptimizationIndexField::max(), Safepoint::kNoDeoptimizationIndex);
+ ASSERT_EQ(SafepointEntry::DeoptimizationIndexField::max(),
+ Safepoint::kNoDeoptimizationIndex);
}
-bool SafepointTable::HasRegisters(uint8_t* entry) {
- ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
- for (int i = 0; i < num_reg_bytes; i++) {
- if (entry[i] != kNoRegisters) return true;
+SafepointEntry SafepointTable::FindEntry(Address pc) const {
+ unsigned pc_offset = static_cast<unsigned>(pc - code_->instruction_start());
+ for (unsigned i = 0; i < length(); i++) {
+ // TODO(kasperl): Replace the linear search with binary search.
+ if (GetPcOffset(i) == pc_offset) return GetEntry(i);
}
- return false;
-}
-
-
-bool SafepointTable::HasRegisterAt(uint8_t* entry, int reg_index) {
- ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
- int byte_index = reg_index >> kBitsPerByteLog2;
- int bit_index = reg_index & (kBitsPerByte - 1);
- return (entry[byte_index] & (1 << bit_index)) != 0;
+ return SafepointEntry();
}
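
The TODO above notes that the linear scan could become a binary search;
safepoints are emitted in code order, so the pc-offset column of the table is
sorted. A hypothetical replacement under that assumption:

    // Hypothetical binary search over the sorted pc offsets; returns the
    // entry index, or -1 if no safepoint sits at this pc.
    static int FindPcIndex(const unsigned* pc_offsets, int length,
                           unsigned pc_offset) {
      int lo = 0;
      int hi = length - 1;
      while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        if (pc_offsets[mid] == pc_offset) return mid;
        if (pc_offsets[mid] < pc_offset) lo = mid + 1; else hi = mid - 1;
      }
      return -1;
    }
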
void SafepointTable::PrintEntry(unsigned index) const {
disasm::NameConverter converter;
- uint8_t* entry = GetEntry(index);
+ SafepointEntry entry = GetEntry(index);
+ uint8_t* bits = entry.bits();
// Print the stack slot bits.
if (entry_size_ > 0) {
ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
int last = entry_size_ - 1;
- for (int i = first; i < last; i++) PrintBits(entry[i], kBitsPerByte);
+ for (int i = first; i < last; i++) PrintBits(bits[i], kBitsPerByte);
int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
- PrintBits(entry[last], last_bits);
+ PrintBits(bits[last], last_bits);
// Print the registers (if any).
- if (!HasRegisters(entry)) return;
+ if (!entry.HasRegisters()) return;
for (int j = 0; j < kNumSafepointRegisters; j++) {
- if (HasRegisterAt(entry, j)) {
+ if (entry.HasRegisterAt(j)) {
PrintF(" | %s", converter.NameOfCPURegister(j));
}
}
@@ -95,6 +112,11 @@ void SafepointTable::PrintBits(uint8_t byte, int digits) {
}
+void Safepoint::DefinePointerRegister(Register reg) {
+ registers_->Add(reg.code());
+}
+
+
Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
int deoptimization_index) {
ASSERT(deoptimization_index != -1);
@@ -102,6 +124,8 @@ Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
pc_and_deoptimization_index.pc = assembler->pc_offset();
pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+ pc_and_deoptimization_index.arguments = 0;
+ pc_and_deoptimization_index.has_doubles = false;
deoptimization_info_.Add(pc_and_deoptimization_index);
indexes_.Add(new ZoneList<int>(8));
registers_.Add(NULL);
@@ -112,11 +136,13 @@ Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
Safepoint SafepointTableBuilder::DefineSafepointWithRegisters(
Assembler* assembler, int arguments, int deoptimization_index) {
ASSERT(deoptimization_index != -1);
- ASSERT(arguments == 0); // Only case that works for now.
+ ASSERT(arguments >= 0);
DeoptimizationInfo pc_and_deoptimization_index;
pc_and_deoptimization_index.pc = assembler->pc_offset();
pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+ pc_and_deoptimization_index.arguments = arguments;
+ pc_and_deoptimization_index.has_doubles = false;
deoptimization_info_.Add(pc_and_deoptimization_index);
indexes_.Add(new ZoneList<int>(8));
registers_.Add(new ZoneList<int>(4));
@@ -124,6 +150,22 @@ Safepoint SafepointTableBuilder::DefineSafepointWithRegisters(
}
+Safepoint SafepointTableBuilder::DefineSafepointWithRegistersAndDoubles(
+ Assembler* assembler, int arguments, int deoptimization_index) {
+ ASSERT(deoptimization_index != -1);
+ ASSERT(arguments >= 0);
+ DeoptimizationInfo pc_and_deoptimization_index;
+ pc_and_deoptimization_index.pc = assembler->pc_offset();
+ pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
+ pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+ pc_and_deoptimization_index.arguments = arguments;
+ pc_and_deoptimization_index.has_doubles = true;
+ deoptimization_info_.Add(pc_and_deoptimization_index);
+ indexes_.Add(new ZoneList<int>(8));
+ registers_.Add(new ZoneList<int>(4));
+ return Safepoint(indexes_.last(), registers_.last());
+}
+
unsigned SafepointTableBuilder::GetCodeOffset() const {
ASSERT(emitted_);
return offset_;
@@ -152,7 +194,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
// pc after gap information.
for (int i = 0; i < length; i++) {
assembler->dd(deoptimization_info_[i].pc);
- assembler->dd(EncodeDeoptimizationIndexAndGap(deoptimization_info_[i]));
+ assembler->dd(EncodeExceptPC(deoptimization_info_[i]));
}
// Emit table of bitmaps.
@@ -197,12 +239,13 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
}
-uint32_t SafepointTableBuilder::EncodeDeoptimizationIndexAndGap(
- DeoptimizationInfo info) {
+uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
unsigned index = info.deoptimization_index;
unsigned gap_size = info.pc_after_gap - info.pc;
- uint32_t encoding = SafepointTable::DeoptimizationIndexField::encode(index);
- encoding |= SafepointTable::GapCodeSizeField::encode(gap_size);
+ uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index);
+ encoding |= SafepointEntry::GapCodeSizeField::encode(gap_size);
+ encoding |= SafepointEntry::ArgumentsField::encode(info.arguments);
+ encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles);
return encoding;
}
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index 010ac575..d7030514 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -30,13 +30,89 @@
#include "v8.h"
-#include "macro-assembler.h"
+#include "heap.h"
#include "zone.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {
+struct Register;
+
+class SafepointEntry BASE_EMBEDDED {
+ public:
+ SafepointEntry() : info_(0), bits_(NULL) {}
+
+ SafepointEntry(unsigned info, uint8_t* bits) : info_(info), bits_(bits) {
+ ASSERT(is_valid());
+ }
+
+ bool is_valid() const { return bits_ != NULL; }
+
+ bool Equals(const SafepointEntry& other) const {
+ return info_ == other.info_ && bits_ == other.bits_;
+ }
+
+ void Reset() {
+ info_ = 0;
+ bits_ = NULL;
+ }
+
+ int deoptimization_index() const {
+ ASSERT(is_valid());
+ return DeoptimizationIndexField::decode(info_);
+ }
+
+ int gap_code_size() const {
+ ASSERT(is_valid());
+ return GapCodeSizeField::decode(info_);
+ }
+
+ int argument_count() const {
+ ASSERT(is_valid());
+ return ArgumentsField::decode(info_);
+ }
+
+ bool has_doubles() const {
+ ASSERT(is_valid());
+ return SaveDoublesField::decode(info_);
+ }
+
+ uint8_t* bits() {
+ ASSERT(is_valid());
+ return bits_;
+ }
+
+ bool HasRegisters() const;
+ bool HasRegisterAt(int reg_index) const;
+
+ // Reserve 13 bits for the gap code size. On ARM a constant pool can be
+ // emitted when generating the gap code. The size of the const pool is less
+ // than what can be represented in 12 bits, so 13 bits gives room for having
+ // instructions before potentially emitting a constant pool.
+ static const int kGapCodeSizeBits = 13;
+ static const int kArgumentsFieldBits = 3;
+ static const int kSaveDoublesFieldBits = 1;
+ static const int kDeoptIndexBits =
+ 32 - kGapCodeSizeBits - kArgumentsFieldBits - kSaveDoublesFieldBits;
+ class GapCodeSizeField: public BitField<unsigned, 0, kGapCodeSizeBits> {};
+ class DeoptimizationIndexField: public BitField<int,
+ kGapCodeSizeBits,
+ kDeoptIndexBits> {}; // NOLINT
+ class ArgumentsField: public BitField<unsigned,
+ kGapCodeSizeBits + kDeoptIndexBits,
+ kArgumentsFieldBits> {}; // NOLINT
+ class SaveDoublesField: public BitField<bool,
+ kGapCodeSizeBits + kDeoptIndexBits +
+ kArgumentsFieldBits,
+ kSaveDoublesFieldBits> { }; // NOLINT
+
+ private:
+ unsigned info_;
+ uint8_t* bits_;
+};
+
+
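
The field widths above tile the 32-bit info word exactly: gap code size in
bits 0-12, deoptimization index in bits 13-27 (32 - 13 - 3 - 1 = 15 bits),
argument count in bits 28-30, and the save-doubles flag in bit 31. That is
also where the new Safepoint::kNoDeoptimizationIndex value comes from:
(1 << 15) - 1 == 0x7fff. A compile-time check of the budget, sketched with
V8's STATIC_ASSERT macro:

    // Sketch: the four fields must account for every bit of the info word.
    STATIC_ASSERT(SafepointEntry::kGapCodeSizeBits +
                  SafepointEntry::kDeoptIndexBits +
                  SafepointEntry::kArgumentsFieldBits +
                  SafepointEntry::kSaveDoublesFieldBits == 32);
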
class SafepointTable BASE_EMBEDDED {
public:
explicit SafepointTable(Code* code);
@@ -52,28 +128,15 @@ class SafepointTable BASE_EMBEDDED {
return Memory::uint32_at(GetPcOffsetLocation(index));
}
- int GetDeoptimizationIndex(unsigned index) const {
- ASSERT(index < length_);
- unsigned value = Memory::uint32_at(GetDeoptimizationLocation(index));
- return DeoptimizationIndexField::decode(value);
- }
-
- unsigned GetGapCodeSize(unsigned index) const {
+ SafepointEntry GetEntry(unsigned index) const {
ASSERT(index < length_);
- unsigned value = Memory::uint32_at(GetDeoptimizationLocation(index));
- return GapCodeSizeField::decode(value);
+ unsigned info = Memory::uint32_at(GetInfoLocation(index));
+ uint8_t* bits = &Memory::uint8_at(entries_ + (index * entry_size_));
+ return SafepointEntry(info, bits);
}
- uint8_t* GetEntry(unsigned index) const {
- ASSERT(index < length_);
- return &Memory::uint8_at(entries_ + (index * entry_size_));
- }
-
- class GapCodeSizeField: public BitField<unsigned, 0, 8> {};
- class DeoptimizationIndexField: public BitField<int, 8, 24> {};
-
- static bool HasRegisters(uint8_t* entry);
- static bool HasRegisterAt(uint8_t* entry, int reg_index);
+ // Returns the entry for the given pc.
+ SafepointEntry FindEntry(Address pc) const;
void PrintEntry(unsigned index) const;
@@ -94,7 +157,7 @@ class SafepointTable BASE_EMBEDDED {
(index * kPcAndDeoptimizationIndexSize);
}
- Address GetDeoptimizationLocation(unsigned index) const {
+ Address GetInfoLocation(unsigned index) const {
return GetPcOffsetLocation(index) + kPcSize;
}
@@ -109,15 +172,19 @@ class SafepointTable BASE_EMBEDDED {
Address entries_;
friend class SafepointTableBuilder;
+ friend class SafepointEntry;
+
+ DISALLOW_COPY_AND_ASSIGN(SafepointTable);
};
class Safepoint BASE_EMBEDDED {
public:
- static const int kNoDeoptimizationIndex = 0x00ffffff;
+ static const int kNoDeoptimizationIndex =
+ (1 << (SafepointEntry::kDeoptIndexBits)) - 1;
void DefinePointerSlot(int index) { indexes_->Add(index); }
- void DefinePointerRegister(Register reg) { registers_->Add(reg.code()); }
+ void DefinePointerRegister(Register reg);
private:
Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
@@ -153,6 +220,16 @@ class SafepointTableBuilder BASE_EMBEDDED {
int arguments,
int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
+ // Define a new safepoint with all double registers and the normal
+ // registers on the stack for the current position in the body and
+ // take the number of arguments on top of the registers into account.
+ // TODO(1043): Rewrite the three SafepointTableBuilder::DefineSafepoint
+ // methods to one method that uses template arguments.
+ Safepoint DefineSafepointWithRegistersAndDoubles(
+ Assembler* assembler,
+ int arguments,
+ int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
+
// Update the last safepoint with the size of the code generated for the gap
// following it.
void SetPcAfterGap(int pc) {
@@ -170,9 +247,11 @@ class SafepointTableBuilder BASE_EMBEDDED {
unsigned pc;
unsigned deoptimization_index;
unsigned pc_after_gap;
+ unsigned arguments;
+ bool has_doubles;
};
- uint32_t EncodeDeoptimizationIndexAndGap(DeoptimizationInfo info);
+ uint32_t EncodeExceptPC(const DeoptimizationInfo& info);
ZoneList<DeoptimizationInfo> deoptimization_info_;
ZoneList<ZoneList<int>*> indexes_;
diff --git a/src/scanner-base.h b/src/scanner-base.h
index b668df50..1024ad18 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -64,10 +64,10 @@ class UC16CharacterStream {
// Returns and advances past the next UC16 character in the input
// stream. If there are no more characters, it returns a negative
// value.
- inline int32_t Advance() {
+ inline uc32 Advance() {
if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
pos_++;
- return *(buffer_cursor_++);
+ return static_cast<uc32>(*(buffer_cursor_++));
}
// Note: currently the following increment is necessary to avoid a
// parser problem! The scanner treats the final kEndOfInput as
@@ -97,13 +97,14 @@ class UC16CharacterStream {
return SlowSeekForward(character_count);
}
- // Pushes back the most recently read UC16 character, i.e.,
- // the value returned by the most recent call to Advance.
+ // Pushes back the most recently read UC16 character (or negative
+ // value if at end of input), i.e., the value returned by the most recent
+ // call to Advance.
// Must not be used right after calling SeekForward.
- virtual void PushBack(uc16 character) = 0;
+ virtual void PushBack(int32_t character) = 0;
protected:
- static const int32_t kEndOfInput = -1;
+ static const uc32 kEndOfInput = -1;
// Ensures that the buffer_cursor_ points to the character at
// position pos_ of the input, if possible. If the position
diff --git a/src/scanner.cc b/src/scanner.cc
index 7fd6ef22..b66d10b9 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -48,14 +48,18 @@ BufferedUC16CharacterStream::BufferedUC16CharacterStream()
BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
-void BufferedUC16CharacterStream::PushBack(uc16 character) {
+void BufferedUC16CharacterStream::PushBack(uc32 character) {
+ if (character == kEndOfInput) {
+ pos_--;
+ return;
+ }
if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
// buffer_ is writable, buffer_cursor_ is const pointer.
- buffer_[--buffer_cursor_ - buffer_] = character;
+ buffer_[--buffer_cursor_ - buffer_] = static_cast<uc16>(character);
pos_--;
return;
}
- SlowPushBack(character);
+ SlowPushBack(static_cast<uc16>(character));
}
diff --git a/src/scanner.h b/src/scanner.h
index bdf899b5..d7621825 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -43,7 +43,7 @@ class BufferedUC16CharacterStream: public UC16CharacterStream {
BufferedUC16CharacterStream();
virtual ~BufferedUC16CharacterStream();
- virtual void PushBack(uc16 character);
+ virtual void PushBack(uc32 character);
protected:
static const unsigned kBufferSize = 512;
@@ -107,11 +107,12 @@ class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
int end_position);
virtual ~ExternalTwoByteStringUC16CharacterStream();
- virtual void PushBack(uc16 character) {
+ virtual void PushBack(uc32 character) {
ASSERT(buffer_cursor_ > raw_data_);
buffer_cursor_--;
pos_--;
}
+
protected:
virtual unsigned SlowSeekForward(unsigned delta) {
// Fast case always handles seeking.
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index e054d7de..e06235af 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -152,19 +152,18 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
//
// - function name
//
+// - calls eval boolean flag
+//
// - number of variables in the context object (smi) (= function context
// slot index + 1)
// - list of pairs (name, Var mode) of context-allocated variables (starting
// with context slot 0)
-// - NULL (sentinel)
//
// - number of parameters (smi)
// - list of parameter names (starting with parameter 0 first)
-// - NULL (sentinel)
//
// - number of variables on the stack (smi)
// - list of names of stack-allocated variables (starting with stack slot 0)
-// - NULL (sentinel)
// The ScopeInfo representation could be simplified and the ScopeInfo
// re-implemented (with almost the same interface). Here is a
diff --git a/src/scopes.cc b/src/scopes.cc
index 3565e11b..d3f54ad3 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -112,68 +112,92 @@ Variable* VariableMap::Lookup(Handle<String> name) {
// Dummy constructor
Scope::Scope(Type type)
- : outer_scope_(NULL),
- inner_scopes_(0),
- type_(type),
- scope_name_(Factory::empty_symbol()),
+ : inner_scopes_(0),
variables_(false),
temps_(0),
params_(0),
- dynamics_(NULL),
unresolved_(0),
- decls_(0),
- receiver_(NULL),
- function_(NULL),
- arguments_(NULL),
- arguments_shadow_(NULL),
- illegal_redecl_(NULL),
- scope_inside_with_(false),
- scope_contains_with_(false),
- scope_calls_eval_(false),
- outer_scope_calls_eval_(false),
- inner_scope_calls_eval_(false),
- outer_scope_is_eval_scope_(false),
- force_eager_compilation_(false),
- num_stack_slots_(0),
- num_heap_slots_(0) {
+ decls_(0) {
+ SetDefaults(type, NULL, NULL);
+ ASSERT(!resolved());
}
Scope::Scope(Scope* outer_scope, Type type)
- : outer_scope_(outer_scope),
- inner_scopes_(4),
- type_(type),
- scope_name_(Factory::empty_symbol()),
+ : inner_scopes_(4),
+ variables_(),
temps_(4),
params_(4),
- dynamics_(NULL),
unresolved_(16),
- decls_(4),
- receiver_(NULL),
- function_(NULL),
- arguments_(NULL),
- arguments_shadow_(NULL),
- illegal_redecl_(NULL),
- scope_inside_with_(false),
- scope_contains_with_(false),
- scope_calls_eval_(false),
- outer_scope_calls_eval_(false),
- inner_scope_calls_eval_(false),
- outer_scope_is_eval_scope_(false),
- force_eager_compilation_(false),
- num_stack_slots_(0),
- num_heap_slots_(0) {
+ decls_(4) {
+ SetDefaults(type, outer_scope, NULL);
// At some point we might want to provide outer scopes to
// eval scopes (by walking the stack and reading the scope info).
// In that case, the ASSERT below needs to be adjusted.
ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
ASSERT(!HasIllegalRedeclaration());
+ ASSERT(!resolved());
}
+Scope::Scope(Scope* inner_scope, SerializedScopeInfo* scope_info)
+ : inner_scopes_(4),
+ variables_(),
+ temps_(4),
+ params_(4),
+ unresolved_(16),
+ decls_(4) {
+ ASSERT(scope_info != NULL);
+ SetDefaults(FUNCTION_SCOPE, inner_scope->outer_scope(), scope_info);
+ ASSERT(resolved());
+ InsertAfterScope(inner_scope);
+ if (scope_info->HasHeapAllocatedLocals()) {
+ num_heap_slots_ = scope_info_->NumberOfContextSlots();
+ }
+
+ // This scope's arguments shadow (if present) is context-allocated if an inner
+ // scope accesses this one's parameters. Allocate the arguments_shadow_
+ // variable if necessary.
+ Variable::Mode mode;
+ int arguments_shadow_index =
+ scope_info_->ContextSlotIndex(Heap::arguments_shadow_symbol(), &mode);
+ if (arguments_shadow_index >= 0) {
+ ASSERT(mode == Variable::INTERNAL);
+ arguments_shadow_ = new Variable(this,
+ Factory::arguments_shadow_symbol(),
+ Variable::INTERNAL,
+ true,
+ Variable::ARGUMENTS);
+ arguments_shadow_->set_rewrite(
+ new Slot(arguments_shadow_, Slot::CONTEXT, arguments_shadow_index));
+ arguments_shadow_->set_is_used(true);
+ }
+}
+
+
+
bool Scope::Analyze(CompilationInfo* info) {
ASSERT(info->function() != NULL);
Scope* top = info->function()->scope();
+
+ // If we have a serialized scope info, reuse it.
+ if (!info->closure().is_null()) {
+ SerializedScopeInfo* scope_info = info->closure()->shared()->scope_info();
+ if (scope_info != SerializedScopeInfo::Empty()) {
+ Scope* scope = top;
+ JSFunction* current = *info->closure();
+ do {
+ current = current->context()->closure();
+ SerializedScopeInfo* scope_info = current->shared()->scope_info();
+ if (scope_info != SerializedScopeInfo::Empty()) {
+ scope = new Scope(scope, scope_info);
+ } else {
+ ASSERT(current->context()->IsGlobalContext());
+ }
+ } while (!current->context()->IsGlobalContext());
+ }
+ }
+
while (top->outer_scope() != NULL) top = top->outer_scope();
top->AllocateVariables(info->calling_context());
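
The loop above rebuilds the scope chain outward from the function being
compiled: each enclosing closure that carries serialized scope info gets
wrapped in a new Scope, and the walk stops at the global context. The same
traversal, reduced to its shape:

    // Sketch of the outward walk; mirrors the code above with the
    // CompilationInfo plumbing removed.
    Scope* ReconstructOuterScopes(Scope* innermost, JSFunction* closure) {
      Scope* scope = innermost;
      JSFunction* current = closure;
      do {
        current = current->context()->closure();
        SerializedScopeInfo* info = current->shared()->scope_info();
        if (info != SerializedScopeInfo::Empty()) {
          scope = new Scope(scope, info);  // new outermost resolved scope
        }
      } while (!current->context()->IsGlobalContext());
      return scope;
    }
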
@@ -191,6 +215,8 @@ bool Scope::Analyze(CompilationInfo* info) {
void Scope::Initialize(bool inside_with) {
+ ASSERT(!resolved());
+
// Add this scope as a new inner scope of the outer scope.
if (outer_scope_ != NULL) {
outer_scope_->inner_scopes_.Add(this);
@@ -210,7 +236,7 @@ void Scope::Initialize(bool inside_with) {
Variable* var =
variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
false, Variable::THIS);
- var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
+ var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
receiver_ = var;
if (is_function_scope()) {
@@ -224,7 +250,56 @@ void Scope::Initialize(bool inside_with) {
Variable* Scope::LocalLookup(Handle<String> name) {
- return variables_.Lookup(name);
+ Variable* result = variables_.Lookup(name);
+ if (result != NULL || !resolved()) {
+ return result;
+ }
+ // If the scope is resolved, we can find a variable in serialized scope info.
+
+ // We should never look up 'arguments' in this scope
+ // as it is implicitly present in any scope.
+ ASSERT(*name != *Factory::arguments_symbol());
+
+ // Assert that there is no local slot with the given name.
+ ASSERT(scope_info_->StackSlotIndex(*name) < 0);
+
+ // Check context slot lookup.
+ Variable::Mode mode;
+ int index = scope_info_->ContextSlotIndex(*name, &mode);
+ if (index >= 0) {
+ Variable* var =
+ variables_.Declare(this, name, mode, true, Variable::NORMAL);
+ var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
+ return var;
+ }
+
+ index = scope_info_->ParameterIndex(*name);
+ if (index >= 0) {
+ // ".arguments" must be present in context slots.
+ ASSERT(arguments_shadow_ != NULL);
+ Variable* var =
+ variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
+ Property* rewrite =
+ new Property(new VariableProxy(arguments_shadow_),
+ new Literal(Handle<Object>(Smi::FromInt(index))),
+ RelocInfo::kNoPosition,
+ Property::SYNTHETIC);
+ rewrite->set_is_arguments_access(true);
+ var->set_rewrite(rewrite);
+ return var;
+ }
+
+ index = scope_info_->FunctionContextSlotIndex(*name);
+ if (index >= 0) {
+ // Check that there is no local slot with the given name.
+ ASSERT(scope_info_->StackSlotIndex(*name) < 0);
+ Variable* var =
+ variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
+ var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
+ return var;
+ }
+
+ return NULL;
}
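
For a resolved scope, the lookup above tries the serialized info in a fixed
order: a context slot, then a parameter (rewritten as a property load off the
arguments shadow), then the function's own name slot. A compact restatement of
that order, with illustrative names and plain ints for the index queries:

    enum LookupKind { CONTEXT_SLOT, PARAMETER, FUNCTION_NAME, NOT_FOUND };

    // Sketch: classify a name by the first index query that succeeds,
    // matching the order of the three branches above.
    LookupKind Classify(int context_index, int param_index, int fun_index) {
      if (context_index >= 0) return CONTEXT_SLOT;
      if (param_index >= 0) return PARAMETER;
      if (fun_index >= 0) return FUNCTION_NAME;
      return NOT_FOUND;
    }
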
@@ -250,6 +325,7 @@ Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
// DYNAMIC variables are introduced during variable allocation,
// INTERNAL variables are allocated explicitly, and TEMPORARY
// variables are allocated via NewTemporary().
+ ASSERT(!resolved());
ASSERT(mode == Variable::VAR || mode == Variable::CONST);
return variables_.Declare(this, name, mode, true, Variable::NORMAL);
}
@@ -273,6 +349,7 @@ VariableProxy* Scope::NewUnresolved(Handle<String> name, bool inside_with) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
+ ASSERT(!resolved());
VariableProxy* proxy = new VariableProxy(name, false, inside_with);
unresolved_.Add(proxy);
return proxy;
@@ -292,6 +369,7 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
Variable* Scope::NewTemporary(Handle<String> name) {
+ ASSERT(!resolved());
Variable* var =
new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
temps_.Add(var);
@@ -550,7 +628,7 @@ Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
// Declare a new non-local.
var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
// Allocate it by giving it a dynamic lookup.
- var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
+ var->set_rewrite(new Slot(var, Slot::LOOKUP, -1));
}
return var;
}
@@ -612,8 +690,9 @@ Variable* Scope::LookupRecursive(Handle<String> name,
ASSERT(var != NULL);
// If this is a lookup from an inner scope, mark the variable.
- if (inner_lookup)
- var->is_accessed_from_inner_scope_ = true;
+ if (inner_lookup) {
+ var->MarkAsAccessedFromInnerScope();
+ }
// If the variable we have found is just a guess, invalidate the
// result. If the found variable is local, record that fact so we
@@ -753,7 +832,7 @@ bool Scope::MustAllocate(Variable* var) {
// via an eval() call. This is only possible if the variable has a
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
- (var->is_accessed_from_inner_scope_ ||
+ (var->is_accessed_from_inner_scope() ||
scope_calls_eval_ || inner_scope_calls_eval_ ||
scope_contains_with_)) {
var->set_is_used(true);
@@ -771,7 +850,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
// context.
return
var->mode() != Variable::TEMPORARY &&
- (var->is_accessed_from_inner_scope_ ||
+ (var->is_accessed_from_inner_scope() ||
scope_calls_eval_ || inner_scope_calls_eval_ ||
scope_contains_with_ || var->is_global());
}
@@ -787,12 +866,12 @@ bool Scope::HasArgumentsParameter() {
void Scope::AllocateStackSlot(Variable* var) {
- var->rewrite_ = new Slot(var, Slot::LOCAL, num_stack_slots_++);
+ var->set_rewrite(new Slot(var, Slot::LOCAL, num_stack_slots_++));
}
void Scope::AllocateHeapSlot(Variable* var) {
- var->rewrite_ = new Slot(var, Slot::CONTEXT, num_heap_slots_++);
+ var->set_rewrite(new Slot(var, Slot::CONTEXT, num_heap_slots_++));
}
@@ -857,7 +936,7 @@ void Scope::AllocateParameterLocals() {
// It is ok to set this only now, because arguments is a local
// variable that is allocated after the parameters have been
// allocated.
- arguments_shadow_->is_accessed_from_inner_scope_ = true;
+ arguments_shadow_->MarkAsAccessedFromInnerScope();
}
Property* rewrite =
new Property(new VariableProxy(arguments_shadow_),
@@ -865,7 +944,7 @@ void Scope::AllocateParameterLocals() {
RelocInfo::kNoPosition,
Property::SYNTHETIC);
rewrite->set_is_arguments_access(true);
- var->rewrite_ = rewrite;
+ var->set_rewrite(rewrite);
}
}
@@ -880,23 +959,23 @@ void Scope::AllocateParameterLocals() {
ASSERT(var->scope() == this);
if (MustAllocate(var)) {
if (MustAllocateInContext(var)) {
- ASSERT(var->rewrite_ == NULL ||
+ ASSERT(var->rewrite() == NULL ||
(var->AsSlot() != NULL &&
var->AsSlot()->type() == Slot::CONTEXT));
- if (var->rewrite_ == NULL) {
+ if (var->rewrite() == NULL) {
// Only set the heap allocation if the parameter has not
// been allocated yet.
AllocateHeapSlot(var);
}
} else {
- ASSERT(var->rewrite_ == NULL ||
+ ASSERT(var->rewrite() == NULL ||
(var->AsSlot() != NULL &&
var->AsSlot()->type() == Slot::PARAMETER));
// Set the parameter index always, even if the parameter
// was seen before! (We need to access the actual parameter
// supplied for the last occurrence of a multiply declared
// parameter.)
- var->rewrite_ = new Slot(var, Slot::PARAMETER, i);
+ var->set_rewrite(new Slot(var, Slot::PARAMETER, i));
}
}
}
@@ -906,10 +985,10 @@ void Scope::AllocateParameterLocals() {
void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
- ASSERT(var->rewrite_ == NULL ||
+ ASSERT(var->rewrite() == NULL ||
(!var->IsVariable(Factory::result_symbol())) ||
(var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
- if (var->rewrite_ == NULL && MustAllocate(var)) {
+ if (var->rewrite() == NULL && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
AllocateHeapSlot(var);
} else {
@@ -943,15 +1022,18 @@ void Scope::AllocateNonParameterLocals() {
void Scope::AllocateVariablesRecursively() {
- // The number of slots required for variables.
- num_stack_slots_ = 0;
- num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
-
// Allocate variables for inner scopes.
for (int i = 0; i < inner_scopes_.length(); i++) {
inner_scopes_[i]->AllocateVariablesRecursively();
}
+ // If the scope is already resolved, we still need to allocate
+ // variables in inner scopes which may not have been resolved yet.
+ if (resolved()) return;
+ // The number of slots required for variables.
+ num_stack_slots_ = 0;
+ num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
+
// Allocate variables for this scope.
// Parameters must be allocated first, if any.
if (is_function_scope()) AllocateParameterLocals();
diff --git a/src/scopes.h b/src/scopes.h
index d909b81f..09901ade 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -302,6 +302,14 @@ class Scope: public ZoneObject {
explicit Scope(Type type);
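+ // Splices this scope into the tree directly above the given scope: the
+ // given scope becomes an inner scope of this one, and this scope takes
+ // its place among the old outer scope's inner scopes.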
+ void InsertAfterScope(Scope* scope) {
+ inner_scopes_.Add(scope);
+ outer_scope_ = scope->outer_scope_;
+ outer_scope_->inner_scopes_.RemoveElement(scope);
+ outer_scope_->inner_scopes_.Add(this);
+ scope->outer_scope_ = this;
+ }
+
// Scope tree.
Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
@@ -355,6 +363,10 @@ class Scope: public ZoneObject {
int num_stack_slots_;
int num_heap_slots_;
+ // Serialized scopes support.
+ SerializedScopeInfo* scope_info_;
+ bool resolved() { return scope_info_ != NULL; }
+
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
Variable* NonLocal(Handle<String> name, Variable::Mode mode);
@@ -386,6 +398,33 @@ class Scope: public ZoneObject {
void AllocateNonParameterLocal(Variable* var);
void AllocateNonParameterLocals();
void AllocateVariablesRecursively();
+
+ private:
+ Scope(Scope* inner_scope, SerializedScopeInfo* scope_info);
+
+ void SetDefaults(Type type,
+ Scope* outer_scope,
+ SerializedScopeInfo* scope_info) {
+ outer_scope_ = outer_scope;
+ type_ = type;
+ scope_name_ = Factory::empty_symbol();
+ dynamics_ = NULL;
+ receiver_ = NULL;
+ function_ = NULL;
+ arguments_ = NULL;
+ arguments_shadow_ = NULL;
+ illegal_redecl_ = NULL;
+ scope_inside_with_ = false;
+ scope_contains_with_ = false;
+ scope_calls_eval_ = false;
+ outer_scope_calls_eval_ = false;
+ inner_scope_calls_eval_ = false;
+ outer_scope_is_eval_scope_ = false;
+ force_eager_compilation_ = false;
+ num_stack_slots_ = 0;
+ num_heap_slots_ = 0;
+ scope_info_ = scope_info;
+ }
};
diff --git a/src/serialize.cc b/src/serialize.cc
index 19e65185..6a6c6bbd 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -486,17 +486,21 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
36,
"LDoubleConstant::one_half");
- Add(ExternalReference::address_of_negative_infinity().address(),
+ Add(ExternalReference::address_of_minus_zero().address(),
UNCLASSIFIED,
37,
+ "LDoubleConstant::minus_zero");
+ Add(ExternalReference::address_of_negative_infinity().address(),
+ UNCLASSIFIED,
+ 38,
"LDoubleConstant::negative_infinity");
Add(ExternalReference::power_double_double_function().address(),
UNCLASSIFIED,
- 38,
+ 39,
"power_double_double_function");
Add(ExternalReference::power_double_int_function().address(),
UNCLASSIFIED,
- 39,
+ 40,
"power_double_int_function");
Add(ExternalReference::arguments_marker_location().address(),
UNCLASSIFIED,
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 86e72012..295cc4a6 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -29,6 +29,7 @@
#include "api.h"
#include "arguments.h"
+#include "gdb-jit.h"
#include "ic-inl.h"
#include "stub-cache.h"
#include "vm-state-inl.h"
@@ -122,6 +123,7 @@ MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
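+ // Also register the stub with the GDB JIT interface. GDBJIT() is assumed
+ // to expand to nothing unless ENABLE_GDB_JIT_INTERFACE is defined, so the
+ // hooks below cost nothing in normal builds.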
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, cache_name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(cache_name, Code::cast(code));
@@ -146,6 +148,7 @@ MaybeObject* StubCache::ComputeLoadField(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -171,6 +174,7 @@ MaybeObject* StubCache::ComputeLoadCallback(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -196,6 +200,7 @@ MaybeObject* StubCache::ComputeLoadConstant(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -219,6 +224,7 @@ MaybeObject* StubCache::ComputeLoadInterceptor(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -252,6 +258,7 @@ MaybeObject* StubCache::ComputeLoadGlobal(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -276,6 +283,7 @@ MaybeObject* StubCache::ComputeKeyedLoadField(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -301,6 +309,7 @@ MaybeObject* StubCache::ComputeKeyedLoadConstant(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -325,6 +334,7 @@ MaybeObject* StubCache::ComputeKeyedLoadInterceptor(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -350,6 +360,7 @@ MaybeObject* StubCache::ComputeKeyedLoadCallback(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -373,6 +384,7 @@ MaybeObject* StubCache::ComputeKeyedLoadArrayLength(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -395,6 +407,7 @@ MaybeObject* StubCache::ComputeKeyedLoadStringLength(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result = map->UpdateCodeCache(name, Code::cast(code));
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -416,6 +429,7 @@ MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -461,6 +475,7 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -509,6 +524,7 @@ MaybeObject* StubCache::ComputeStoreGlobal(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -532,6 +548,7 @@ MaybeObject* StubCache::ComputeStoreCallback(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -554,6 +571,7 @@ MaybeObject* StubCache::ComputeStoreInterceptor(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -579,6 +597,7 @@ MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
}
PROFILE(CodeCreateEvent(
Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@@ -594,6 +613,7 @@ MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
MaybeObject* StubCache::ComputeCallConstant(int argc,
InLoopFlag in_loop,
Code::Kind kind,
+ Code::ExtraICState extra_ic_state,
String* name,
Object* object,
JSObject* holder,
@@ -613,12 +633,12 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
check = BOOLEAN_CHECK;
}
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind,
- CONSTANT_FUNCTION,
- cache_holder,
- in_loop,
- argc);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
+ CONSTANT_FUNCTION,
+ extra_ic_state,
+ cache_holder,
+ in_loop,
+ argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
@@ -627,7 +647,8 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
// caches.
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
- CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
+ CallStubCompiler compiler(
+ argc, in_loop, kind, extra_ic_state, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallConstant(object, holder, function, name, check);
if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -636,6 +657,7 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
map_holder->UpdateMapCodeCache(name, Code::cast(code));
@@ -667,12 +689,14 @@ MaybeObject* StubCache::ComputeCallField(int argc,
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
FIELD,
+ Code::kNoExtraICState,
cache_holder,
in_loop,
argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
+ CallStubCompiler compiler(
+ argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallField(JSObject::cast(object),
holder,
@@ -683,6 +707,7 @@ MaybeObject* StubCache::ComputeCallField(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
map_holder->UpdateMapCodeCache(name, Code::cast(code));
@@ -710,15 +735,16 @@ MaybeObject* StubCache::ComputeCallInterceptor(int argc,
object = holder;
}
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind,
- INTERCEPTOR,
- cache_holder,
- NOT_IN_LOOP,
- argc);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
+ INTERCEPTOR,
+ Code::kNoExtraICState,
+ cache_holder,
+ NOT_IN_LOOP,
+ argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, NOT_IN_LOOP, kind, cache_holder);
+ CallStubCompiler compiler(
+ argc, NOT_IN_LOOP, kind, Code::kNoExtraICState, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -726,6 +752,7 @@ MaybeObject* StubCache::ComputeCallInterceptor(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
map_holder->UpdateMapCodeCache(name, Code::cast(code));
@@ -760,12 +787,12 @@ MaybeObject* StubCache::ComputeCallGlobal(int argc,
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(receiver, holder);
JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind,
- NORMAL,
- cache_holder,
- in_loop,
- argc);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
+ NORMAL,
+ Code::kNoExtraICState,
+ cache_holder,
+ in_loop,
+ argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
@@ -773,7 +800,8 @@ MaybeObject* StubCache::ComputeCallGlobal(int argc,
// internal error which will make sure we do not update any
// caches.
if (!function->is_compiled()) return Failure::InternalError();
- CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
+ CallStubCompiler compiler(
+ argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -781,6 +809,7 @@ MaybeObject* StubCache::ComputeCallGlobal(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
+ GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
map_holder->UpdateMapCodeCache(name, Code::cast(code));
@@ -839,8 +868,12 @@ static MaybeObject* FillCache(MaybeObject* maybe_code) {
Code* StubCache::FindCallInitialize(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
- Code::Flags flags =
- Code::ComputeFlags(kind, in_loop, UNINITIALIZED, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ in_loop,
+ UNINITIALIZED,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
Object* result = ProbeCache(flags)->ToObjectUnchecked();
ASSERT(!result->IsUndefined());
// This might be called during the marking phase of the collector
@@ -852,8 +885,12 @@ Code* StubCache::FindCallInitialize(int argc,
MaybeObject* StubCache::ComputeCallInitialize(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
- Code::Flags flags =
- Code::ComputeFlags(kind, in_loop, UNINITIALIZED, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ in_loop,
+ UNINITIALIZED,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@@ -895,8 +932,12 @@ Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc,
MaybeObject* StubCache::ComputeCallPreMonomorphic(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
- Code::Flags flags =
- Code::ComputeFlags(kind, in_loop, PREMONOMORPHIC, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ in_loop,
+ PREMONOMORPHIC,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@@ -910,8 +951,12 @@ MaybeObject* StubCache::ComputeCallPreMonomorphic(int argc,
MaybeObject* StubCache::ComputeCallNormal(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
- Code::Flags flags =
- Code::ComputeFlags(kind, in_loop, MONOMORPHIC, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ in_loop,
+ MONOMORPHIC,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@@ -925,8 +970,12 @@ MaybeObject* StubCache::ComputeCallNormal(int argc,
MaybeObject* StubCache::ComputeCallMegamorphic(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
- Code::Flags flags =
- Code::ComputeFlags(kind, in_loop, MEGAMORPHIC, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ in_loop,
+ MEGAMORPHIC,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@@ -940,8 +989,13 @@ MaybeObject* StubCache::ComputeCallMegamorphic(int argc,
MaybeObject* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
// MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
// and monomorphic stubs are not mixed up together in the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- kind, NOT_IN_LOOP, MONOMORPHIC_PROTOTYPE_FAILURE, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ NOT_IN_LOOP,
+ MONOMORPHIC_PROTOTYPE_FAILURE,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc,
+ OWN_MAP);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@@ -954,8 +1008,12 @@ MaybeObject* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
#ifdef ENABLE_DEBUGGER_SUPPORT
MaybeObject* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
- Code::Flags flags =
- Code::ComputeFlags(kind, NOT_IN_LOOP, DEBUG_BREAK, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ NOT_IN_LOOP,
+ DEBUG_BREAK,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@@ -968,12 +1026,12 @@ MaybeObject* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(int argc,
Code::Kind kind) {
- Code::Flags flags =
- Code::ComputeFlags(kind,
- NOT_IN_LOOP,
- DEBUG_PREPARE_STEP_IN,
- NORMAL,
- argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ NOT_IN_LOOP,
+ DEBUG_PREPARE_STEP_IN,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@@ -1257,6 +1315,7 @@ MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, Code::cast(code)));
return result;
}
@@ -1282,6 +1341,7 @@ MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, Code::cast(code)));
return result;
}
@@ -1304,6 +1364,7 @@ MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, Code::cast(code)));
return result;
}
@@ -1328,6 +1389,7 @@ MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
return result;
}
@@ -1350,6 +1412,7 @@ MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
return result;
}
@@ -1449,6 +1512,9 @@ MaybeObject* LoadStubCompiler::GetCode(PropertyType type, String* name) {
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
+ name,
+ Code::cast(result->ToObjectUnchecked())));
}
return result;
}
@@ -1461,6 +1527,9 @@ MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
+ GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
+ name,
+ Code::cast(result->ToObjectUnchecked())));
}
return result;
}
@@ -1473,6 +1542,9 @@ MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
+ GDBJIT(AddCode(GDBJITInterface::STORE_IC,
+ name,
+ Code::cast(result->ToObjectUnchecked())));
}
return result;
}
@@ -1485,6 +1557,9 @@ MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
+ GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC,
+ name,
+ Code::cast(result->ToObjectUnchecked())));
}
return result;
}
@@ -1493,11 +1568,13 @@ MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
CallStubCompiler::CallStubCompiler(int argc,
InLoopFlag in_loop,
Code::Kind kind,
+ Code::ExtraICState extra_ic_state,
InlineCacheHolderFlag cache_holder)
- : arguments_(argc)
- , in_loop_(in_loop)
- , kind_(kind)
- , cache_holder_(cache_holder) {
+ : arguments_(argc),
+ in_loop_(in_loop),
+ kind_(kind),
+ extra_ic_state_(extra_ic_state),
+ cache_holder_(cache_holder) {
}
@@ -1534,6 +1611,7 @@ MaybeObject* CallStubCompiler::GetCode(PropertyType type, String* name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
+ extra_ic_state_,
cache_holder_,
in_loop_,
argc);
@@ -1559,6 +1637,7 @@ MaybeObject* ConstructStubCompiler::GetCode() {
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
+ GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
return result;
}
diff --git a/src/stub-cache.h b/src/stub-cache.h
index a7829a60..85dd5f6a 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -177,13 +177,15 @@ class StubCache : public AllStatic {
JSObject* holder,
int index);
- MUST_USE_RESULT static MaybeObject* ComputeCallConstant(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder,
- JSFunction* function);
+ MUST_USE_RESULT static MaybeObject* ComputeCallConstant(
+ int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ Code::ExtraICState extra_ic_state,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ JSFunction* function);
MUST_USE_RESULT static MaybeObject* ComputeCallNormal(int argc,
InLoopFlag in_loop,
@@ -660,6 +662,7 @@ class CallStubCompiler: public StubCompiler {
CallStubCompiler(int argc,
InLoopFlag in_loop,
Code::Kind kind,
+ Code::ExtraICState extra_ic_state,
InlineCacheHolderFlag cache_holder);
MUST_USE_RESULT MaybeObject* CompileCallField(JSObject* object,
@@ -705,6 +708,7 @@ class CallStubCompiler: public StubCompiler {
const ParameterCount arguments_;
const InLoopFlag in_loop_;
const Code::Kind kind_;
+ const Code::ExtraICState extra_ic_state_;
const InlineCacheHolderFlag cache_holder_;
const ParameterCount& arguments() { return arguments_; }
diff --git a/src/third_party/strongtalk/LICENSE b/src/third_party/strongtalk/LICENSE
new file mode 100644
index 00000000..7473a7b2
--- /dev/null
+++ b/src/third_party/strongtalk/LICENSE
@@ -0,0 +1,29 @@
+Copyright (c) 1994-2006 Sun Microsystems Inc.
+All Rights Reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+- Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+- Redistribution in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+- Neither the name of Sun Microsystems or the names of contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/third_party/strongtalk/README.chromium b/src/third_party/strongtalk/README.chromium
new file mode 100644
index 00000000..ba2b789f
--- /dev/null
+++ b/src/third_party/strongtalk/README.chromium
@@ -0,0 +1,18 @@
+Name: Strongtalk
+URL: http://www.strongtalk.org/
+
+Code from the Strongtalk assembler is used with modification in the following
+files:
+
+src/assembler.h
+src/assembler.cc
+src/arm/assembler-arm.cc
+src/arm/assembler-arm.h
+src/arm/assembler-arm-inl.h
+src/ia32/assembler-ia32.cc
+src/ia32/assembler-ia32.h
+src/ia32/assembler-ia32-inl.h
+src/mips/assembler-mips.cc
+src/mips/assembler-mips.h
+src/mips/assembler-mips-inl.h
+src/x64/assembler-x64.h
diff --git a/src/token.h b/src/token.h
index 2f5ca1b5..fb890d23 100644
--- a/src/token.h
+++ b/src/token.h
@@ -217,7 +217,7 @@ class Token {
// Returns a string corresponding to the C++ token name
// (e.g. "LT" for the token LT).
static const char* Name(Value tok) {
- ASSERT(0 <= tok && tok < NUM_TOKENS);
+ ASSERT(tok < NUM_TOKENS); // tok is unsigned.
return name_[tok];
}
@@ -292,14 +292,14 @@ class Token {
// (e.g., "<" for the token LT) or NULL if the token doesn't
// have a (unique) string (e.g. an IDENTIFIER).
static const char* String(Value tok) {
- ASSERT(0 <= tok && tok < NUM_TOKENS);
+ ASSERT(tok < NUM_TOKENS); // tok is unsigned.
return string_[tok];
}
// Returns the precedence > 0 for binary and compare
// operators; returns 0 otherwise.
static int Precedence(Value tok) {
- ASSERT(0 <= tok && tok < NUM_TOKENS);
+ ASSERT(tok < NUM_TOKENS); // tok is unsigned.
return precedence_[tok];
}
diff --git a/src/type-info.cc b/src/type-info.cc
index 8719439a..f4f65e99 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -58,7 +58,12 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
}
-TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code) {
+STATIC_ASSERT(DEFAULT_STRING_STUB == Code::kNoExtraICState);
+
+
+TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
+ Handle<Context> global_context) {
+ global_context_ = global_context;
Initialize(code);
}
@@ -71,17 +76,18 @@ void TypeFeedbackOracle::Initialize(Handle<Code> code) {
bool TypeFeedbackOracle::LoadIsMonomorphic(Property* expr) {
- return IsMonomorphic(expr->position());
+ return GetElement(map_, expr->position())->IsMap();
}
bool TypeFeedbackOracle::StoreIsMonomorphic(Assignment* expr) {
- return IsMonomorphic(expr->position());
+ return GetElement(map_, expr->position())->IsMap();
}
bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
- return IsMonomorphic(expr->position());
+ Handle<Object> value = GetElement(map_, expr->position());
+ return value->IsMap() || value->IsSmi();
}
@@ -97,12 +103,6 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Assignment* expr) {
}
-Handle<Map> TypeFeedbackOracle::CallMonomorphicReceiverType(Call* expr) {
- ASSERT(CallIsMonomorphic(expr));
- return Handle<Map>::cast(GetElement(map_, expr->position()));
-}
-
-
ZoneMapList* TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
Handle<String> name) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
@@ -120,12 +120,51 @@ ZoneMapList* TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
ZoneMapList* TypeFeedbackOracle::CallReceiverTypes(Call* expr,
Handle<String> name) {
int arity = expr->arguments()->length();
- Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::CALL_IC, NORMAL, OWN_MAP, NOT_IN_LOOP, arity);
+ // Note: these flags won't let us get maps from stubs with
+ // non-default extra IC state in the megamorphic case. In the more
+ // important monomorphic case the map is obtained directly, so it's
+ // not a problem until we decide to emit more polymorphic code.
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
+ NORMAL,
+ Code::kNoExtraICState,
+ OWN_MAP,
+ NOT_IN_LOOP,
+ arity);
return CollectReceiverTypes(expr->position(), name, flags);
}
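+// A Smi in the feedback map encodes the CheckType of a CALL_IC specialized
+// for a primitive receiver; any other value implies an ordinary receiver
+// map check.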
+CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
+ Handle<Object> value = GetElement(map_, expr->position());
+ if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
+ CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
+ ASSERT(check != RECEIVER_MAP_CHECK);
+ return check;
+}
+
+
+Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
+ CheckType check) {
+ JSFunction* function = NULL;
+ switch (check) {
+ case RECEIVER_MAP_CHECK:
+ UNREACHABLE();
+ break;
+ case STRING_CHECK:
+ function = global_context_->string_function();
+ break;
+ case NUMBER_CHECK:
+ function = global_context_->number_function();
+ break;
+ case BOOLEAN_CHECK:
+ function = global_context_->boolean_function();
+ break;
+ }
+ ASSERT(function != NULL);
+ return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
+}
+
+
bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
Handle<Object> object = GetElement(map_, expr->position());
return *object == Builtins::builtin(id);
@@ -220,6 +259,7 @@ TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr, Side side) {
return unknown;
}
+
TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
Handle<Object> object = GetElement(map_, clause->position());
TypeInfo unknown = TypeInfo::Unknown();
@@ -247,12 +287,11 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
}
-
ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
Handle<String> name,
Code::Flags flags) {
Handle<Object> object = GetElement(map_, position);
- if (object->IsUndefined()) return NULL;
+ if (object->IsUndefined() || object->IsSmi()) return NULL;
if (*object == Builtins::builtin(Builtins::StoreIC_GlobalProxy)) {
// TODO(fschneider): We could collect the maps and signal that
@@ -301,11 +340,20 @@ void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
SetElement(map_, position, target);
}
} else if (state == MONOMORPHIC) {
- Handle<Map> map = Handle<Map>(target->FindFirstMap());
- if (*map == NULL) {
- SetElement(map_, position, target);
+ if (target->kind() != Code::CALL_IC ||
+ target->check_type() == RECEIVER_MAP_CHECK) {
+ Handle<Map> map = Handle<Map>(target->FindFirstMap());
+ if (*map == NULL) {
+ SetElement(map_, position, target);
+ } else {
+ SetElement(map_, position, map);
+ }
} else {
- SetElement(map_, position, map);
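+ // CALL_ICs specialized on a primitive receiver have no receiver
+ // map to record; store their CheckType as a Smi instead.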
+ ASSERT(target->kind() == Code::CALL_IC);
+ CheckType check = target->check_type();
+ ASSERT(check != RECEIVER_MAP_CHECK);
+ SetElement(map_, position, Handle<Object>(Smi::FromInt(check)));
+ ASSERT(Smi::cast(*GetElement(map_, position))->value() == check);
}
} else if (state == MEGAMORPHIC) {
SetElement(map_, position, target);
@@ -342,8 +390,6 @@ void TypeFeedbackOracle::CollectPositions(Code* code,
} else if (kind == Code::COMPARE_IC) {
if (target->compare_state() == CompareIC::GENERIC) continue;
} else {
- if (kind == Code::CALL_IC && state == MONOMORPHIC &&
- target->check_type() != RECEIVER_MAP_CHECK) continue;
if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
}
code_positions->Add(
diff --git a/src/type-info.h b/src/type-info.h
index cb3e75d8..e026e88c 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -219,6 +219,12 @@ class TypeInfo {
};
+enum StringStubFeedback {
+ DEFAULT_STRING_STUB = 0,
+ STRING_INDEX_OUT_OF_BOUNDS = 1
+};
+
+
// Forward declarations.
class Assignment;
class BinaryOperation;
@@ -236,7 +242,7 @@ class TypeFeedbackOracle BASE_EMBEDDED {
RESULT
};
- explicit TypeFeedbackOracle(Handle<Code> code);
+ TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
bool LoadIsMonomorphic(Property* expr);
bool StoreIsMonomorphic(Assignment* expr);
@@ -244,12 +250,14 @@ class TypeFeedbackOracle BASE_EMBEDDED {
Handle<Map> LoadMonomorphicReceiverType(Property* expr);
Handle<Map> StoreMonomorphicReceiverType(Assignment* expr);
- Handle<Map> CallMonomorphicReceiverType(Call* expr);
ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
+ CheckType GetCallCheckType(Call* expr);
+ Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
+
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
// Get type information for arithmetic operations and compares.
@@ -260,8 +268,6 @@ class TypeFeedbackOracle BASE_EMBEDDED {
private:
void Initialize(Handle<Code> code);
- bool IsMonomorphic(int pos) { return GetElement(map_, pos)->IsMap(); }
-
ZoneMapList* CollectReceiverTypes(int position,
Handle<String> name,
Code::Flags flags);
@@ -272,6 +278,7 @@ class TypeFeedbackOracle BASE_EMBEDDED {
List<int>* code_positions,
List<int>* source_positions);
+ Handle<Context> global_context_;
Handle<JSObject> map_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
diff --git a/src/v8globals.h b/src/v8globals.h
index 65bbf6ab..3f27114b 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -77,7 +77,8 @@ const Address kHandleZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
-const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
+const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
+const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeed);
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
diff --git a/src/v8utils.h b/src/v8utils.h
index 095a8b15..e9623be6 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -29,7 +29,7 @@
#define V8_V8UTILS_H_
#include "utils.h"
-#include "platform.h" // For va_list on Solaris.
+#include "platform.h" // For va_list on Solaris.
namespace v8 {
namespace internal {
diff --git a/src/variables.cc b/src/variables.cc
index c1440b7f..7f580fc6 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -98,6 +98,12 @@ bool Variable::IsStackLocal() const {
}
+bool Variable::IsContextSlot() const {
+ Slot* s = AsSlot();
+ return s != NULL && s->type() == Slot::CONTEXT;
+}
+
+
Variable::Variable(Scope* scope,
Handle<String> name,
Mode mode,
diff --git a/src/variables.h b/src/variables.h
index 9e460f76..882a52ed 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -138,6 +138,9 @@ class Variable: public ZoneObject {
bool is_accessed_from_inner_scope() const {
return is_accessed_from_inner_scope_;
}
+ void MarkAsAccessedFromInnerScope() {
+ is_accessed_from_inner_scope_ = true;
+ }
bool is_used() { return is_used_; }
void set_is_used(bool flag) { is_used_ = flag; }
@@ -148,6 +151,7 @@ class Variable: public ZoneObject {
bool IsStackAllocated() const;
bool IsParameter() const; // Includes 'this'.
bool IsStackLocal() const;
+ bool IsContextSlot() const;
bool is_dynamic() const {
return (mode_ == DYNAMIC ||
@@ -175,6 +179,7 @@ class Variable: public ZoneObject {
}
Expression* rewrite() const { return rewrite_; }
+ void set_rewrite(Expression* expr) { rewrite_ = expr; }
StaticType* type() { return &type_; }
@@ -197,8 +202,6 @@ class Variable: public ZoneObject {
// Code generation.
// rewrite_ is usually a Slot or a Property, but may be any expression.
Expression* rewrite_;
-
- friend class Scope; // Has explicit access to rewrite_.
};
diff --git a/src/version.cc b/src/version.cc
index c1cc2fc0..495de314 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 0
-#define BUILD_NUMBER 7
+#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 36299677..de01cfa3 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -186,6 +186,20 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
CPU::FlushICache(pc_, instruction_count);
}
+
+// -----------------------------------------------------------------------------
+// Register constants.
+
+const int Register::registerCodeByAllocationIndex[kNumAllocatableRegisters] = {
+ // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
+ 0, 3, 2, 1, 7, 8, 9, 11, 14, 12
+};
+
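+// The inverse mapping: register code -> allocation index, or -1 for
+// registers that are never allocated (rsp, rbp, rsi, r10, r13, r15).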
+const int Register::allocationIndexByRegisterCode[kNumRegisters] = {
+ 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
+};
+
+
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -2936,6 +2950,12 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
}
+void Assembler::db(uint8_t data) {
+ EnsureSpace ensure_space(this);
+ emit(data);
+}
+
+
void Assembler::dd(uint32_t data) {
EnsureSpace ensure_space(this);
emitl(data);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 7bcc7c56..be837f04 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -98,19 +98,29 @@ struct Register {
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 10;
+ static int ToAllocationIndex(Register reg) {
+ return allocationIndexByRegisterCode[reg.code()];
+ }
+
+ static Register FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ Register result = { registerCodeByAllocationIndex[index] };
+ return result;
+ }
+
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"rax",
- "rcx",
- "rdx",
"rbx",
+ "rdx",
+ "rcx",
"rdi",
"r8",
"r9",
"r11",
- "r12",
- "r14"
+ "r14",
+ "r12"
};
return names[index];
}
@@ -143,6 +153,9 @@ struct Register {
// Unfortunately we can't make this private in a struct when initializing
// by assignment.
int code_;
+ private:
+ static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
+ static const int allocationIndexByRegisterCode[kNumRegisters];
};
const Register rax = { 0 };
@@ -173,6 +186,12 @@ struct XMMRegister {
return reg.code() - 1;
}
+ static XMMRegister FromAllocationIndex(int index) {
+ ASSERT(0 <= index && index < kNumAllocatableRegisters);
+ XMMRegister result = { index + 1 };
+ return result;
+ }
+
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
@@ -196,6 +215,7 @@ struct XMMRegister {
}
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is(XMMRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
@@ -1241,7 +1261,7 @@ class Assembler : public Malloced {
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
- void db(uint8_t data) { UNIMPLEMENTED(); }
+ void db(uint8_t data);
void dd(uint32_t data);
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 5056f348..9feced2f 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -198,6 +198,7 @@ class GenericBinaryOpStub: public CodeStub {
}
friend class CodeGenerator;
+ friend class LCodeGen;
};
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 4e890cd4..6b19d3f1 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -27,6 +27,8 @@
#include "v8.h"
+#if defined(V8_TARGET_ARCH_X64)
+
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@@ -39,7 +41,8 @@ namespace internal {
int Deoptimizer::table_entry_size_ = 10;
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- UNIMPLEMENTED();
+ // UNIMPLEMENTED, for now just return.
+ return;
}
@@ -66,7 +69,8 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
void Deoptimizer::EntryGenerator::Generate() {
- UNIMPLEMENTED();
+ // UNIMPLEMENTED, for now just return.
+ return;
}
@@ -75,3 +79,5 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
}
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index fbbf176e..a2a0e7e9 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -45,7 +45,7 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
// TODO(x64): This should not be 0.
-static const int kNumSafepointRegisters = 0;
+static const int kNumSafepointRegisters = 8;
// ----------------------------------------------------
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 29cbed05..b54aeb97 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1178,8 +1178,12 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Label number, non_number, non_string, boolean, probe, miss;
// Probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ NOT_IN_LOOP,
+ MONOMORPHIC,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
// If the stub cache probing failed, the receiver might be a value.
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
new file mode 100644
index 00000000..151fad73
--- /dev/null
+++ b/src/x64/lithium-codegen-x64.cc
@@ -0,0 +1,1547 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
+#include "x64/lithium-codegen-x64.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
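+// A node in the move graph built by the gap resolver. Each node stands for
+// one operand; assigned_from() identifies the node whose value it receives.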
+class LGapNode: public ZoneObject {
+ public:
+ explicit LGapNode(LOperand* operand)
+ : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+ LOperand* operand() const { return operand_; }
+ bool IsResolved() const { return !IsAssigned() || resolved_; }
+ void MarkResolved() {
+ ASSERT(!IsResolved());
+ resolved_ = true;
+ }
+ int visited_id() const { return visited_id_; }
+ void set_visited_id(int id) {
+ ASSERT(id > visited_id_);
+ visited_id_ = id;
+ }
+
+ bool IsAssigned() const { return assigned_from_.is_set(); }
+ LGapNode* assigned_from() const { return assigned_from_.get(); }
+ void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+ LOperand* operand_;
+ SetOncePointer<LGapNode> assigned_from_;
+ bool resolved_;
+ int visited_id_;
+};
+
+
+LGapResolver::LGapResolver()
+ : nodes_(32),
+ identified_cycles_(4),
+ result_(16),
+ next_visited_id_(0) {
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::Resolve(
+ const ZoneList<LMoveOperands>* moves,
+ LOperand* marker_operand) {
+ nodes_.Rewind(0);
+ identified_cycles_.Rewind(0);
+ result_.Rewind(0);
+ next_visited_id_ = 0;
+
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) RegisterMove(move);
+ }
+
+ for (int i = 0; i < identified_cycles_.length(); ++i) {
+ ResolveCycle(identified_cycles_[i], marker_operand);
+ }
+
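+ // Emit the remaining, acyclic moves: repeatedly flush nodes whose source
+ // is already resolved until no unresolved nodes remain.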
+ int unresolved_nodes;
+ do {
+ unresolved_nodes = 0;
+ for (int j = 0; j < nodes_.length(); j++) {
+ LGapNode* node = nodes_[j];
+ if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+ AddResultMove(node->assigned_from(), node);
+ node->MarkResolved();
+ }
+ if (!node->IsResolved()) ++unresolved_nodes;
+ }
+ } while (unresolved_nodes > 0);
+ return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+ AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+ result_.Add(LMoveOperands(from, to));
+}
+
+
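+// Breaks a cycle of moves by using the marker operand as scratch. The
+// result list is emitted into machine code in reverse order (see the
+// comment in RegisterMove), so the generated sequence first parks the
+// start node's value in the marker, shifts each value one step along the
+// cycle, and finally moves the marker's contents into the last slot.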
+void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
+ ZoneList<LOperand*> cycle_operands(8);
+ cycle_operands.Add(marker_operand);
+ LGapNode* cur = start;
+ do {
+ cur->MarkResolved();
+ cycle_operands.Add(cur->operand());
+ cur = cur->assigned_from();
+ } while (cur != start);
+ cycle_operands.Add(marker_operand);
+
+ for (int i = cycle_operands.length() - 1; i > 0; --i) {
+ LOperand* from = cycle_operands[i];
+ LOperand* to = cycle_operands[i - 1];
+ AddResultMove(from, to);
+ }
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+ ASSERT(a != b);
+ LGapNode* cur = a;
+ while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+ cur->set_visited_id(visited_id);
+ cur = cur->assigned_from();
+ }
+
+ return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+ ASSERT(a != b);
+ return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+ if (move.source()->IsConstantOperand()) {
+ // Constant moves should be last in the machine code. Therefore add them
+ // first to the result set.
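+ // (The result list is consumed back to front when code is emitted.)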
+ AddResultMove(move.source(), move.destination());
+ } else {
+ LGapNode* from = LookupNode(move.source());
+ LGapNode* to = LookupNode(move.destination());
+ if (to->IsAssigned() && to->assigned_from() == from) {
+ move.Eliminate();
+ return;
+ }
+ ASSERT(!to->IsAssigned());
+ if (CanReach(from, to)) {
+ // This introduces a cycle. Save.
+ identified_cycles_.Add(from);
+ }
+ to->set_assigned_from(from);
+ }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+ for (int i = 0; i < nodes_.length(); ++i) {
+ if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+ }
+
+ // No node found => create a new one.
+ LGapNode* result = new LGapNode(operand);
+ nodes_.Add(result);
+ return result;
+}
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+ HPhase phase("Code generation", chunk());
+ ASSERT(is_unused());
+ status_ = GENERATING;
+ return GeneratePrologue() &&
+ GenerateBody() &&
+ GenerateDeferredCode() &&
+ GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+ ASSERT(is_done());
+ code->set_stack_slots(StackSlotCount());
+ code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+ if (FLAG_trace_bailout) {
+ SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+ PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+ PrintF("\n");
+ }
+ status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+ if (!FLAG_code_comments) return;
+ char buffer[4 * KB];
+ StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+ va_list arguments;
+ va_start(arguments, format);
+ builder.AddFormattedList(format, arguments);
+ va_end(arguments);
+
+ // Copy the string before recording it in the assembler to avoid
+ // issues when the stack-allocated buffer goes out of scope.
+ int length = builder.position();
+ Vector<char> copy = Vector<char>::New(length + 1);
+ memcpy(copy.start(), builder.Finalize(), copy.length());
+ masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+ ASSERT(is_generating());
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
+#endif
+
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS function.
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = StackSlotCount();
+ if (slots > 0) {
+ if (FLAG_debug_code) {
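+ // In debug builds, fill every stack slot with the zap value so reads
+ // from uninitialized slots are easy to spot.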
+ __ movl(rax, Immediate(slots));
+ __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
+ Label loop;
+ __ bind(&loop);
+ __ push(kScratchRegister);
+ __ decl(rax);
+ __ j(not_zero, &loop);
+ } else {
+ __ subq(rsp, Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ // On Windows, the stack may not be accessed more than one page below
+ // the most recently mapped page. To make the whole allocated area
+ // accessible, we touch each page in turn (the value written is
+ // irrelevant).
+ const int kPageSize = 4 * KB;
+ for (int offset = slots * kPointerSize - kPageSize;
+ offset > 0;
+ offset -= kPageSize) {
+ __ movq(Operand(rsp, offset), rax);
+ }
+#endif
+ }
+ }
+
+ // Trace the call.
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+ ASSERT(is_generating());
+ bool emit_instructions = true;
+ for (current_instruction_ = 0;
+ !is_aborted() && current_instruction_ < instructions_->length();
+ current_instruction_++) {
+ LInstruction* instr = instructions_->at(current_instruction_);
+ if (instr->IsLabel()) {
+ LLabel* label = LLabel::cast(instr);
+ emit_instructions = !label->HasReplacement();
+ }
+
+ if (emit_instructions) {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ instr->CompileToNative(this);
+ }
+ }
+ return !is_aborted();
+}
+
+
+LInstruction* LCodeGen::GetNextInstruction() {
+ if (current_instruction_ < instructions_->length() - 1) {
+ return instructions_->at(current_instruction_ + 1);
+ } else {
+ return NULL;
+ }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+ ASSERT(is_generating());
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ __ bind(code->entry());
+ code->Generate();
+ __ jmp(code->exit());
+ }
+
+ // Deferred code is the last part of the instruction sequence. Mark
+ // the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+ ASSERT(is_done());
+ safepoints_.Emit(masm(), StackSlotCount());
+ return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+ return Register::FromAllocationIndex(index);
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(int index) const {
+ return XMMRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+ ASSERT(op->IsRegister());
+ return ToRegister(op->index());
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+ ASSERT(op->IsDoubleRegister());
+ return ToDoubleRegister(op->index());
+}
+
+
+bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
+ return op->IsConstantOperand() &&
+ chunk_->LookupLiteralRepresentation(op).IsInteger32();
+}
+
+
+bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
+ return op->IsConstantOperand() &&
+ chunk_->LookupLiteralRepresentation(op).IsTagged();
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+ Handle<Object> value = chunk_->LookupLiteral(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+ ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+ value->Number());
+ return static_cast<int32_t>(value->Number());
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ Handle<Object> literal = chunk_->LookupLiteral(op);
+ Representation r = chunk_->LookupLiteralRepresentation(op);
+ ASSERT(r.IsTagged());
+ return literal;
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+  // Does not handle registers. In the X64 assembler, plain registers are
+  // not representable as an Operand.
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
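+  // For example, with kPointerSize == 8, spill slot 0 maps to [rbp - 24]
+  // and the last incoming parameter (index -1) to [rbp + 16], just above
+  // the return address at [rbp + 8].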
+ int index = op->index();
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, and
+ // context in the fixed part of the frame.
+ return Operand(rbp, -(index + 3) * kPointerSize);
+ } else {
+ // Incoming parameter. Skip the return address.
+ return Operand(rbp, -(index - 1) * kPointerSize);
+ }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+ Translation* translation) {
+ if (environment == NULL) return;
+
+ // The translation includes one command per value in the environment.
+ int translation_size = environment->values()->length();
+ // The output frame height does not include the parameters.
+ int height = translation_size - environment->parameter_count();
+
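+  // Translate the outer environments first so that frames are written
+  // from outermost to innermost.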
+ WriteTranslation(environment->outer(), translation);
+ int closure_id = DefineDeoptimizationLiteral(environment->closure());
+ translation->BeginFrame(environment->ast_id(), closure_id, height);
+ for (int i = 0; i < translation_size; ++i) {
+ LOperand* value = environment->values()->at(i);
+ // spilled_registers_ and spilled_double_registers_ are either
+ // both NULL or both set.
+ if (environment->spilled_registers() != NULL && value != NULL) {
+ if (value->IsRegister() &&
+ environment->spilled_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(translation,
+ environment->spilled_registers()[value->index()],
+ environment->HasTaggedValueAt(i));
+ } else if (
+ value->IsDoubleRegister() &&
+ environment->spilled_double_registers()[value->index()] != NULL) {
+ translation->MarkDuplicate();
+ AddToTranslation(
+ translation,
+ environment->spilled_double_registers()[value->index()],
+ false);
+ }
+ }
+
+ AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
+ }
+}
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+ LOperand* op,
+ bool is_tagged) {
+ if (op == NULL) {
+ // TODO(twuerthinger): Introduce marker operands to indicate that this value
+    // is not present and must be reconstructed by the deoptimizer. Currently
+ // this is only used for the arguments object.
+ translation->StoreArgumentsObject();
+ } else if (op->IsStackSlot()) {
+ if (is_tagged) {
+ translation->StoreStackSlot(op->index());
+ } else {
+ translation->StoreInt32StackSlot(op->index());
+ }
+ } else if (op->IsDoubleStackSlot()) {
+ translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsArgument()) {
+ ASSERT(is_tagged);
+ int src_index = StackSlotCount() + op->index();
+ translation->StoreStackSlot(src_index);
+ } else if (op->IsRegister()) {
+ Register reg = ToRegister(op);
+ if (is_tagged) {
+ translation->StoreRegister(reg);
+ } else {
+ translation->StoreInt32Register(reg);
+ }
+ } else if (op->IsDoubleRegister()) {
+ XMMRegister reg = ToDoubleRegister(op);
+ translation->StoreDoubleRegister(reg);
+ } else if (op->IsConstantOperand()) {
+ Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+ int src_index = DefineDeoptimizationLiteral(literal);
+ translation->StoreLiteral(src_index);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr) {
+ if (instr != NULL) {
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ __ call(code, mode);
+ RegisterLazyDeoptimization(instr);
+ } else {
+ LPointerMap no_pointers(0);
+ RecordPosition(no_pointers.position());
+ __ call(code, mode);
+ RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
+ }
+
+ // Signal that we don't inline smi code before these stubs in the
+ // optimizing code generator.
+ if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+ code->kind() == Code::COMPARE_IC) {
+ __ nop();
+ }
+}
+
+
+void LCodeGen::CallRuntime(Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr) {
+ Abort("Unimplemented: %s", "CallRuntime");
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+  // Create the environment to bail out to. If the call has side effects,
+  // execution has to continue after the call; otherwise execution could
+  // resume from a previous bailout point and repeat the call.
+ LEnvironment* deoptimization_environment;
+ if (instr->HasDeoptimizationEnvironment()) {
+ deoptimization_environment = instr->deoptimization_environment();
+ } else {
+ deoptimization_environment = instr->environment();
+ }
+
+ RegisterEnvironmentForDeoptimization(deoptimization_environment);
+ RecordSafepoint(instr->pointer_map(),
+ deoptimization_environment->deoptimization_index());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+ if (!environment->HasBeenRegistered()) {
+ // Physical stack frame layout:
+ // -x ............. -4 0 ..................................... y
+ // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+ // Layout of the environment:
+ // 0 ..................................................... size-1
+ // [parameters] [locals] [expression stack including arguments]
+
+ // Layout of the translation:
+ // 0 ........................................................ size - 1 + 4
+ // [expression stack including arguments] [locals] [4 words] [parameters]
+ // |>------------ translation_size ------------<|
+
+ int frame_count = 0;
+ for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+ ++frame_count;
+ }
+ Translation translation(&translations_, frame_count);
+ WriteTranslation(environment, &translation);
+ int deoptimization_index = deoptimizations_.length();
+ environment->Register(deoptimization_index, translation.index());
+ deoptimizations_.Add(environment);
+ }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+ Abort("Unimplemented: %s", "Deoptimiz");
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+ ASSERT(FLAG_deopt);
+ Handle<DeoptimizationInputData> data =
+ Factory::NewDeoptimizationInputData(length, TENURED);
+
+ data->SetTranslationByteArray(*translations_.CreateByteArray());
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+ Handle<FixedArray> literals =
+ Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, Smi::FromInt(env->ast_id()));
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ }
+ code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.length();
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.Add(literal);
+ return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ ASSERT(deoptimization_literals_.length() == 0);
+
+ const ZoneList<Handle<JSFunction> >* inlined_closures =
+ chunk()->inlined_closures();
+
+ for (int i = 0, length = inlined_closures->length();
+ i < length;
+ i++) {
+ DefineDeoptimizationLiteral(inlined_closures->at(i));
+ }
+
+ inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ int deoptimization_index) {
+ const ZoneList<LOperand*>* operands = pointers->operands();
+ Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+ deoptimization_index);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index());
+ }
+ }
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ int deoptimization_index) {
+ const ZoneList<LOperand*>* operands = pointers->operands();
+ Safepoint safepoint =
+ safepoints_.DefineSafepointWithRegisters(
+ masm(), arguments, deoptimization_index);
+ for (int i = 0; i < operands->length(); i++) {
+ LOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index());
+ } else if (pointer->IsRegister()) {
+ safepoint.DefinePointerRegister(ToRegister(pointer));
+ }
+ }
+ // Register rsi always contains a pointer to the context.
+ safepoint.DefinePointerRegister(rsi);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+ if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+ if (label->is_loop_header()) {
+ Comment(";;; B%d - LOOP entry", label->block_id());
+ } else {
+ Comment(";;; B%d", label->block_id());
+ }
+ __ bind(label->label());
+ current_block_ = label->block_id();
+ LCodeGen::DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+ // xmm0 must always be a scratch register.
+ XMMRegister xmm_scratch = xmm0;
+ LUnallocated marker_operand(LUnallocated::NONE);
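+  // The gap resolver breaks cycles in the move graph by routing them
+  // through the marker operand; the marker cases below save to and
+  // restore from the matching scratch register.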
+
+ Register cpu_scratch = kScratchRegister;
+
+ const ZoneList<LMoveOperands>* moves =
+ resolver_.Resolve(move->move_operands(), &marker_operand);
+ for (int i = moves->length() - 1; i >= 0; --i) {
+ LMoveOperands move = moves->at(i);
+ LOperand* from = move.source();
+ LOperand* to = move.destination();
+ ASSERT(!from->IsDoubleRegister() ||
+ !ToDoubleRegister(from).is(xmm_scratch));
+ ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
+ ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
+ ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
+ if (from->IsConstantOperand()) {
+ LConstantOperand* constant_from = LConstantOperand::cast(from);
+ if (to->IsRegister()) {
+ if (IsInteger32Constant(constant_from)) {
+ __ movl(ToRegister(to), Immediate(ToInteger32(constant_from)));
+ } else {
+ __ Move(ToRegister(to), ToHandle(constant_from));
+ }
+ } else {
+ if (IsInteger32Constant(constant_from)) {
+ __ movl(ToOperand(to), Immediate(ToInteger32(constant_from)));
+ } else {
+ __ Move(ToOperand(to), ToHandle(constant_from));
+ }
+ }
+ } else if (from == &marker_operand) {
+ if (to->IsRegister()) {
+ __ movq(ToRegister(to), cpu_scratch);
+ } else if (to->IsStackSlot()) {
+ __ movq(ToOperand(to), cpu_scratch);
+ } else if (to->IsDoubleRegister()) {
+ __ movsd(ToDoubleRegister(to), xmm_scratch);
+ } else {
+ ASSERT(to->IsDoubleStackSlot());
+ __ movsd(ToOperand(to), xmm_scratch);
+ }
+ } else if (to == &marker_operand) {
+ if (from->IsRegister()) {
+ __ movq(cpu_scratch, ToRegister(from));
+ } else if (from->IsStackSlot()) {
+ __ movq(cpu_scratch, ToOperand(from));
+ } else if (from->IsDoubleRegister()) {
+ __ movsd(xmm_scratch, ToDoubleRegister(from));
+ } else {
+ ASSERT(from->IsDoubleStackSlot());
+ __ movsd(xmm_scratch, ToOperand(from));
+ }
+ } else if (from->IsRegister()) {
+ if (to->IsRegister()) {
+ __ movq(ToRegister(to), ToRegister(from));
+ } else {
+ __ movq(ToOperand(to), ToRegister(from));
+ }
+ } else if (to->IsRegister()) {
+ __ movq(ToRegister(to), ToOperand(from));
+ } else if (from->IsStackSlot()) {
+ ASSERT(to->IsStackSlot());
+ __ push(rax);
+ __ movq(rax, ToOperand(from));
+ __ movq(ToOperand(to), rax);
+ __ pop(rax);
+ } else if (from->IsDoubleRegister()) {
+ ASSERT(to->IsDoubleStackSlot());
+ __ movsd(ToOperand(to), ToDoubleRegister(from));
+ } else if (to->IsDoubleRegister()) {
+ ASSERT(from->IsDoubleStackSlot());
+ __ movsd(ToDoubleRegister(to), ToOperand(from));
+ } else {
+ ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
+ __ movsd(xmm_scratch, ToOperand(from));
+ __ movsd(ToOperand(to), xmm_scratch);
+ }
+ }
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+ for (int i = LGap::FIRST_INNER_POSITION;
+ i <= LGap::LAST_INNER_POSITION;
+ i++) {
+ LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+ LParallelMove* move = gap->GetParallelMove(inner_pos);
+ if (move != NULL) DoParallelMove(move);
+ }
+
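+  // If a lazy bailout follows this gap, record the pc after the moves so
+  // that the safepoint describes the fully resolved state.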
+ LInstruction* next = GetNextInstruction();
+ if (next != NULL && next->IsLazyBailout()) {
+ int pc = masm()->pc_offset();
+ safepoints_.SetPcAfterGap(pc);
+ }
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+ Abort("Unimplemented: %s", "DoCallStub");
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+ // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ Abort("Unimplemented: %s", "DoModI");
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ Abort("Unimplemented: %s", "DoDivI");}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+ Abort("Unimplemented: %s", "DoMultI");}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+ Abort("Unimplemented: %s", "DoBitI");}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+ Abort("Unimplemented: %s", "DoShiftI");
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+ Abort("Unimplemented: %s", "DoSubI");
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+ ASSERT(instr->result()->IsRegister());
+ __ movl(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+ ASSERT(instr->result()->IsDoubleRegister());
+ XMMRegister res = ToDoubleRegister(instr->result());
+ double v = instr->value();
+  // Use xorpd to produce +0.0 in a fast and compact way, but avoid doing
+  // so if the constant is -0.0.
+ if (BitCast<uint64_t, double>(v) == 0) {
+ __ xorpd(res, res);
+ } else {
+ Register tmp = ToRegister(instr->TempAt(0));
+ int32_t v_int32 = static_cast<int32_t>(v);
+ if (static_cast<double>(v_int32) == v) {
+ __ movl(tmp, Immediate(v_int32));
+ __ cvtlsi2sd(res, tmp);
+ } else {
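+      // Load the raw 64-bit IEEE 754 bit pattern of the double through a
+      // general purpose register.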
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ __ Set(tmp, int_val);
+ __ movd(res, tmp);
+ }
+ }
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+ ASSERT(instr->result()->IsRegister());
+ __ Move(ToRegister(instr->result()), instr->value());
+}
+
+
+void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
+ Abort("Unimplemented: %s", "DoJSArrayLength");
+}
+
+
+void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
+ Abort("Unimplemented: %s", "DoFixedArrayLength");
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+ Abort("Unimplemented: %s", "DoValueOf");
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+ Abort("Unimplemented: %s", "DoBitNotI");
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+ Abort("Unimplemented: %s", "DoThrow");
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+
+ if (right->IsConstantOperand()) {
+ __ addl(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ } else if (right->IsRegister()) {
+ __ addl(ToRegister(left), ToRegister(right));
+ } else {
+ __ addl(ToRegister(left), ToOperand(right));
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ Abort("Unimplemented: %s", "DoArithmeticD");
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+ ASSERT(ToRegister(instr->InputAt(0)).is(rdx));
+ ASSERT(ToRegister(instr->InputAt(1)).is(rax));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, NO_GENERIC_BINARY_FLAGS);
+ stub.SetArgsInRegisters();
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+ for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+ LLabel* label = chunk_->GetLabel(i);
+ if (!label->HasReplacement()) return i;
+ }
+ return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+ Abort("Unimplemented: %s", "EmitBranch");
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+ Abort("Unimplemented: %s", "DoBranch");
+}
+
+
+void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+ block = chunk_->LookupDestination(block);
+ int next_block = GetNextEmittedBlock(current_block_);
+ if (block != next_block) {
+ // Perform stack overflow check if this goto needs it before jumping.
+ if (deferred_stack_check != NULL) {
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, chunk_->GetAssemblyLabel(block));
+ __ jmp(deferred_stack_check->entry());
+ deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
+ } else {
+ __ jmp(chunk_->GetAssemblyLabel(block));
+ }
+ }
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
+ Abort("Unimplemented: %s", "DoDeferredStackCheck");
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LGoto* instr_;
+ };
+
+ DeferredStackCheck* deferred = NULL;
+ if (instr->include_stack_check()) {
+ deferred = new DeferredStackCheck(this, instr);
+ }
+ EmitGoto(instr->block_id(), deferred);
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+ Condition cond = no_condition;
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT:
+ cond = equal;
+ break;
+ case Token::LT:
+ cond = is_unsigned ? below : less;
+ break;
+ case Token::GT:
+ cond = is_unsigned ? above : greater;
+ break;
+ case Token::LTE:
+ cond = is_unsigned ? below_equal : less_equal;
+ break;
+ case Token::GTE:
+ cond = is_unsigned ? above_equal : greater_equal;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+ return cond;
+}
+
+
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+ Abort("Unimplemented: %s", "EmitCmpI");
+}
+
+
+void LCodeGen::DoCmpID(LCmpID* instr) {
+ Abort("Unimplemented: %s", "DoCmpID");
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoCmpIDAndBranch");
+}
+
+
+void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+ Abort("Unimplemented: %s", "DoCmpJSObjectEq");
+}
+
+
+void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoCmpJSObjectAndBranch");
+}
+
+
+void LCodeGen::DoIsNull(LIsNull* instr) {
+ Abort("Unimplemented: %s", "DoIsNull");
+}
+
+
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoIsNullAndBranch");
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object) {
+ Abort("Unimplemented: %s", "EmitIsObject");
+ return below_equal;
+}
+
+
+void LCodeGen::DoIsObject(LIsObject* instr) {
+ Abort("Unimplemented: %s", "DoIsObject");
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoIsObjectAndBranch");
+}
+
+
+void LCodeGen::DoIsSmi(LIsSmi* instr) {
+ Abort("Unimplemented: %s", "DoIsSmi");
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoIsSmiAndBranch");
+}
+
+
+void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
+ Abort("Unimplemented: %s", "DoHasInstanceType");
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoHasInstanceTypeAndBranch");
+}
+
+
+void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
+ Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+ LHasCachedArrayIndexAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoHasCachedArrayIndexAndBranch");
+}
+
+
+// Branches to a label or falls through with the answer in the z flag. Trashes
+// the temp registers, but not the input. Only input and temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+ Label* is_false,
+                               Handle<String> class_name,
+ Register input,
+ Register temp,
+ Register temp2) {
+ Abort("Unimplemented: %s", "EmitClassOfTest");
+}
+
+
+void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
+ Abort("Unimplemented: %s", "DoClassOfTest");
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoClassOfTestAndBranch");
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoCmpMapAndBranch");
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+ Abort("Unimplemented: %s", "DoInstanceOf");
+}
+
+
+void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoInstanceOfAndBranch");
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ Abort("Unimplemented: %s", "DoInstanceOfKnowGLobal");
+}
+
+
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ Abort("Unimplemented: %s", "DoDeferredLInstanceOfKnownGlobakl");
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+ Abort("Unimplemented: %s", "DoCmpT");
+}
+
+
+void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoCmpTAndBranch");
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+ if (FLAG_trace) {
+ // Preserve the return value on the stack and rely on the runtime
+ // call to return the value in the same register.
+ __ push(rax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ __ ret((ParameterCount() + 1) * kPointerSize);
+}
+
+
+void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+ Abort("Unimplemented: %s", "DoLoadGlobal");
+}
+
+
+void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+ Abort("Unimplemented: %s", "DoStoreGlobal");
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+ Abort("Unimplemented: %s", "DoLoadContextSlot");
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ Abort("Unimplemented: %s", "DoLoadNamedField");
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+ Abort("Unimplemented: %s", "DoLoadNamedGeneric");
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+ Abort("Unimplemented: %s", "DoLoadFunctionPrototype");
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+ Abort("Unimplemented: %s", "DoLoadElements");
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+ Abort("Unimplemented: %s", "DoAccessArgumentsAt");
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+ Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+ Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+ Abort("Unimplemented: %s", "DoArgumentsElements");
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+ Abort("Unimplemented: %s", "DoArgumentsLength");
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Abort("Unimplemented: %s", "DoApplyArguments");
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+ Abort("Unimplemented: %s", "DoPushArgument");
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+ Abort("Unimplemented: %s", "DoGlobalObject");
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+ Abort("Unimplemented: %s", "DoGlobalReceiver");
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+ int arity,
+ LInstruction* instr) {
+ Abort("Unimplemented: %s", "CallKnownFunction");
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+ Abort("Unimplemented: %s", "DoCallConstantFunction");
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoDeferredMathAbsTaggedHeapNumber");
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoMathAbs");
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoMathFloor");
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoMathRound");
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoMathSqrt");
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoMathPowHalf");
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+ Abort("Unimplemented: %s", "DoPower");
+}
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoMathLog");
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoMathCos");
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoMathSin");
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoUnaryMathOperation");
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+ Abort("Unimplemented: %s", "DoCallKeyed");
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+ Abort("Unimplemented: %s", "DoCallNamed");
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ Abort("Unimplemented: %s", "DoCallFunction");
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+ Abort("Unimplemented: %s", "DoCallGlobal");
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+ Abort("Unimplemented: %s", "DoCallKnownGlobal");
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+ Abort("Unimplemented: %s", "DoCallNew");
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+ Abort("Unimplemented: %s", "DoCallRuntime");
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Abort("Unimplemented: %s", "DoStoreNamedField");
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+ Abort("Unimplemented: %s", "DoStoreNamedGeneric");
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+ Abort("Unimplemented: %s", "DoBoundsCheck");
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+ Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+ Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ Abort("Unimplemented: %s", "DoInteger32ToDouble");
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+ Abort("Unimplemented: %s", "DoNumberTagI");
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+ Abort("Unimplemented: %s", "DoDeferredNumberTagI");
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+ Abort("Unimplemented: %s", "DoNumberTagD");
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+ Abort("Unimplemented: %s", "DoDeferredNumberTagD");
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+ Abort("Unimplemented: %s", "DoSmiTag");
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+ Abort("Unimplemented: %s", "DoSmiUntag");
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+ XMMRegister result_reg,
+ LEnvironment* env) {
+ Abort("Unimplemented: %s", "EmitNumberUntagD");
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+ Abort("Unimplemented: %s", "DoDeferredTaggedToI");
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ Abort("Unimplemented: %s", "DoTaggedToI");
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+ Abort("Unimplemented: %s", "DoNumberUntagD");
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+ Abort("Unimplemented: %s", "DoDoubleToI");
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+ Abort("Unimplemented: %s", "DoCheckSmi");
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+ Abort("Unimplemented: %s", "DoCheckInstanceType");
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+ Abort("Unimplemented: %s", "DoCheckFunction");
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+ Abort("Unimplemented: %s", "DoCheckMap");
+}
+
+
+void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
+ Abort("Unimplemented: %s", "LoadHeapObject");
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+ Abort("Unimplemented: %s", "DoArrayLiteral");
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+ Abort("Unimplemented: %s", "DoObjectLiteral");
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ Abort("Unimplemented: %s", "DoRegExpLiteral");
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ Abort("Unimplemented: %s", "DoFunctionLiteral");
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+ Abort("Unimplemented: %s", "DoTypeof");
+}
+
+
+void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
+ Abort("Unimplemented: %s", "DoTypeofIs");
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoTypeofIsAndBranch");
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name) {
+ Abort("Unimplemented: %s", "EmitTypeofIs");
+ return no_condition;
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  // No code for lazy bailout instruction. Used to capture the environment
+  // after a call for populating the safepoint data with deoptimization data.
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+ DeoptimizeIf(no_condition, instr->environment());
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+ Abort("Unimplemented: %s", "DoDeleteProperty");
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ // Perform stack overflow check.
+ NearLabel done;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &done);
+
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+ Abort("Unimplemented: %s", "DoOsrEntry");
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index cd1f08de..8d1c5c4e 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "x64/lithium-x64.h"
+#include "checks.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
@@ -39,22 +40,256 @@ namespace internal {
// Forward declarations.
class LDeferredCode;
+class LGapNode;
+class SafepointGenerator;
+
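+// Resolves a set of parallel moves into an equivalent sequence of
+// sequential moves, breaking cycles through the marker operand supplied
+// by the caller.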
+class LGapResolver BASE_EMBEDDED {
+ public:
+ LGapResolver();
+ const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
+ LOperand* marker_operand);
+
+ private:
+ LGapNode* LookupNode(LOperand* operand);
+ bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+ bool CanReach(LGapNode* a, LGapNode* b);
+ void RegisterMove(LMoveOperands move);
+ void AddResultMove(LOperand* from, LOperand* to);
+ void AddResultMove(LGapNode* from, LGapNode* to);
+ void ResolveCycle(LGapNode* start, LOperand* marker_operand);
+
+ ZoneList<LGapNode*> nodes_;
+ ZoneList<LGapNode*> identified_cycles_;
+ ZoneList<LMoveOperands> result_;
+ int next_visited_id_;
+};
+
class LCodeGen BASE_EMBEDDED {
public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+ : chunk_(chunk),
+ masm_(assembler),
+ info_(info),
+ current_block_(-1),
+ current_instruction_(-1),
+ instructions_(chunk->instructions()),
+ deoptimizations_(4),
+ deoptimization_literals_(8),
+ inlined_function_count_(0),
+ scope_(chunk->graph()->info()->scope()),
+ status_(UNUSED),
+ deferred_(8),
+ osr_pc_offset_(-1) {
+ PopulateDeoptimizationLiteralsWithInlinedFunctions();
+ }
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
- bool GenerateCode() {
- UNIMPLEMENTED();
- return false;
- }
+ bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
- void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+ void FinishCode(Handle<Code> code);
+
+ // Deferred code support.
+ void DoDeferredNumberTagD(LNumberTagD* instr);
+ void DoDeferredNumberTagI(LNumberTagI* instr);
+ void DoDeferredTaggedToI(LTaggedToI* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
+
+ // Parallel move support.
+ void DoParallelMove(LParallelMove* move);
+
+ // Emit frame translation commands for an environment.
+ void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+ enum Status {
+ UNUSED,
+ GENERATING,
+ DONE,
+ ABORTED
+ };
+
+ bool is_unused() const { return status_ == UNUSED; }
+ bool is_generating() const { return status_ == GENERATING; }
+ bool is_done() const { return status_ == DONE; }
+ bool is_aborted() const { return status_ == ABORTED; }
+
+ LChunk* chunk() const { return chunk_; }
+ Scope* scope() const { return scope_; }
+ HGraph* graph() const { return chunk_->graph(); }
+ MacroAssembler* masm() const { return masm_; }
+
+ int GetNextEmittedBlock(int block);
+ LInstruction* GetNextInstruction();
+
+ void EmitClassOfTest(Label* if_true,
+ Label* if_false,
+ Handle<String> class_name,
+ Register input,
+ Register temporary,
+ Register temporary2);
+
+ int StackSlotCount() const { return chunk()->spill_slot_count(); }
+ int ParameterCount() const { return scope()->num_parameters(); }
+
+ void Abort(const char* format, ...);
+ void Comment(const char* format, ...);
+
+ void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+ // Code generation passes. Returns true if code generation should
+ // continue.
+ bool GeneratePrologue();
+ bool GenerateBody();
+ bool GenerateDeferredCode();
+ bool GenerateSafepointTable();
+
+ void CallCode(Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr);
+ void CallRuntime(Runtime::Function* function,
+ int num_arguments,
+ LInstruction* instr);
+ void CallRuntime(Runtime::FunctionId id,
+ int num_arguments,
+ LInstruction* instr) {
+ Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, num_arguments, instr);
+ }
+
+ // Generate a direct call to a known function. Expects the function
+  // to be in rdi.
+ void CallKnownFunction(Handle<JSFunction> function,
+ int arity,
+ LInstruction* instr);
+
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+
+ void RegisterLazyDeoptimization(LInstruction* instr);
+ void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+ void DeoptimizeIf(Condition cc, LEnvironment* environment);
+
+ void AddToTranslation(Translation* translation,
+ LOperand* op,
+ bool is_tagged);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+ Register ToRegister(int index) const;
+ XMMRegister ToDoubleRegister(int index) const;
+ Register ToRegister(LOperand* op) const;
+ XMMRegister ToDoubleRegister(LOperand* op) const;
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ int ToInteger32(LConstantOperand* op) const;
+ bool IsTaggedConstant(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op) const;
+
+ // Specific math operations - used from DoUnaryMathOperation.
+ void DoMathAbs(LUnaryMathOperation* instr);
+ void DoMathFloor(LUnaryMathOperation* instr);
+ void DoMathRound(LUnaryMathOperation* instr);
+ void DoMathSqrt(LUnaryMathOperation* instr);
+ void DoMathPowHalf(LUnaryMathOperation* instr);
+ void DoMathLog(LUnaryMathOperation* instr);
+ void DoMathCos(LUnaryMathOperation* instr);
+ void DoMathSin(LUnaryMathOperation* instr);
+
+ // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+ void RecordSafepointWithRegisters(LPointerMap* pointers,
+ int arguments,
+ int deoptimization_index);
+ void RecordPosition(int position);
+
+ static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+ void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitBranch(int left_block, int right_block, Condition cc);
+ void EmitCmpI(LOperand* left, LOperand* right);
+ void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
+
+ // Emits optimized code for typeof x == "y". Modifies input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitTypeofIs(Label* true_label, Label* false_label,
+ Register input, Handle<String> type_name);
+
+ // Emits optimized code for %_IsObject(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsObject(Register input,
+ Register temp1,
+ Register temp2,
+ Label* is_not_object,
+ Label* is_object);
+
+ LChunk* const chunk_;
+ MacroAssembler* const masm_;
+ CompilationInfo* const info_;
+
+ int current_block_;
+ int current_instruction_;
+ const ZoneList<LInstruction*>* instructions_;
+ ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<Handle<Object> > deoptimization_literals_;
+ int inlined_function_count_;
+ Scope* const scope_;
+ Status status_;
+ TranslationBuffer translations_;
+ ZoneList<LDeferredCode*> deferred_;
+ int osr_pc_offset_;
+
+ // Builder that keeps track of safepoints in the code. The table
+ // itself is emitted at the end of the generated code.
+ SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+ LGapResolver resolver_;
+
+ friend class LDeferredCode;
+ friend class LEnvironment;
+ friend class SafepointGenerator;
+ DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
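+// A piece of code emitted out of line after the main instruction
+// sequence. Jumping to entry() executes the code emitted by Generate(),
+// which ends with a jump to exit().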
+class LDeferredCode: public ZoneObject {
+ public:
+ explicit LDeferredCode(LCodeGen* codegen)
+ : codegen_(codegen), external_exit_(NULL) {
+ codegen->AddDeferredCode(this);
+ }
+
+ virtual ~LDeferredCode() { }
+ virtual void Generate() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+ Label* entry() { return &entry_; }
+ Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+
+ protected:
+ LCodeGen* codegen() const { return codegen_; }
+ MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+ LCodeGen* codegen_;
+ Label entry_;
+ Label exit_;
+ Label* external_exit_;
};
} } // namespace v8::internal
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 8afa9d47..5ef6eb75 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -25,12 +25,442 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
#include "x64/lithium-x64.h"
#include "x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
+#define DEFINE_COMPILE(type) \
+ void L##type::CompileToNative(LCodeGen* generator) { \
+ generator->Do##type(this); \
+ }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ register_spills_[i] = NULL;
+ }
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ double_register_spills_[i] = NULL;
+ }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+ LOperand* spill_operand) {
+ ASSERT(spill_operand->IsStackSlot());
+ ASSERT(register_spills_[allocation_index] == NULL);
+ register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+ LOperand* spill_operand) {
+ ASSERT(spill_operand->IsDoubleStackSlot());
+ ASSERT(double_register_spills_[allocation_index] == NULL);
+ double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+ stream->Add("%s ", this->Mnemonic());
+ if (HasResult()) {
+ PrintOutputOperandTo(stream);
+ }
+
+ PrintDataTo(stream);
+
+ if (HasEnvironment()) {
+ stream->Add(" ");
+ environment()->PrintTo(stream);
+ }
+
+ if (HasPointerMap()) {
+ stream->Add(" ");
+ pointer_map()->PrintTo(stream);
+ }
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ inputs_.PrintOperandsTo(stream);
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+ results_.PrintOperandsTo(stream);
+}
+
+
+template<typename T, int N>
+void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
+ for (int i = 0; i < N; i++) {
+ if (i > 0) stream->Add(" ");
+ elems_[i]->PrintTo(stream);
+ }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+ LGap::PrintDataTo(stream);
+ LLabel* rep = replacement();
+ if (rep != NULL) {
+ stream->Add(" Dead block replaced with B%d", rep->block_id());
+ }
+}
+
+
+bool LGap::IsRedundant() const {
+ for (int i = 0; i < 4; i++) {
+ if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+ for (int i = 0; i < 4; i++) {
+ stream->Add("(");
+ if (parallel_moves_[i] != NULL) {
+ parallel_moves_[i]->PrintDataTo(stream);
+ }
+ stream->Add(") ");
+ }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-d";
+ case Token::SUB: return "sub-d";
+ case Token::MUL: return "mul-d";
+ case Token::DIV: return "div-d";
+ case Token::MOD: return "mod-d";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+ switch (op()) {
+ case Token::ADD: return "add-t";
+ case Token::SUB: return "sub-t";
+ case Token::MUL: return "mul-t";
+ case Token::MOD: return "mod-t";
+ case Token::DIV: return "div-t";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+ InputAt(0)->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" %s ", Token::String(op()));
+ InputAt(1)->PrintTo(stream);
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(is_strict() ? " === null" : " == null");
+ stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_object(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_smi(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_instance_type(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if has_cached_array_index(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if class_of_test(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(", \"%o\") then B%d else B%d",
+ *hydrogen()->class_name(),
+ true_block_id(),
+ false_block_id());
+}
+
+
+void LTypeofIs::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if typeof ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" == \"%s\" then B%d else B%d",
+ *hydrogen()->type_literal()->ToCString(),
+ true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
+ stream->Add("/%s ", hydrogen()->OpName());
+ InputAt(0)->PrintTo(stream);
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+ stream->Add("(%d, %d)", context_chain_length(), slot_index());
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) {
+ stream->Add("[ecx] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) {
+ SmartPointer<char> name_string = name()->ToCString();
+ stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) {
+ SmartPointer<char> name_string = name()->ToCString();
+ stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
+ stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+}
+
+
+void LClassOfTest::PrintDataTo(StringStream* stream) {
+ stream->Add("= class_of_test(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(", \"%o\")", *hydrogen()->class_name());
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+ arguments()->PrintTo(stream);
+
+ stream->Add(" length ");
+ length()->PrintTo(stream);
+
+ stream->Add(" index ");
+ index()->PrintTo(stream);
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+ return spill_slot_count_++;
+}
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+  // All stack slots are double-width stack slots on x64. Alternatively,
+  // we could at some point start using half-size stack slots for int32
+  // values.
+ int index = GetNextSpillIndex(is_double);
+ if (is_double) {
+ return LDoubleStackSlot::Create(index);
+ } else {
+ return LStackSlot::Create(index);
+ }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+ HPhase phase("Mark empty blocks", this);
+ for (int i = 0; i < graph()->blocks()->length(); ++i) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ int first = block->first_instruction_index();
+ int last = block->last_instruction_index();
+ LInstruction* first_instr = instructions()->at(first);
+ LInstruction* last_instr = instructions()->at(last);
+
+ LLabel* label = LLabel::cast(first_instr);
+ if (last_instr->IsGoto()) {
+ LGoto* goto_instr = LGoto::cast(last_instr);
+ if (!goto_instr->include_stack_check() &&
+ label->IsRedundant() &&
+ !label->is_loop_header()) {
+ bool can_eliminate = true;
+        for (int j = first + 1; j < last && can_eliminate; ++j) {
+          LInstruction* cur = instructions()->at(j);
+ if (cur->IsGap()) {
+ LGap* gap = LGap::cast(cur);
+ if (!gap->IsRedundant()) {
+ can_eliminate = false;
+ }
+ } else {
+ can_eliminate = false;
+ }
+ }
+
+ if (can_eliminate) {
+ label->set_replacement(GetLabel(goto_instr->block_id()));
+ }
+ }
+ }
+ }
+}
+
+
+void LStoreNamed::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+ LGap* gap = new LGap(block);
+ int index = -1;
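+  // Place the gap before control instructions and after all others, so
+  // that every instruction has an adjacent gap for resolving moves.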
+ if (instr->IsControl()) {
+ instructions_.Add(gap);
+ index = instructions_.length();
+ instructions_.Add(instr);
+ } else {
+ index = instructions_.length();
+ instructions_.Add(instr);
+ instructions_.Add(gap);
+ }
+ if (instr->HasPointerMap()) {
+ pointer_maps_.Add(instr->pointer_map());
+ instr->pointer_map()->set_lithium_position(index);
+ }
+ return index;
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+ return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+ // The receiver is at index 0, the first parameter at index 1, so we
+ // shift all parameter indexes down by the number of parameters, and
+ // make sure they end up negative so they are distinguishable from
+ // spill slots.
+ int result = index - graph()->info()->scope()->num_parameters() - 1;
+ ASSERT(result < 0);
+ return result;
+}
+
+
+// A parameter relative to rbp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+ ASSERT(-1 <= index); // -1 is the receiver.
+ return (1 + graph()->info()->scope()->num_parameters() - index) *
+ kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+ return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+ return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
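+  // Scan backwards for the closest gap at or before index.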
+ while (!IsGapAt(index)) index--;
+ return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+ GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+ return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+ LConstantOperand* operand) const {
+ return graph_->LookupValue(operand->index())->representation();
+}
+
+
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new LChunk(graph());
@@ -62,10 +492,988 @@ void LChunkBuilder::Abort(const char* format, ...) {
}
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+ return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+ return new LUnallocated(LUnallocated::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
+ return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ XMMRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+ return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
+ return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+ return Use(value,
+ new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+ return Use(value, new LUnallocated(LUnallocated::NONE,
+ LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+ return value->IsConstant()
+ ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ : Use(value, new LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+ if (value->EmitAtUses()) {
+ HInstruction* instr = HInstruction::cast(value);
+ VisitInstruction(instr);
+ }
+ allocator_->RecordUse(value, operand);
+ return operand;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result) {
+ allocator_->RecordDefinition(current_instruction_, result);
+ instr->set_result(result);
+ return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateInstruction<1, I, T>* instr,
+ int index) {
+ return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateInstruction<1, I, T>* instr,
+ XMMRegister reg) {
+ return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+ HEnvironment* hydrogen_env = current_block_->last_environment();
+ instr->set_environment(CreateEnvironment(hydrogen_env));
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+ LInstruction* instr, int ast_id) {
+ ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+ ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ instructions_pending_deoptimization_environment_ = instr;
+ pending_deoptimization_ast_id_ = ast_id;
+ return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+ instructions_pending_deoptimization_environment_ = NULL;
+ pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize) {
+ allocator_->MarkAsCall();
+ instr = AssignPointerMap(instr);
+
+ if (hinstr->HasSideEffects()) {
+ ASSERT(hinstr->next()->IsSimulate());
+ HSimulate* sim = HSimulate::cast(hinstr->next());
+ instr = SetInstructionPendingDeoptimizationEnvironment(
+ instr, sim->ast_id());
+ }
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+ bool needs_environment =
+ (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+ if (needs_environment && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+ allocator_->MarkAsSaveDoubles();
+ return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new LPointerMap(position_));
+ return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+ LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+ allocator_->RecordTemporary(operand);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ allocator_->RecordTemporary(operand);
+ return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
+ LUnallocated* operand = ToUnallocated(reg);
+ allocator_->RecordTemporary(operand);
+ return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+ return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+ HBitwiseBinaryOperation* instr) {
+ Abort("Unimplemented: %s", "DoBit");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ Abort("Unimplemented: %s", "DoArithmeticD");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+ HArithmeticBinaryOperation* instr) {
+ ASSERT(op == Token::ADD ||
+ op == Token::DIV ||
+ op == Token::MOD ||
+ op == Token::MUL ||
+ op == Token::SUB);
+ HValue* left = instr->left();
+ HValue* right = instr->right();
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
+ LOperand* left_operand = UseFixed(left, rdx);
+ LOperand* right_operand = UseFixed(right, rax);
+ LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
- Abort("Lithium not implemented on x64.");
+ current_block_ = block;
+ next_block_ = next_block;
+ if (block->IsStartBlock()) {
+ block->UpdateEnvironment(graph_->start_environment());
+ argument_count_ = 0;
+ } else if (block->predecessors()->length() == 1) {
+ // We have a single predecessor => copy environment and outgoing
+ // argument count from the predecessor.
+ ASSERT(block->phis()->length() == 0);
+ HBasicBlock* pred = block->predecessors()->at(0);
+ HEnvironment* last_environment = pred->last_environment();
+ ASSERT(last_environment != NULL);
+ // Only copy the environment if it is used again later.
+ if (pred->end()->SecondSuccessor() == NULL) {
+ ASSERT(pred->end()->FirstSuccessor() == block);
+ } else {
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+ last_environment = last_environment->Copy();
+ }
+ }
+ block->UpdateEnvironment(last_environment);
+ ASSERT(pred->argument_count() >= 0);
+ argument_count_ = pred->argument_count();
+ } else {
+ // We are at a state join => process phis.
+ HBasicBlock* pred = block->predecessors()->at(0);
+ // No need to copy the environment; it cannot be used later.
+ HEnvironment* last_environment = pred->last_environment();
+ for (int i = 0; i < block->phis()->length(); ++i) {
+ HPhi* phi = block->phis()->at(i);
+ last_environment->SetValueAt(phi->merged_index(), phi);
+ }
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+ last_environment->SetValueAt(block->deleted_phis()->at(i),
+ graph_->GetConstantUndefined());
+ }
+ block->UpdateEnvironment(last_environment);
+ // Pick up the outgoing argument count of one of the predecessors.
+ argument_count_ = pred->argument_count();
+ }
+ HInstruction* current = block->first();
+ int start = chunk_->instructions()->length();
+ while (current != NULL && !is_aborted()) {
+ // Code for constants in registers is generated lazily.
+ if (!current->EmitAtUses()) {
+ VisitInstruction(current);
+ }
+ current = current->next();
+ }
+ int end = chunk_->instructions()->length() - 1;
+ if (end >= start) {
+ block->set_first_instruction_index(start);
+ block->set_last_instruction_index(end);
+ }
+ block->set_argument_count(argument_count_);
+ next_block_ = NULL;
+ current_block_ = NULL;
+}
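
The single-predecessor case above copies the environment only when one of the predecessor's successors is processed later (has a higher block id) and could still observe the original. Reduced to a predicate, under the same simplifying assumptions as the sketches above:

    // Sketch of the copy decision in DoBasicBlock: reuse the predecessor's
    // environment in place unless some successor of the predecessor comes
    // later in block order and would still need the unmodified copy.
    bool MustCopyEnvironment(int first_successor_id,
                             int second_successor_id,
                             int block_id) {
      return first_successor_id > block_id || second_successor_id > block_id;
    }

    int main() {
      // Block 5 whose predecessor also branches forward to block 7: copy.
      return MustCopyEnvironment(5, 7, 5) ? 0 : 1;
    }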
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+ HInstruction* old_current = current_instruction_;
+ current_instruction_ = current;
+ allocator_->BeginInstruction();
+ if (current->has_position()) position_ = current->position();
+ LInstruction* instr = current->CompileToLithium(this);
+
+ if (instr != NULL) {
+ if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+ instr = AssignPointerMap(instr);
+ }
+ if (FLAG_stress_environments && !instr->HasEnvironment()) {
+ instr = AssignEnvironment(instr);
+ }
+ if (current->IsBranch() && !instr->IsGoto()) {
+ // TODO(fschneider): Handle branch instructions uniformly like
+ // other instructions. This requires us to generate the right
+ // branch instruction already at the HIR level.
+ ASSERT(instr->IsControl());
+ HBranch* branch = HBranch::cast(current);
+ instr->set_hydrogen_value(branch->value());
+ HBasicBlock* first = branch->FirstSuccessor();
+ HBasicBlock* second = branch->SecondSuccessor();
+ ASSERT(first != NULL && second != NULL);
+ instr->SetBranchTargets(first->block_id(), second->block_id());
+ } else {
+ instr->set_hydrogen_value(current);
+ }
+
+ int index = chunk_->AddInstruction(instr, current_block_);
+ allocator_->SummarizeInstruction(index);
+ } else {
+ // This instruction should be omitted.
+ allocator_->OmitInstruction();
+ }
+ current_instruction_ = old_current;
+}
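
The branch fix-up in VisitInstruction copies the HBranch's successor block ids onto the control instruction. In miniature, with the same hypothetical simplifications:

    // Sketch: at build time a control instruction records where its two
    // outgoing edges go, matching SetBranchTargets in the code above.
    struct Control {
      int true_block_id;
      int false_block_id;
      void SetBranchTargets(int t, int f) {
        true_block_id = t;
        false_block_id = f;
      }
    };

    int main() {
      Control branch;
      branch.SetBranchTargets(/*first successor*/ 3, /*second successor*/ 4);
      return branch.true_block_id == 3 ? 0 : 1;
    }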
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+ if (hydrogen_env == NULL) return NULL;
+
+ LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ int ast_id = hydrogen_env->ast_id();
+ ASSERT(ast_id != AstNode::kNoNumber);
+ int value_count = hydrogen_env->length();
+ LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+ ast_id,
+ hydrogen_env->parameter_count(),
+ argument_count_,
+ value_count,
+ outer);
+ int argument_index = 0;
+ for (int i = 0; i < value_count; ++i) {
+ HValue* value = hydrogen_env->values()->at(i);
+ LOperand* op = NULL;
+ if (value->IsArgumentsObject()) {
+ op = NULL;
+ } else if (value->IsPushArgument()) {
+ op = new LArgument(argument_index++);
+ } else {
+ op = UseAny(value);
+ }
+ result->AddValue(op, value->representation());
+ }
+
+ return result;
+}
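
CreateEnvironment recurses on the outer environment first, so an inlined frame's LEnvironment ends up chained to its caller's. A toy version of that recursion, with hypothetical stripped-down types:

    #include <cstdio>

    struct HEnv { HEnv* outer; int ast_id; };
    struct LEnv { LEnv* outer; int ast_id; };

    // Build the caller (outer) frame first, then attach this frame to it,
    // mirroring the recursion in CreateEnvironment.
    LEnv* Create(HEnv* h) {
      if (h == nullptr) return nullptr;
      LEnv* outer = Create(h->outer);
      return new LEnv{outer, h->ast_id};
    }

    int main() {
      HEnv caller = {nullptr, 1};
      HEnv inlined = {&caller, 2};
      LEnv* l = Create(&inlined);
      std::printf("%d -> %d\n", l->ast_id, l->outer->ast_id);  // 2 -> 1
      return 0;
    }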
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+ LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
+ instr->include_stack_check());
+ return (instr->include_stack_check())
+ ? AssignPointerMap(result)
+ : result;
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+ Abort("Unimplemented: %s", "DoBranch");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMapAndBranch(
+ HCompareMapAndBranch* instr) {
+ Abort("Unimplemented: %s", "DoCompareMapAndBranch");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+ Abort("Unimplemented: %s", "DoArgumentsLength");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+ Abort("Unimplemented: %s", "DoArgumentsElements");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ Abort("Unimplemented: %s", "DoInstanceOf");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+ HInstanceOfKnownGlobal* instr) {
+ Abort("Unimplemented: %s", "DoInstanceOfKnownGlobal");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+ Abort("Unimplemented: %s", "DoApplyArguments");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+ Abort("Unimplemented: %s", "DoPushArgument");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+ Abort("Unimplemented: %s", "DoGlobalObject");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+ Abort("Unimplemented: %s", "DoGlobalReceiver");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+ HCallConstantFunction* instr) {
+ Abort("Unimplemented: %s", "DoCallConstantFunction");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+ Abort("Unimplemented: %s", "DoUnaryMathOperation");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+ Abort("Unimplemented: %s", "DoCallKeyed");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+ Abort("Unimplemented: %s", "DoCallNamed");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+ Abort("Unimplemented: %s", "DoCallGlobal");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+ Abort("Unimplemented: %s", "DoCallKnownGlobal");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+ Abort("Unimplemented: %s", "DoCallNew");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ Abort("Unimplemented: %s", "DoCallFunction");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+ Abort("Unimplemented: %s", "DoCallRuntime");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+ Abort("Unimplemented: %s", "DoShr");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+ Abort("Unimplemented: %s", "DoSar");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+ Abort("Unimplemented: %s", "DoShl");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+ Abort("Unimplemented: %s", "DoBitAnd");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+ Abort("Unimplemented: %s", "DoBitNot");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+ Abort("Unimplemented: %s", "DoBitOr");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+ Abort("Unimplemented: %s", "DoBitXor");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+ Abort("Unimplemented: %s", "DoDiv");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+ Abort("Unimplemented: %s", "DoMod");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+ Abort("Unimplemented: %s", "DoMul");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+ Abort("Unimplemented: %s", "DoSub");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LAddI* add = new LAddI(left, right);
+ LInstruction* result = DefineSameAsFirst(add);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ Abort("Unimplemented: %s", "DoAdd on Doubles");
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::ADD, instr);
+ }
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+ Abort("Unimplemented: %s", "DoPower");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+ Abort("Unimplemented: %s", "DoCompare");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCompareJSObjectEq(
+ HCompareJSObjectEq* instr) {
+ Abort("Unimplemented: %s", "DoCompareJSObjectEq");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+ Abort("Unimplemented: %s", "DoIsNull");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+ Abort("Unimplemented: %s", "DoIsObject");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+ Abort("Unimplemented: %s", "DoIsSmi");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+ Abort("Unimplemented: %s", "DoHasInstanceType");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
+ HHasCachedArrayIndex* instr) {
+ Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
+ return NULL;
}
+
+
+LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+ Abort("Unimplemented: %s", "DoClassOfTest");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
+ Abort("Unimplemented: %s", "DoJSArrayLength");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
+ Abort("Unimplemented: %s", "DoFixedArrayLength");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+ Abort("Unimplemented: %s", "DoValueOf");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+ Abort("Unimplemented: %s", "DoBoundsCheck");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ Abort("Unimplemented: %s", "DoThrow");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+ Abort("Unimplemented: %s", "DoChange");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+ Abort("Unimplemented: %s", "DoCheckNonSmi");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+ Abort("Unimplemented: %s", "DoCheckInstanceType");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+ Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ Abort("Unimplemented: %s", "DoCheckSmi");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+ Abort("Unimplemented: %s", "DoCheckFunction");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+ Abort("Unimplemented: %s", "DoCheckMap");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+ return new LReturn(UseFixed(instr->value(), rax));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+ Representation r = instr->representation();
+ if (r.IsInteger32()) {
+ int32_t value = instr->Integer32Value();
+ return DefineAsRegister(new LConstantI(value));
+ } else if (r.IsDouble()) {
+ double value = instr->DoubleValue();
+ LOperand* temp = TempRegister();
+ return DefineAsRegister(new LConstantD(value, temp));
+ } else if (r.IsTagged()) {
+ return DefineAsRegister(new LConstantT(instr->handle()));
+ } else {
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
+ Abort("Unimplemented: %s", "DoLoadGlobal");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+ Abort("Unimplemented: %s", "DoStoreGlobal");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+ Abort("Unimplemented: %s", "DoLoadContextSlot");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+ Abort("Unimplemented: %s", "DoLoadNamedField");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+ Abort("Unimplemented: %s", "DoLoadNamedGeneric");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+ HLoadFunctionPrototype* instr) {
+ Abort("Unimplemented: %s", "DoLoadFunctionPrototype");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+ Abort("Unimplemented: %s", "DoLoadElements");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+ HLoadKeyedFastElement* instr) {
+ Abort("Unimplemented: %s", "DoLoadKeyedFastElement");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+ Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+ HStoreKeyedFastElement* instr) {
+ Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+ Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+ Abort("Unimplemented: %s", "DoStoreNamedField");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+ Abort("Unimplemented: %s", "DoStoreNamedGeneric");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+ Abort("Unimplemented: %s", "DoArrayLiteral");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+ Abort("Unimplemented: %s", "DoObjectLiteral");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+ Abort("Unimplemented: %s", "DoRegExpLiteral");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+ Abort("Unimplemented: %s", "DoFunctionLiteral");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+ Abort("Unimplemented: %s", "DoDeleteProperty");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ Abort("Unimplemented: %s", "DoOsrEntry");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+ Abort("Unimplemented: %s", "DoUnknownOSRValue");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+ Abort("Unimplemented: %s", "DoCallStub");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+ Abort("Unimplemented: %s", "DoArgumentsObject");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+ Abort("Unimplemented: %s", "DoAccessArgumentsAt");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+ Abort("Unimplemented: %s", "DoTypeof");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
+ Abort("Unimplemented: %s", "DoTypeofIs");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+ HEnvironment* env = current_block_->last_environment();
+ ASSERT(env != NULL);
+
+ env->set_ast_id(instr->ast_id());
+
+ env->Drop(instr->pop_count());
+ for (int i = 0; i < instr->values()->length(); ++i) {
+ HValue* value = instr->values()->at(i);
+ if (instr->HasAssignedIndexAt(i)) {
+ env->Bind(instr->GetAssignedIndexAt(i), value);
+ } else {
+ env->Push(value);
+ }
+ }
+ ASSERT(env->length() == instr->environment_length());
+
+ // If there is an instruction pending a deoptimization environment, create
+ // a lazy bailout instruction to capture the environment.
+ if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+ LLazyBailout* lazy_bailout = new LLazyBailout;
+ LInstruction* result = AssignEnvironment(lazy_bailout);
+ instructions_pending_deoptimization_environment_->
+ set_deoptimization_environment(result->environment());
+ ClearInstructionPendingDeoptimizationEnvironment();
+ return result;
+ }
+
+ return NULL;
+}
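
DoSimulate completes the handshake started in MarkAsCall: when the simulate's ast id matches the recorded pending id, the lazy bailout's environment is handed back to the recorded call. A compact model of the protocol (hypothetical types; the real code threads LEnvironment objects, not ints):

    #include <cassert>

    struct Instr { int deopt_env; Instr() : deopt_env(-1) {} };

    struct Builder {
      Instr* pending_instr;
      int pending_ast_id;
      Builder() : pending_instr(0), pending_ast_id(-1) {}

      // MarkAsCall: remember the call and the ast id of the HSimulate
      // that follows it.
      void MarkAsCall(Instr* call, int sim_ast_id) {
        assert(pending_instr == 0);
        pending_instr = call;
        pending_ast_id = sim_ast_id;
      }

      // DoSimulate: on a matching ast id, wire the environment back.
      void Simulate(int ast_id, int env) {
        if (ast_id == pending_ast_id) {
          pending_instr->deopt_env = env;
          pending_instr = 0;
          pending_ast_id = -1;
        }
      }
    };

    int main() {
      Builder b;
      Instr call;
      b.MarkAsCall(&call, 42);
      b.Simulate(42, /*env=*/7);
      assert(call.deopt_env == 7);
      return 0;
    }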
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ return MarkAsCall(new LStackCheck, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+ Abort("Unimplemented: %s", "DoEnterInlined");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+ Abort("Unimplemented: %s", "DoLeaveInlined");
+ return NULL;
+}
+
} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index fcab2356..17d9dda1 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -38,26 +38,271 @@ namespace internal {
// Forward declarations.
class LCodeGen;
-class LEnvironment;
-class Translation;
+
+
+// Type hierarchy:
+//
+// LInstruction
+// LTemplateInstruction
+// LControlInstruction
+// LBranch
+// LClassOfTestAndBranch
+// LCmpJSObjectEqAndBranch
+// LCmpIDAndBranch
+// LHasCachedArrayIndexAndBranch
+// LHasInstanceTypeAndBranch
+// LInstanceOfAndBranch
+// LIsNullAndBranch
+// LIsObjectAndBranch
+// LIsSmiAndBranch
+// LTypeofIsAndBranch
+// LAccessArgumentsAt
+// LArgumentsElements
+// LArgumentsLength
+// LAddI
+// LApplyArguments
+// LArithmeticD
+// LArithmeticT
+// LBitI
+// LBoundsCheck
+// LCmpID
+// LCmpJSObjectEq
+// LCmpT
+// LDivI
+// LInstanceOf
+// LInstanceOfKnownGlobal
+// LLoadKeyedFastElement
+// LLoadKeyedGeneric
+// LModI
+// LMulI
+// LPower
+// LShiftI
+// LSubI
+// LCallConstantFunction
+// LCallFunction
+// LCallGlobal
+// LCallKeyed
+// LCallKnownGlobal
+// LCallNamed
+// LCallRuntime
+// LCallStub
+// LConstant
+// LConstantD
+// LConstantI
+// LConstantT
+// LDeoptimize
+// LFunctionLiteral
+// LGap
+// LLabel
+// LGlobalObject
+// LGlobalReceiver
+// LGoto
+// LLazyBailout
+// LLoadGlobal
+// LLoadContextSlot
+// LArrayLiteral
+// LObjectLiteral
+// LRegExpLiteral
+// LOsrEntry
+// LParameter
+// LRegExpConstructResult
+// LStackCheck
+// LStoreKeyed
+// LStoreKeyedFastElement
+// LStoreKeyedGeneric
+// LStoreNamed
+// LStoreNamedField
+// LStoreNamedGeneric
+// LBitNotI
+// LCallNew
+// LCheckFunction
+// LCheckPrototypeMaps
+// LCheckInstanceType
+// LCheckMap
+// LCheckSmi
+// LClassOfTest
+// LDeleteProperty
+// LDoubleToI
+// LFixedArrayLength
+// LHasCachedArrayIndex
+// LHasInstanceType
+// LInteger32ToDouble
+// LIsNull
+// LIsObject
+// LIsSmi
+// LJSArrayLength
+// LLoadNamedField
+// LLoadNamedGeneric
+// LLoadFunctionPrototype
+// LNumberTagD
+// LNumberTagI
+// LPushArgument
+// LReturn
+// LSmiTag
+// LStoreGlobal
+// LTaggedToI
+// LThrow
+// LTypeof
+// LTypeofIs
+// LUnaryMathOperation
+// LValueOf
+// LUnknownOSRValue
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
+ V(ControlInstruction) \
+ V(Constant) \
+ V(Call) \
+ V(StoreKeyed) \
+ V(StoreNamed) \
+ LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+ V(AccessArgumentsAt) \
+ V(AddI) \
+ V(ApplyArguments) \
+ V(ArgumentsElements) \
+ V(ArgumentsLength) \
+ V(ArithmeticD) \
+ V(ArithmeticT) \
+ V(ArrayLiteral) \
+ V(BitI) \
+ V(BitNotI) \
+ V(BoundsCheck) \
+ V(Branch) \
+ V(CallConstantFunction) \
+ V(CallFunction) \
+ V(CallGlobal) \
+ V(CallKeyed) \
+ V(CallKnownGlobal) \
+ V(CallNamed) \
+ V(CallNew) \
+ V(CallRuntime) \
+ V(CallStub) \
+ V(CheckFunction) \
+ V(CheckInstanceType) \
+ V(CheckMap) \
+ V(CheckPrototypeMaps) \
+ V(CheckSmi) \
+ V(CmpID) \
+ V(CmpIDAndBranch) \
+ V(CmpJSObjectEq) \
+ V(CmpJSObjectEqAndBranch) \
+ V(CmpMapAndBranch) \
+ V(CmpT) \
+ V(CmpTAndBranch) \
+ V(ConstantD) \
+ V(ConstantI) \
+ V(ConstantT) \
+ V(DeleteProperty) \
+ V(Deoptimize) \
+ V(DivI) \
+ V(DoubleToI) \
+ V(FunctionLiteral) \
+ V(Gap) \
+ V(GlobalObject) \
+ V(GlobalReceiver) \
+ V(Goto) \
+ V(FixedArrayLength) \
+ V(InstanceOf) \
+ V(InstanceOfAndBranch) \
+ V(InstanceOfKnownGlobal) \
+ V(Integer32ToDouble) \
+ V(IsNull) \
+ V(IsNullAndBranch) \
+ V(IsObject) \
+ V(IsObjectAndBranch) \
+ V(IsSmi) \
+ V(IsSmiAndBranch) \
+ V(JSArrayLength) \
+ V(HasInstanceType) \
+ V(HasInstanceTypeAndBranch) \
+ V(HasCachedArrayIndex) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(ClassOfTest) \
+ V(ClassOfTestAndBranch) \
+ V(Label) \
+ V(LazyBailout) \
+ V(LoadContextSlot) \
+ V(LoadElements) \
+ V(LoadGlobal) \
+ V(LoadKeyedFastElement) \
+ V(LoadKeyedGeneric) \
+ V(LoadNamedField) \
+ V(LoadNamedGeneric) \
+ V(LoadFunctionPrototype) \
+ V(ModI) \
+ V(MulI) \
+ V(NumberTagD) \
+ V(NumberTagI) \
+ V(NumberUntagD) \
+ V(ObjectLiteral) \
+ V(OsrEntry) \
+ V(Parameter) \
+ V(Power) \
+ V(PushArgument) \
+ V(RegExpLiteral) \
+ V(Return) \
+ V(ShiftI) \
+ V(SmiTag) \
+ V(SmiUntag) \
+ V(StackCheck) \
+ V(StoreGlobal) \
+ V(StoreKeyedFastElement) \
+ V(StoreKeyedGeneric) \
+ V(StoreNamedField) \
+ V(StoreNamedGeneric) \
+ V(SubI) \
+ V(TaggedToI) \
+ V(Throw) \
+ V(Typeof) \
+ V(TypeofIs) \
+ V(TypeofIsAndBranch) \
+ V(UnaryMathOperation) \
+ V(UnknownOSRValue) \
+ V(ValueOf)
+
+
+#define DECLARE_INSTRUCTION(type) \
+ virtual bool Is##type() const { return true; } \
+ static L##type* cast(LInstruction* instr) { \
+ ASSERT(instr->Is##type()); \
+ return reinterpret_cast<L##type*>(instr); \
+ }
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
+ virtual void CompileToNative(LCodeGen* generator); \
+ virtual const char* Mnemonic() const { return mnemonic; } \
+ DECLARE_INSTRUCTION(type)
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+ H##type* hydrogen() const { \
+ return H##type::cast(hydrogen_value()); \
+ }
+
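
To make the macros above concrete: hand-expanding DECLARE_INSTRUCTION for one instruction gives a type tester plus a checked downcast. A self-contained demonstration of the same pattern, with simplified stand-ins for the real classes (the real macro uses reinterpret_cast and V8's ASSERT):

    #include <cassert>

    struct LInstruction {
      virtual ~LInstruction() {}
      virtual bool IsAddI() const { return false; }  // base-class default
    };

    // Hand-expanded DECLARE_INSTRUCTION(AddI).
    struct LAddI : LInstruction {
      virtual bool IsAddI() const { return true; }
      static LAddI* cast(LInstruction* instr) {
        assert(instr->IsAddI());
        return static_cast<LAddI*>(instr);
      }
    };

    int main() {
      LAddI add;
      LInstruction* generic = &add;
      LAddI* back = LAddI::cast(generic);  // safe: the tester guards the cast
      return back == &add ? 0 : 1;
    }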
class LInstruction: public ZoneObject {
public:
- LInstruction() { }
+ LInstruction()
+ : hydrogen_value_(NULL) { }
virtual ~LInstruction() { }
- virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
- virtual void PrintDataTo(StringStream* stream) const { }
+ virtual void CompileToNative(LCodeGen* generator) = 0;
+ virtual const char* Mnemonic() const = 0;
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) = 0;
+ virtual void PrintOutputOperandTo(StringStream* stream) = 0;
- // Predicates should be generated by macro as in lithium-ia32.h.
- virtual bool IsLabel() const {
- UNIMPLEMENTED();
- return false;
- }
- virtual bool IsOsrEntry() const {
- UNIMPLEMENTED();
- return false;
- }
+ // Declare virtual type testers.
+#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
+ LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ virtual bool IsControl() const { return false; }
+ virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_.set(env); }
LEnvironment* environment() const { return environment_.get(); }
@@ -67,9 +312,7 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- void set_result(LOperand* operand) { result_.set(operand); }
- LOperand* result() const { return result_.get(); }
- bool HasResult() const { return result_.is_set(); }
+ virtual bool HasResult() const = 0;
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -87,39 +330,79 @@ class LInstruction: public ZoneObject {
private:
SetOncePointer<LEnvironment> environment_;
SetOncePointer<LPointerMap> pointer_map_;
- SetOncePointer<LOperand> result_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
};
-class LParallelMove : public ZoneObject {
+template<typename T, int N>
+class OperandContainer {
public:
- LParallelMove() : move_operands_(4) { }
-
- void AddMove(LOperand* from, LOperand* to) {
- UNIMPLEMENTED();
+ OperandContainer() {
+ for (int i = 0; i < N; i++) elems_[i] = NULL;
}
-
- const ZoneList<LMoveOperands>* move_operands() const {
- UNIMPLEMENTED();
- return NULL;
+ int length() { return N; }
+ T& operator[](int i) {
+ ASSERT(i < length());
+ return elems_[i];
}
+ void PrintOperandsTo(StringStream* stream);
private:
- ZoneList<LMoveOperands> move_operands_;
+ T elems_[N];
};
-class LGap: public LInstruction {
+template<typename T>
+class OperandContainer<T, 0> {
public:
- explicit LGap(HBasicBlock* block) { }
+ int length() { return 0; }
+ void PrintOperandsTo(StringStream* stream) { }
+};
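
The zero-length specialization exists because standard C++ forbids zero-length arrays, and it lets an instruction with no inputs or temps pay no storage for them. The same pattern in isolation:

    #include <cstdio>

    template<typename T, int N>
    struct Container {
      int length() { return N; }
      T elems[N];
    };

    // Specialization for N == 0: no array member at all, so the empty
    // container is as small as C++ allows instead of being ill-formed.
    template<typename T>
    struct Container<T, 0> {
      int length() { return 0; }
    };

    int main() {
      std::printf("%u vs %u\n",
                  unsigned(sizeof(Container<void*, 2>)),
                  unsigned(sizeof(Container<void*, 0>)));  // e.g. 16 vs 1 on LP64
      return 0;
    }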
+
+
+template<int R, int I, int T = 0>
+class LTemplateInstruction: public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const { return R != 0; }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() { return results_[0]; }
+
+ int InputCount() { return I; }
+ LOperand* InputAt(int i) { return inputs_[i]; }
+
+ int TempCount() { return T; }
+ LOperand* TempAt(int i) { return temps_[i]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
- HBasicBlock* block() const {
- UNIMPLEMENTED();
- return NULL;
+ protected:
+ OperandContainer<LOperand*, R> results_;
+ OperandContainer<LOperand*, I> inputs_;
+ OperandContainer<LOperand*, T> temps_;
+};
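
Reading LTemplateInstruction's parameters: <R, I, T> counts results, inputs, and temps, and every concrete instruction below is an instantiation; LModI, for example, is LTemplateInstruction<1, 2, 1> (one result, two inputs, one temp). A toy check of the same shape bookkeeping:

    template<int R, int I, int T>
    struct Shape {
      static const int results = R;
      static const int inputs = I;
      static const int temps = T;
    };

    // Mirrors class LModI: public LTemplateInstruction<1, 2, 1> below.
    typedef Shape<1, 2, 1> ModIShape;

    int main() {
      return (ModIShape::results == 1 &&
              ModIShape::inputs == 2 &&
              ModIShape::temps == 1) ? 0 : 1;
    }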
+
+
+class LGap: public LTemplateInstruction<0, 0, 0> {
+ public:
+ explicit LGap(HBasicBlock* block)
+ : block_(block) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
}
+ DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+ virtual void PrintDataTo(StringStream* stream);
+
+ bool IsRedundant() const;
+
+ HBasicBlock* block() const { return block_; }
+
enum InnerPosition {
BEFORE,
START,
@@ -129,14 +412,13 @@ class LGap: public LInstruction {
LAST_INNER_POSITION = AFTER
};
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
- UNIMPLEMENTED();
- return NULL;
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+ if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+ return parallel_moves_[pos];
}
LParallelMove* GetParallelMove(InnerPosition pos) {
- UNIMPLEMENTED();
- return NULL;
+ return parallel_moves_[pos];
}
private:
@@ -145,9 +427,61 @@ class LGap: public LInstruction {
};
+class LGoto: public LTemplateInstruction<0, 0, 0> {
+ public:
+ LGoto(int block_id, bool include_stack_check = false)
+ : block_id_(block_id), include_stack_check_(include_stack_check) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+ virtual void PrintDataTo(StringStream* stream);
+ virtual bool IsControl() const { return true; }
+
+ int block_id() const { return block_id_; }
+ bool include_stack_check() const { return include_stack_check_; }
+
+ private:
+ int block_id_;
+ bool include_stack_check_;
+};
+
+
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
+ public:
+ LLazyBailout() : gap_instructions_size_(0) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+ void set_gap_instructions_size(int gap_instructions_size) {
+ gap_instructions_size_ = gap_instructions_size;
+ }
+ int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+ int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
class LLabel: public LGap {
public:
- explicit LLabel(HBasicBlock* block) : LGap(block) { }
+ explicit LLabel(HBasicBlock* block)
+ : LGap(block), replacement_(NULL) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int block_id() const { return block()->block_id(); }
+ bool is_loop_header() const { return block()->IsLoopHeader(); }
+ Label* label() { return &label_; }
+ LLabel* replacement() const { return replacement_; }
+ void set_replacement(LLabel* label) { replacement_ = label; }
+ bool HasReplacement() const { return replacement_ != NULL; }
private:
Label label_;
@@ -155,107 +489,1287 @@ class LLabel: public LGap {
};
-class LOsrEntry: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
public:
- // Function could be generated by a macro as in lithium-ia32.h.
- static LOsrEntry* cast(LInstruction* instr) {
- UNIMPLEMENTED();
- return NULL;
+ DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+ DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+ TranscendentalCache::Type transcendental_type() {
+ return hydrogen()->transcendental_type();
}
+};
+
+
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
- LOperand** SpilledRegisterArray() {
- UNIMPLEMENTED();
- return NULL;
+
+template<int I, int T = 0>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
+ public:
+ DECLARE_INSTRUCTION(ControlInstruction)
+ virtual bool IsControl() const { return true; }
+
+ int true_block_id() const { return true_block_id_; }
+ int false_block_id() const { return false_block_id_; }
+ void SetBranchTargets(int true_block_id, int false_block_id) {
+ true_block_id_ = true_block_id;
+ false_block_id_ = false_block_id;
}
- LOperand** SpilledDoubleRegisterArray() {
- UNIMPLEMENTED();
- return NULL;
+
+ private:
+ int true_block_id_;
+ int false_block_id_;
+};
+
+
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+ public:
+ LApplyArguments(LOperand* function,
+ LOperand* receiver,
+ LOperand* length,
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
}
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
- UNIMPLEMENTED();
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
}
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- UNIMPLEMENTED();
+
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LArgumentsLength: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
}
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
+ public:
+ LArgumentsElements() { }
+
+ DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LModI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LDivI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCmpID: public LTemplateInstruction<1, 2> {
+ public:
+ LCmpID(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
+ }
+};
+
+
+class LCmpIDAndBranch: public LControlInstruction<2> {
+ public:
+ LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LUnaryMathOperation(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+ virtual void PrintDataTo(StringStream* stream);
+ BuiltinFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
+ public:
+ LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+};
+
+
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
+ public:
+ LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
+ "cmp-jsobject-eq-and-branch")
+};
+
+
+class LIsNull: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LIsNull(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+ DECLARE_HYDROGEN_ACCESSOR(IsNull)
+
+ bool is_strict() const { return hydrogen()->is_strict(); }
+};
+
+
+class LIsNullAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LIsNullAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNull)
+
+ bool is_strict() const { return hydrogen()->is_strict(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsObject: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LIsObject(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
+};
+
+
+class LIsObjectAndBranch: public LControlInstruction<1, 2> {
+ public:
+ LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LIsSmi: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LIsSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmi)
+};
+
+
+class LIsSmiAndBranch: public LControlInstruction<1> {
+ public:
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LHasInstanceType: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LHasInstanceType(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+};
+
+
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+ "has-instance-type-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LHasCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> {
+ public:
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+ "has-cached-array-index-and-branch")
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LClassOfTest(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+ public:
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+ "class-of-test-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpT: public LTemplateInstruction<1, 2> {
+ public:
+ LCmpT(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCmpTAndBranch: public LControlInstruction<2> {
+ public:
+ LCmpTAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+ Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf: public LTemplateInstruction<1, 2> {
+ public:
+ LInstanceOf(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfAndBranch: public LControlInstruction<2> {
+ public:
+ LInstanceOfAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
+};
+
+
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+ "instance-of-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+ Handle<JSFunction> function() const { return hydrogen()->function(); }
+};
+
+
+class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
+
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LTemplateInstruction<1, 2> {
+ public:
+ LBitI(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+
private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ Token::Value op_;
};
-class LPointerMap: public ZoneObject {
+class LShiftI: public LTemplateInstruction<1, 2> {
public:
- explicit LPointerMap(int position)
- : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+ LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
- int lithium_position() const {
- UNIMPLEMENTED();
- return 0;
+ bool can_deopt() const { return can_deopt_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+ Token::Value op_;
+ bool can_deopt_;
+};
+
+
+class LSubI: public LTemplateInstruction<1, 2> {
+ public:
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
}
- void RecordPointer(LOperand* op) { UNIMPLEMENTED(); }
+ DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+template <int temp_count>
+class LConstant: public LTemplateInstruction<1, 0, temp_count> {
+ DECLARE_INSTRUCTION(Constant)
+};
+
+
+class LConstantI: public LConstant<0> {
+ public:
+ explicit LConstantI(int32_t value) : value_(value) { }
+ int32_t value() const { return value_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
private:
- ZoneList<LOperand*> pointer_operands_;
- int position_;
- int lithium_position_;
-};
-
-
-class LEnvironment: public ZoneObject {
- public:
- LEnvironment(Handle<JSFunction> closure,
- int ast_id,
- int parameter_count,
- int argument_count,
- int value_count,
- LEnvironment* outer)
- : closure_(closure),
- arguments_stack_height_(argument_count),
- deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
- translation_index_(-1),
- ast_id_(ast_id),
- parameter_count_(parameter_count),
- values_(value_count),
- representations_(value_count),
- spilled_registers_(NULL),
- spilled_double_registers_(NULL),
- outer_(outer) {
- }
-
- Handle<JSFunction> closure() const { return closure_; }
- int arguments_stack_height() const { return arguments_stack_height_; }
- int deoptimization_index() const { return deoptimization_index_; }
- int translation_index() const { return translation_index_; }
- int ast_id() const { return ast_id_; }
- int parameter_count() const { return parameter_count_; }
- const ZoneList<LOperand*>* values() const { return &values_; }
- LEnvironment* outer() const { return outer_; }
+ int32_t value_;
+};
+
+
+class LConstantD: public LConstant<1> {
+ public:
+ explicit LConstantD(double value, LOperand* temp) : value_(value) {
+ temps_[0] = temp;
+ }
+ double value() const { return value_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+
+ private:
+ double value_;
+};
+
+
+class LConstantT: public LConstant<0> {
+ public:
+ explicit LConstantT(Handle<Object> value) : value_(value) { }
+ Handle<Object> value() const { return value_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
private:
- Handle<JSFunction> closure_;
- int arguments_stack_height_;
- int deoptimization_index_;
- int translation_index_;
- int ast_id_;
- int parameter_count_;
- ZoneList<LOperand*> values_;
- ZoneList<Representation> representations_;
+ Handle<Object> value_;
+};
+
+
+class LBranch: public LControlInstruction<1> {
+ public:
+ explicit LBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+ DECLARE_HYDROGEN_ACCESSOR(Value)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1> {
+ public:
+ explicit LCmpMapAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
- // Allocation index indexed arrays of spill slot operands for registers
- // that are also in spill slots at an OSR entry. NULL for environments
- // that do not correspond to an OSR entry.
- LOperand** spilled_registers_;
- LOperand** spilled_double_registers_;
+ virtual bool IsControl() const { return true; }
- LEnvironment* outer_;
+ Handle<Map> map() const { return hydrogen()->map(); }
+ int true_block_id() const {
+ return hydrogen()->true_destination()->block_id();
+ }
+ int false_block_id() const {
+ return hydrogen()->false_destination()->block_id();
+ }
+};
+
+
+class LJSArrayLength: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LJSArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
+};
+
+
+class LFixedArrayLength: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LFixedArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
+ DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
+};
+
+
+class LValueOf: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LValueOf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+ DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+};
+
+
+class LThrow: public LTemplateInstruction<0, 1> {
+ public:
+ explicit LThrow(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LBitNotI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LTemplateInstruction<1, 2> {
+ public:
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+ DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LPower: public LTemplateInstruction<1, 2> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD: public LTemplateInstruction<1, 2> {
+ public:
+ LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ Token::Value op() const { return op_; }
+
+ virtual void CompileToNative(LCodeGen* generator);
+ virtual const char* Mnemonic() const;
+
+ private:
+ Token::Value op_;
+};
+
+
+class LArithmeticT: public LTemplateInstruction<1, 2> {
+ public:
+ LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ virtual void CompileToNative(LCodeGen* generator);
+ virtual const char* Mnemonic() const;
+
+ Token::Value op() const { return op_; }
+
+ private:
+ Token::Value op_;
+};
+
+
+class LReturn: public LTemplateInstruction<0, 1> {
+ public:
+ explicit LReturn(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LLoadNamedGeneric(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+ LOperand* object() { return inputs_[0]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+ inputs_[0] = function;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+ DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+
+ LOperand* function() { return inputs_[0]; }
+};
+
+
+class LLoadElements: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LLoadElements(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
+ public:
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> {
+ public:
+ LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+};
+
+
+class LStoreGlobal: public LTemplateInstruction<0, 1> {
+ public:
+ explicit LStoreGlobal(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
+ DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+};
+
+
+class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+ int context_chain_length() { return hydrogen()->context_chain_length(); }
+ int slot_index() { return hydrogen()->slot_index(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LPushArgument: public LTemplateInstruction<0, 1> {
+ public:
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+};
+
+
+class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+};
+
+
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<JSFunction> function() { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LTemplateInstruction<1, 0, 1> {
+ public:
+ explicit LCallKeyed(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+ DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<String> name() const { return hydrogen()->name(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+ DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+ int arity() const { return hydrogen()->argument_count() - 2; }
+};
+
+
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+ DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<String> name() const { return hydrogen()->name(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+ DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ Handle<JSFunction> target() const { return hydrogen()->target(); }
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LCallNew(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+ DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+ DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+ Runtime::Function* function() const { return hydrogen()->function(); }
+ int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LNumberTagI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LNumberTagD(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LDoubleToI(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LTaggedToI(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
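+
+// Example: a conversion feeding a bitwise use such as "x | 0" is marked
+// truncating by Hydrogen (CanTruncateToInt32() is true), so a double
+// like 3.7 simply becomes 3; a non-truncating conversion of the same
+// value has no exact int32 representation and must deoptimize instead.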
+
+
+class LSmiTag: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LNumberUntagD(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+};
+
+
+class LSmiUntag: public LTemplateInstruction<1, 1> {
+ public:
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ bool needs_check() const { return needs_check_; }
+
+ private:
+ bool needs_check_;
+};
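+
+// Sketch of the x64 smi scheme behind the four instructions above: the
+// 32-bit payload lives in the upper word and heap pointers keep the low
+// tag bit set, so with kSmiShift == 32
+//   smi-tag:   result = value << kSmiShift
+//   smi-untag: result = value >> kSmiShift, preceded by a tag-bit check
+//              when needs_check() is true.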
+
+
+class LStoreNamed: public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreNamed(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
+
+ DECLARE_INSTRUCTION(StoreNamed)
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LStoreNamedField: public LStoreNamed {
+ public:
+ LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
+ : LStoreNamed(obj, val) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ bool is_in_object() { return hydrogen()->is_in_object(); }
+ int offset() { return hydrogen()->offset(); }
+ bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
+};
+
+
+class LStoreNamedGeneric: public LStoreNamed {
+ public:
+ LStoreNamedGeneric(LOperand* obj, LOperand* val)
+ : LStoreNamed(obj, val) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+};
+
+
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_INSTRUCTION(StoreKeyed)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStoreKeyedFastElement: public LStoreKeyed {
+ public:
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
+ : LStoreKeyed(obj, key, val) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+};
+
+
+class LStoreKeyedGeneric: public LStoreKeyed {
+ public:
+ LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
+ : LStoreKeyed(obj, key, val) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1> {
+ public:
+ explicit LCheckFunction(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+ DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
+ public:
+ LCheckInstanceType(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+ DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMap: public LTemplateInstruction<0, 1> {
+ public:
+ explicit LCheckMap(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
+ public:
+ explicit LCheckPrototypeMaps(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+ Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
+ Handle<JSObject> holder() const { return hydrogen()->holder(); }
+};
+
+
+class LCheckSmi: public LTemplateInstruction<0, 1> {
+ public:
+ LCheckSmi(LOperand* value, Condition condition)
+ : condition_(condition) {
+ inputs_[0] = value;
+ }
+
+ Condition condition() const { return condition_; }
+
+ virtual void CompileToNative(LCodeGen* generator);
+ virtual const char* Mnemonic() const {
+ return (condition_ == zero) ? "check-non-smi" : "check-smi";
+ }
+
+ private:
+ Condition condition_;
+};
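+
+// Example of the two polarities, assuming the generated code tests the
+// value against kSmiTagMask and deoptimizes when condition() holds:
+// LCheckSmi(value, zero) bails out when the value is a smi (hence the
+// "check-non-smi" mnemonic), while LCheckSmi(value, not_zero) bails out
+// when it is a heap object ("check-smi").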
+
+
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+ DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+ Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LTypeof: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LTypeof(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIs: public LTemplateInstruction<1, 1> {
+ public:
+ explicit LTypeofIs(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LTypeofIsAndBranch: public LControlInstruction<1> {
+ public:
+ explicit LTypeofIsAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
+class LDeleteProperty: public LTemplateInstruction<1, 2> {
+ public:
+ LDeleteProperty(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
+ public:
+ LOsrEntry();
+
+ DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+ LOperand** SpilledRegisterArray() { return register_spills_; }
+ LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+ void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+ void MarkSpilledDoubleRegister(int allocation_index,
+ LOperand* spill_operand);
+
+ private:
+ // Arrays of spill slot operands for registers with an assigned spill
+ // slot, i.e., that must also be restored to the spill slot on OSR entry.
+ // NULL if the register has no assigned spill slot. Indexed by allocation
+ // index.
+ LOperand* register_spills_[Register::kNumAllocatableRegisters];
+ LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
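+
+// Hypothetical use by the register allocator: a register with allocation
+// index 2 that also owns a stack slot could be recorded as
+//   osr_entry->MarkSpilledRegister(2, spill_operand);
+// where spill_operand is that register's assigned spill slot, so the OSR
+// entry sequence can restore the slot before jumping into optimized code.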
+
+
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
};
@@ -269,57 +1783,51 @@ class LChunk: public ZoneObject {
pointer_maps_(8),
inlined_closures_(1) { }
+ int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+ LConstantOperand* DefineConstantOperand(HConstant* constant);
+ Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+ Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+ int GetNextSpillIndex(bool is_double);
+ LOperand* GetNextSpillSlot(bool is_double);
+
+ int ParameterAt(int index);
+ int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+ void AddGapMove(int index, LOperand* from, LOperand* to);
+ LGap* GetGapAt(int index) const;
+ bool IsGapAt(int index) const;
+ int NearestGapPos(int index) const;
+ void MarkEmptyBlocks();
const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
- const ZoneList<Handle<JSFunction> >* inlined_closures() const {
- return &inlined_closures_;
- }
-
- LOperand* GetNextSpillSlot(bool double_slot) {
- UNIMPLEMENTED();
- return NULL;
- }
-
- LConstantOperand* DefineConstantOperand(HConstant* constant) {
- UNIMPLEMENTED();
- return NULL;
- }
-
LLabel* GetLabel(int block_id) const {
- UNIMPLEMENTED();
- return NULL;
+ HBasicBlock* block = graph_->blocks()->at(block_id);
+ int first_instruction = block->first_instruction_index();
+ return LLabel::cast(instructions_[first_instruction]);
}
-
- int GetParameterStackSlot(int index) const {
- UNIMPLEMENTED();
- return 0;
+ int LookupDestination(int block_id) const {
+ LLabel* cur = GetLabel(block_id);
+ while (cur->replacement() != NULL) {
+ cur = cur->replacement();
+ }
+ return cur->block_id();
}
-
- void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
-
- LGap* GetGapAt(int index) const {
- UNIMPLEMENTED();
- return NULL;
+ Label* GetAssemblyLabel(int block_id) const {
+ LLabel* label = GetLabel(block_id);
+ ASSERT(!label->HasReplacement());
+ return label->label();
}
- bool IsGapAt(int index) const {
- UNIMPLEMENTED();
- return false;
+ const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+ return &inlined_closures_;
}
- int NearestGapPos(int index) const {
- UNIMPLEMENTED();
- return 0;
+ void AddInlinedClosure(Handle<JSFunction> closure) {
+ inlined_closures_.Add(closure);
}
- void MarkEmptyBlocks() { UNIMPLEMENTED(); }
-
-#ifdef DEBUG
- void Verify() { }
-#endif
-
private:
int spill_slot_count_;
HGraph* const graph_;
@@ -348,10 +1856,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* Build();
// Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
- UNIMPLEMENTED(); \
- return NULL; \
- }
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
@@ -373,7 +1878,101 @@ class LChunkBuilder BASE_EMBEDDED {
void Abort(const char* format, ...);
+ // Methods for getting operands for Use / Define / Temp.
+ LRegister* ToOperand(Register reg);
+ LUnallocated* ToUnallocated(Register reg);
+ LUnallocated* ToUnallocated(XMMRegister reg);
+
+ // Methods for setting up define-use relationships.
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ XMMRegister fixed_register);
+
+ // A value that is guaranteed to be allocated to a register.
+ // An operand created by UseRegister is guaranteed to be live until the
+ // end of the instruction, so the register allocator will not reuse its
+ // register for any other operand inside the instruction.
+ // An operand created by UseRegisterAtStart is guaranteed to be live only
+ // at the instruction start; the register allocator is free to assign the
+ // same register to some other operand used inside the instruction
+ // (i.e. a temporary or the output).
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+ // A value in a register that may be trashed.
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+ // An operand value in a register or stack slot.
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+ // An operand value in a register, stack slot or a constant operand.
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+ // An operand value in a register or a constant operand.
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
+
+ // An operand value in a register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ LOperand* UseAny(HValue* value);
+
+ // Methods for defining the result of an instruction. Each returns the
+ // same instruction it is passed.
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+ int index);
+ template<int I, int T>
+ LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg);
+ template<int I, int T>
+ LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+ XMMRegister reg);
+ LInstruction* AssignEnvironment(LInstruction* instr);
+ LInstruction* AssignPointerMap(LInstruction* instr);
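+
+ // Illustrative sketch of how a Do method combines the helpers above,
+ // modeled on the ia32 builder at this revision (LAddI and the overflow
+ // handling shown here are assumptions for this port):
+ //
+ //   LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+ //     LOperand* left = UseRegisterAtStart(instr->left());
+ //     LOperand* right = UseOrConstantAtStart(instr->right());
+ //     LInstruction* result = DefineSameAsFirst(new LAddI(left, right));
+ //     if (instr->CheckFlag(HValue::kCanOverflow)) {
+ //       result = AssignEnvironment(result);
+ //     }
+ //     return result;
+ //   }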
+
+ enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+ // By default we assume that instruction sequences generated for calls
+ // cannot deoptimize eagerly, so we do not attach an environment to such
+ // instructions.
+ LInstruction* MarkAsCall(
+ LInstruction* instr,
+ HInstruction* hinstr,
+ CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+ LInstruction* MarkAsSaveDoubles(LInstruction* instr);
+
+ LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+ LInstruction* instr, int ast_id);
+ void ClearInstructionPendingDeoptimizationEnvironment();
+
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+
+ void VisitInstruction(HInstruction* current);
+
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+ LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+ LInstruction* DoArithmeticD(Token::Value op,
+ HArithmeticBinaryOperation* instr);
+ LInstruction* DoArithmeticT(Token::Value op,
+ HArithmeticBinaryOperation* instr);
LChunk* chunk_;
HGraph* const graph_;
@@ -390,7 +1989,10 @@ class LChunkBuilder BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
} } // namespace v8::internal
#endif // V8_X64_LITHIUM_X64_H_
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 2846fe26..f95755db 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1274,8 +1274,6 @@ void MacroAssembler::Move(Register dst, Register src) {
}
-
-
void MacroAssembler::Move(Register dst, Handle<Object> source) {
ASSERT(!source->IsFailure());
if (source->IsSmi()) {
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 57cba142..8c1b5794 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1327,8 +1327,8 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj =
- StubCache::ComputeCallMiss(arguments().immediate(), kind_);
+ MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
+ kind_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1660,9 +1660,15 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
const int argc = arguments().immediate();
Label miss;
+ Label name_miss;
Label index_out_of_range;
+ Label* index_out_of_range_label = &index_out_of_range;
- GenerateNameCheck(name, &miss);
+ if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1690,7 +1696,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_code_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
@@ -1698,11 +1704,16 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
StubRuntimeCallHelper call_helper;
char_code_at_generator.GenerateSlow(masm(), call_helper);
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ ret((argc + 1) * kPointerSize);
+ }
__ bind(&miss);
+ // Restore function name in rcx.
+ __ Move(rcx, Handle<String>(name));
+ __ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -1733,9 +1744,15 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
const int argc = arguments().immediate();
Label miss;
+ Label name_miss;
Label index_out_of_range;
+ Label* index_out_of_range_label = &index_out_of_range;
- GenerateNameCheck(name, &miss);
+ if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1765,7 +1782,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
- &index_out_of_range,
+ index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
@@ -1773,11 +1790,16 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm(), call_helper);
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
- __ ret((argc + 1) * kPointerSize);
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+ __ ret((argc + 1) * kPointerSize);
+ }
__ bind(&miss);
+ // Restore function name in rcx.
+ __ Move(rcx, Handle<String>(name));
+ __ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -2262,17 +2284,24 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
- // Setup the context (function already in edi).
+ // Set up the context (function already in rdi).
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
__ IncrementCounter(&Counters::call_global_inline, 1);
ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
-
+ if (V8::UseCrankshaft()) {
+ // TODO(kasperl): For now, we always call indirectly through the
+ // code field in the function to allow recompilation to take effect
+ // without changing any of the call sites.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION);
+ } else {
+ Handle<Code> code(function->code());
+ __ InvokeCode(code, expected, arguments(),
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ }
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 5893a2f8..46729603 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,6 +47,7 @@ inline void* Zone::New(int size) {
// Check that the result has the proper alignment and return it.
ASSERT(IsAddressAligned(result, kAlignment, 0));
+ allocation_size_ += size;
return reinterpret_cast<void*>(result);
}
diff --git a/src/zone.cc b/src/zone.cc
index 01df4504..f8dbaabc 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -38,6 +38,7 @@ Address Zone::position_ = 0;
Address Zone::limit_ = 0;
int Zone::zone_excess_limit_ = 256 * MB;
int Zone::segment_bytes_allocated_ = 0;
+unsigned Zone::allocation_size_ = 0;
bool AssertNoZoneAllocation::allow_allocation_ = true;
diff --git a/src/zone.h b/src/zone.h
index dde722f6..e299f158 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -71,6 +71,8 @@ class Zone {
static inline void adjust_segment_bytes_allocated(int delta);
+ static unsigned allocation_size_;
+
private:
// All pointers returned from New() have this alignment.