author     Kristian Monsen <kristianm@google.com>  2010-09-08 11:05:35 +0100
committer  Kristian Monsen <kristianm@google.com>  2010-09-08 11:48:46 +0100
commit     80d68eab642096c1a48b6474d6ec33064b0ad1f5 (patch)
tree       6d3389d4fa4ca4a913970f75df6bbad6b1c858e9
parent     791712a13f1814dd3ab5d1a5ab8ff5dbc476f6d6 (diff)
Update V8 to r5388 as required by WebKit r66666
Change-Id: Ib3c42e9b7226d22c65c7077c543fe31afe62a318
-rw-r--r--  Android.v8common.mk | 3
-rw-r--r--  ChangeLog | 28
-rw-r--r--  V8_MERGE_REVISION | 7
-rwxr-xr-x  src/SConscript | 4
-rw-r--r--  src/accessors.h | 6
-rw-r--r--  src/arm/assembler-arm.cc | 58
-rw-r--r--  src/arm/assembler-arm.h | 10
-rw-r--r--  src/arm/builtins-arm.cc | 11
-rw-r--r--  src/arm/code-stubs-arm.cc | 4777
-rw-r--r--  src/arm/code-stubs-arm.h | 491
-rw-r--r--  src/arm/codegen-arm.cc | 4895
-rw-r--r--  src/arm/codegen-arm.h | 515
-rw-r--r--  src/arm/constants-arm.h | 27
-rw-r--r--  src/arm/debug-arm.cc | 95
-rw-r--r--  src/arm/disasm-arm.cc | 22
-rw-r--r--  src/arm/frames-arm.cc | 73
-rw-r--r--  src/arm/frames-arm.h | 5
-rw-r--r--  src/arm/full-codegen-arm.cc | 804
-rw-r--r--  src/arm/ic-arm.cc | 32
-rw-r--r--  src/arm/macro-assembler-arm.cc | 101
-rw-r--r--  src/arm/macro-assembler-arm.h | 18
-rw-r--r--  src/arm/regexp-macro-assembler-arm.cc | 2
-rw-r--r--  src/arm/regexp-macro-assembler-arm.h | 16
-rw-r--r--  src/arm/simulator-arm.cc | 81
-rw-r--r--  src/arm/stub-cache-arm.cc | 131
-rw-r--r--  src/array.js | 15
-rw-r--r--  src/ast-inl.h | 3
-rw-r--r--  src/ast.cc | 624
-rw-r--r--  src/ast.h | 369
-rw-r--r--  src/bootstrapper.cc | 11
-rw-r--r--  src/builtins.cc | 12
-rw-r--r--  src/builtins.h | 7
-rw-r--r--  src/circular-queue.cc | 5
-rw-r--r--  src/code-stubs.h | 618
-rw-r--r--  src/codegen.cc | 12
-rw-r--r--  src/codegen.h | 629
-rwxr-xr-x  src/compiler.cc | 77
-rw-r--r--  src/contexts.h | 5
-rw-r--r--  src/conversions.cc | 9
-rw-r--r--  src/conversions.h | 6
-rw-r--r--  src/data-flow.cc | 278
-rw-r--r--  src/data-flow.h | 76
-rw-r--r--  src/debug.cc | 13
-rw-r--r--  src/debug.h | 16
-rw-r--r--  src/disassembler.cc | 7
-rw-r--r--  src/flags.h | 2
-rw-r--r--  src/flow-graph.cc | 763
-rw-r--r--  src/flow-graph.h | 180
-rw-r--r--  src/frames-inl.h | 15
-rw-r--r--  src/frames.cc | 193
-rw-r--r--  src/frames.h | 78
-rw-r--r--  src/full-codegen.cc | 688
-rw-r--r--  src/full-codegen.h | 174
-rw-r--r--  src/func-name-inferrer.cc | 14
-rw-r--r--  src/func-name-inferrer.h | 54
-rw-r--r--  src/globals.h | 12
-rw-r--r--  src/heap-inl.h | 3
-rw-r--r--  src/heap.cc | 64
-rw-r--r--  src/heap.h | 241
-rw-r--r--  src/ia32/builtins-ia32.cc | 145
-rw-r--r--  src/ia32/code-stubs-ia32.cc | 4540
-rw-r--r--  src/ia32/code-stubs-ia32.h | 360
-rw-r--r--  src/ia32/codegen-ia32.cc | 4679
-rw-r--r--  src/ia32/codegen-ia32.h | 329
-rw-r--r--  src/ia32/debug-ia32.cc | 87
-rw-r--r--  src/ia32/frames-ia32.cc | 64
-rw-r--r--  src/ia32/full-codegen-ia32.cc | 1206
-rw-r--r--  src/ia32/ic-ia32.cc | 29
-rw-r--r--  src/ia32/macro-assembler-ia32.cc | 178
-rw-r--r--  src/ia32/macro-assembler-ia32.h | 33
-rw-r--r--  src/ia32/regexp-macro-assembler-ia32.cc | 2
-rw-r--r--  src/ia32/stub-cache-ia32.cc | 12
-rw-r--r--  src/ia32/virtual-frame-ia32.cc | 4
-rw-r--r--  src/ic-inl.h | 4
-rw-r--r--  src/ic.cc | 6
-rw-r--r--  src/ic.h | 4
-rw-r--r--  src/liveedit.cc | 32
-rw-r--r--  src/log.cc | 4
-rw-r--r--  src/macro-assembler.h | 27
-rw-r--r--  src/macros.py | 2
-rw-r--r--  src/mark-compact.cc | 90
-rw-r--r--  src/mark-compact.h | 42
-rw-r--r--  src/memory.h | 4
-rw-r--r--  src/objects-debug.cc | 27
-rw-r--r--  src/objects-inl.h | 28
-rw-r--r--  src/objects.cc | 216
-rw-r--r--  src/objects.h | 171
-rw-r--r--  src/parser.cc | 351
-rw-r--r--  src/parser.h | 48
-rw-r--r--  src/prettyprinter.cc | 65
-rw-r--r--  src/prettyprinter.h | 4
-rw-r--r--  src/profile-generator.cc | 4
-rw-r--r--  src/profile-generator.h | 3
-rw-r--r--  src/regexp-macro-assembler.h | 2
-rw-r--r--  src/rewriter.cc | 64
-rw-r--r--  src/runtime.cc | 296
-rw-r--r--  src/runtime.h | 4
-rw-r--r--  src/runtime.js | 5
-rwxr-xr-x  src/scanner.cc | 146
-rw-r--r--  src/scanner.h | 102
-rw-r--r--  src/serialize.cc | 10
-rw-r--r--  src/spaces.cc | 179
-rw-r--r--  src/spaces.h | 20
-rw-r--r--  src/stub-cache.cc | 91
-rw-r--r--  src/stub-cache.h | 229
-rw-r--r--  src/token.h | 4
-rw-r--r--  src/top.cc | 34
-rw-r--r--  src/top.h | 9
-rw-r--r--  src/utils.h | 209
-rw-r--r--  src/v8-counters.h | 11
-rw-r--r--  src/v8.h | 3
-rw-r--r--  src/v8natives.js | 28
-rw-r--r--  src/v8threads.cc | 22
-rw-r--r--  src/v8threads.h | 2
-rw-r--r--  src/version.cc | 6
-rw-r--r--  src/x64/builtins-x64.cc | 11
-rw-r--r--  src/x64/code-stubs-x64.cc | 4015
-rw-r--r--  src/x64/code-stubs-x64.h | 389
-rw-r--r--  src/x64/codegen-x64.cc | 4147
-rw-r--r--  src/x64/codegen-x64.h | 359
-rw-r--r--  src/x64/debug-x64.cc | 92
-rw-r--r--  src/x64/frames-x64.cc | 60
-rw-r--r--  src/x64/full-codegen-x64.cc | 938
-rw-r--r--  src/x64/ic-x64.cc | 29
-rw-r--r--  src/x64/macro-assembler-x64.cc | 153
-rw-r--r--  src/x64/macro-assembler-x64.h | 24
-rw-r--r--  src/x64/regexp-macro-assembler-x64.cc | 2
-rw-r--r--  src/x64/stub-cache-x64.cc | 131
-rw-r--r--  src/x64/virtual-frame-x64.cc | 4
-rw-r--r--  test/cctest/cctest.status | 5
-rw-r--r--  test/cctest/test-api.cc | 4
-rw-r--r--  test/cctest/test-assembler-arm.cc | 63
-rw-r--r--  test/cctest/test-debug.cc | 73
-rw-r--r--  test/cctest/test-disasm-arm.cc | 48
-rw-r--r--  test/cctest/test-heap.cc | 29
-rw-r--r--  test/cctest/test-log-stack-tracer.cc | 22
-rw-r--r--  test/cctest/test-profile-generator.cc | 17
-rw-r--r--  test/cctest/test-serialize.cc | 15
-rw-r--r--  test/cctest/test-utils.cc | 61
-rw-r--r--  test/cctest/testcfg.py | 8
-rw-r--r--  test/es5conform/testcfg.py | 5
-rw-r--r--  test/message/testcfg.py | 5
-rw-r--r--  test/mjsunit/array-splice.js | 7
-rw-r--r--  test/mjsunit/const-eval-init.js | 4
-rw-r--r--  test/mjsunit/fuzz-natives.js | 6
-rw-r--r--  test/mjsunit/regress/regress-842.js | 42
-rw-r--r--  test/mjsunit/regress/regress-851.js | 32
-rw-r--r--  test/mjsunit/testcfg.py | 12
-rw-r--r--  test/mjsunit/third_party/array-splice-webkit.js | 2
-rw-r--r--  test/mozilla/mozilla.status | 5
-rw-r--r--  test/mozilla/testcfg.py | 7
-rw-r--r--  test/sputnik/testcfg.py | 10
-rw-r--r--  tools/gyp/v8.gyp | 8
-rw-r--r--  tools/oom_dump/oom_dump.cc | 4
-rwxr-xr-x  tools/test.py | 73
-rw-r--r--  tools/utils.py | 8
-rw-r--r--  tools/v8.xcodeproj/project.pbxproj | 20
-rw-r--r--  tools/visual_studio/v8_base.vcproj | 16
-rw-r--r--  tools/visual_studio/v8_base_arm.vcproj | 8
-rw-r--r--  tools/visual_studio/v8_base_x64.vcproj | 8
160 files changed, 20939 insertions, 21803 deletions
diff --git a/Android.v8common.mk b/Android.v8common.mk
index 263b9aba..113eec48 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -29,7 +29,6 @@ V8_LOCAL_SRC_FILES := \
src/fast-dtoa.cc \
src/fixed-dtoa.cc \
src/flags.cc \
- src/flow-graph.cc \
src/frame-element.cc \
src/frames.cc \
src/full-codegen.cc \
@@ -86,6 +85,7 @@ ifeq ($(TARGET_ARCH),arm)
src/arm/assembler-arm.cc \
src/arm/builtins-arm.cc \
src/arm/codegen-arm.cc \
+ src/arm/code-stubs-arm.cc \
src/arm/constants-arm.cc \
src/arm/cpu-arm.cc \
src/arm/debug-arm.cc \
@@ -106,6 +106,7 @@ ifeq ($(TARGET_ARCH),x86)
src/ia32/assembler-ia32.cc \
src/ia32/builtins-ia32.cc \
src/ia32/codegen-ia32.cc \
+	src/ia32/code-stubs-ia32.cc \
src/ia32/cpu-ia32.cc \
src/ia32/disasm-ia32.cc \
src/ia32/frames-ia32.cc \
diff --git a/ChangeLog b/ChangeLog
index cae9a429..37f427c5 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,29 @@
+2010-09-01: Version 2.4.0
+
+ Fix bug in Object.freeze and Object.seal when Array.prototype or
+ Object.prototype is changed (issue 842).
+
+ Update Array.splice to follow Safari and Firefox when called
+ with zero arguments.
+
+ Fix a missing live register when breaking at keyed loads on ARM.
+
+ Performance improvements on all platforms.
+
+
+2010-08-25: Version 2.3.11
+
+ Fix bug in RegExp related to copy-on-write arrays.
+
+ Refactoring of tools/test.py script, including the introduction of
+ VARIANT_FLAGS that allows specification of sets of flags with which
+ all tests should be run.
+
+ Fix a bug in the handling of debug breaks in CallIC.
+
+ Performance improvements on all platforms.
+
+
2010-08-23: Version 2.3.10
Fix bug in bitops on ARM.
@@ -18,7 +44,7 @@
Fixed DST cache to take into account the suspension of DST in
Egypt during the 2010 Ramadan (issue http://crbug.com/51855).
- Performance improvements on all platforms.
+ Performance improvements on all platforms.
2010-08-16: Version 2.3.8
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 3e88f018..31f3bd89 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,3 +1,4 @@
-Currently we are using V8 at http://v8.googlecode.com/svn/trunk@5318,
-which is ahead of our current WebKit revision
-See b/2947054
+We use a V8 revision that has been used for a Chromium release.
+
+http://src.chromium.org/svn/releases/7.0.514.1/DEPS
+http://v8.googlecode.com/svn/trunk@5388
diff --git a/src/SConscript b/src/SConscript
index e6b4e382..7fae8d4b 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -62,7 +62,6 @@ SOURCES = {
execution.cc
factory.cc
flags.cc
- flow-graph.cc
frame-element.cc
frames.cc
full-codegen.cc
@@ -121,6 +120,7 @@ SOURCES = {
jump-target-light.cc
virtual-frame-light.cc
arm/builtins-arm.cc
+ arm/code-stubs-arm.cc
arm/codegen-arm.cc
arm/constants-arm.cc
arm/cpu-arm.cc
@@ -159,6 +159,7 @@ SOURCES = {
virtual-frame-heavy.cc
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
+ ia32/code-stubs-ia32.cc
ia32/codegen-ia32.cc
ia32/cpu-ia32.cc
ia32/debug-ia32.cc
@@ -178,6 +179,7 @@ SOURCES = {
virtual-frame-heavy.cc
x64/assembler-x64.cc
x64/builtins-x64.cc
+ x64/code-stubs-x64.cc
x64/codegen-x64.cc
x64/cpu-x64.cc
x64/debug-x64.cc
diff --git a/src/accessors.h b/src/accessors.h
index 7a840a19..eeab2acf 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -75,8 +75,10 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
- static Object* FunctionGetPrototype(Object* object, void*);
- static Object* FunctionSetPrototype(JSObject* object, Object* value, void*);
+ MUST_USE_RESULT static Object* FunctionGetPrototype(Object* object, void*);
+ MUST_USE_RESULT static Object* FunctionSetPrototype(JSObject* object,
+ Object* value,
+ void*);
private:
// Accessor functions only used through the descriptor.
static Object* FunctionGetLength(Object* object, void*);
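For context, MUST_USE_RESULT makes it a compiler-diagnosed mistake to drop the returned Object*, which may be a failure sentinel rather than a real object. Below is a minimal host-side sketch (not part of this change) of how such a macro is commonly defined and consumed, assuming a GCC/Clang warn_unused_result attribute; the actual V8 definition is not shown in this diff, and TryDoWork is a hypothetical example function.

#if defined(__GNUC__)
#define MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

// Hypothetical fallible operation, for illustration only.
MUST_USE_RESULT static bool TryDoWork() { return true; }

int main() {
  TryDoWork();            // Compiler warns: result declared must-use is ignored.
  bool ok = TryDoWork();  // OK: result is consumed.
  return ok ? 0 : 1;
}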
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 6df6411d..7d368bf4 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1809,6 +1809,7 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
+
void Assembler::vldr(const DwVfpRegister dst,
const Register base,
int offset,
@@ -1838,7 +1839,9 @@ void Assembler::vldr(const SwVfpRegister dst,
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
- emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+ int sd, d;
+ dst.split_code(&sd, &d);
+ emit(cond | d*B22 | 0xD9*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
}
@@ -1872,7 +1875,9 @@ void Assembler::vstr(const SwVfpRegister src,
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
- emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+ int sd, d;
+ src.split_code(&sd, &d);
+ emit(cond | d*B22 | 0xD8*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
}
@@ -1979,8 +1984,10 @@ void Assembler::vmov(const SwVfpRegister dst,
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
ASSERT(CpuFeatures::IsEnabled(VFP3));
- emit(cond | 0xE*B24 | 0xB*B20 |
- dst.code()*B12 | 0x5*B9 | B6 | src.code());
+ int sd, d, sm, m;
+ dst.split_code(&sd, &d);
+ src.split_code(&sm, &m);
+ emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}
@@ -2034,8 +2041,9 @@ void Assembler::vmov(const SwVfpRegister dst,
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!src.is(pc));
- emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
- src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
+ int sn, n;
+ dst.split_code(&sn, &n);
+ emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
@@ -2048,8 +2056,9 @@ void Assembler::vmov(const Register dst,
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!dst.is(pc));
- emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
- dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
+ int sn, n;
+ src.split_code(&sn, &n);
+ emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
@@ -2099,16 +2108,21 @@ static bool IsDoubleVFPType(VFPType type) {
}
-// Depending on split_last_bit split binary representation of reg_code into Vm:M
-// or M:Vm form (where M is single bit).
-static void SplitRegCode(bool split_last_bit,
+// Split five bit reg_code based on size of reg_type.
+// 32-bit register codes are Vm:M
+// 64-bit register codes are M:Vm
+// where Vm is four bits, and M is a single bit.
+static void SplitRegCode(VFPType reg_type,
int reg_code,
int* vm,
int* m) {
- if (split_last_bit) {
+ ASSERT((reg_code >= 0) && (reg_code <= 31));
+ if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
+ // 32 bit type.
*m = reg_code & 0x1;
*vm = reg_code >> 1;
} else {
+ // 64 bit type.
*m = (reg_code & 0x10) >> 4;
*vm = reg_code & 0x0F;
}
@@ -2121,6 +2135,11 @@ static Instr EncodeVCVT(const VFPType dst_type,
const VFPType src_type,
const int src_code,
const Condition cond) {
+ ASSERT(src_type != dst_type);
+ int D, Vd, M, Vm;
+ SplitRegCode(src_type, src_code, &Vm, &M);
+ SplitRegCode(dst_type, dst_code, &Vd, &D);
+
if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
// Conversion between IEEE floating point and 32-bit integer.
// Instruction details available in ARM DDI 0406B, A8.6.295.
@@ -2128,22 +2147,17 @@ static Instr EncodeVCVT(const VFPType dst_type,
// Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
- int sz, opc2, D, Vd, M, Vm, op;
+ int sz, opc2, op;
if (IsIntegerVFPType(dst_type)) {
opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
op = 1; // round towards zero
- SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
- SplitRegCode(true, dst_code, &Vd, &D);
} else {
ASSERT(IsIntegerVFPType(src_type));
-
opc2 = 0x0;
sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
- SplitRegCode(true, src_code, &Vm, &M);
- SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D);
}
return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
@@ -2153,13 +2167,7 @@ static Instr EncodeVCVT(const VFPType dst_type,
// Instruction details available in ARM DDI 0406B, A8.6.298.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
// Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- int sz, D, Vd, M, Vm;
-
- ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type));
- sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D);
- SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
-
+ int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
}
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index cc6ec054..be9aa92f 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -120,6 +120,11 @@ struct SwVfpRegister {
ASSERT(is_valid());
return 1 << code_;
}
+ void split_code(int* vm, int* m) const {
+ ASSERT(is_valid());
+ *m = code_ & 0x1;
+ *vm = code_ >> 1;
+ }
int code_;
};
@@ -152,6 +157,11 @@ struct DwVfpRegister {
ASSERT(is_valid());
return 1 << code_;
}
+ void split_code(int* vm, int* m) const {
+ ASSERT(is_valid());
+ *m = (code_ & 0x10) >> 4;
+ *vm = code_ & 0x0F;
+ }
int code_;
};
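A small host-side sketch (not part of this change) of the bit splitting performed by the two split_code() helpers added above: a five-bit VFP register code is emitted as a four-bit Vm field plus a single M bit, with single-precision codes laid out as Vm:M and double-precision codes as M:Vm.

#include <cassert>
#include <cstdio>

// Mirrors SwVfpRegister::split_code: single-precision codes are Vm:M.
static void SplitSingle(int code, int* vm, int* m) {
  assert(code >= 0 && code <= 31);
  *m = code & 0x1;
  *vm = code >> 1;
}

// Mirrors DwVfpRegister::split_code: double-precision codes are M:Vm.
static void SplitDouble(int code, int* vm, int* m) {
  assert(code >= 0 && code <= 31);
  *m = (code & 0x10) >> 4;
  *vm = code & 0x0F;
}

int main() {
  int vm, m;
  SplitSingle(15, &vm, &m);  // s15 -> Vm = 7, M = 1
  std::printf("s15: Vm=%d M=%d\n", vm, m);
  SplitDouble(7, &vm, &m);   // d7  -> Vm = 7, M = 0
  std::printf("d7:  Vm=%d M=%d\n", vm, m);
  return 0;
}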
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 224b75f1..a902fc21 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -481,6 +481,13 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // TODO(849): implement custom construct stub.
+ // Generate a copy of the generic stub for now.
+ Generate_JSConstructStubGeneric(masm);
+}
+
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -505,10 +512,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
__ bind(&non_function_call);
- // CALL_NON_FUNCTION expects the non-function constructor as receiver
- // (instead of the original receiver from the call site). The receiver is
- // stack element argc.
- __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
new file mode 100644
index 00000000..f75ee8bd
--- /dev/null
+++ b/src/arm/code-stubs-arm.cc
@@ -0,0 +1,4777 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_ARM)
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* lhs_not_nan,
+ Label* slow,
+ bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in cp.
+ Label gc;
+
+ // Pop the function info from the stack.
+ __ pop(r3);
+
+ // Attempt to allocate new JSFunction in new space.
+ __ AllocateInNewSpace(JSFunction::kSize,
+ r0,
+ r1,
+ r2,
+ &gc,
+ TAG_OBJECT);
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+ __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
+ __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ Push(cp, r3);
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+
+ // Attempt to allocate the context in new space.
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ r0,
+ r1,
+ r2,
+ &gc,
+ TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ ldr(r3, MemOperand(sp, 0));
+
+ // Setup the object header.
+ __ LoadRoot(r2, Heap::kContextMapRootIndex);
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ mov(r2, Operand(Smi::FromInt(length)));
+ __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+
+ // Setup the fixed slots.
+ __ mov(r1, Operand(Smi::FromInt(0)));
+ __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+ // Copy the global object from the surrounding context.
+ __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, r0);
+ __ pop();
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: constant elements.
+ // [sp + kPointerSize]: literal index.
+ // [sp + (2 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into r3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+ __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r3, ip);
+ __ b(eq, &slow_case);
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(r3);
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, expected_map_index);
+ __ cmp(r3, ip);
+ __ Assert(eq, message);
+ __ pop(r3);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size,
+ r0,
+ r1,
+ r2,
+ &slow_case,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ ldr(r1, FieldMemOperand(r3, i));
+ __ str(r1, FieldMemOperand(r0, i));
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ add(r2, r0, Operand(JSArray::kSize));
+ __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+ }
+
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+// Takes a Smi and converts to an IEEE 64 bit floating point value in two
+// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
+// scratch register. Destroys the source register. No GC occurs during this
+// stub so you don't have to set up the frame.
+class ConvertToDoubleStub : public CodeStub {
+ public:
+ ConvertToDoubleStub(Register result_reg_1,
+ Register result_reg_2,
+ Register source_reg,
+ Register scratch_reg)
+ : result1_(result_reg_1),
+ result2_(result_reg_2),
+ source_(source_reg),
+ zeros_(scratch_reg) { }
+
+ private:
+ Register result1_;
+ Register result2_;
+ Register source_;
+ Register zeros_;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 14> {};
+
+ Major MajorKey() { return ConvertToDouble; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return result1_.code() +
+ (result2_.code() << 4) +
+ (source_.code() << 8) +
+ (zeros_.code() << 12);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "ConvertToDoubleStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("ConvertToDoubleStub\n"); }
+#endif
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ Register exponent = result1_;
+ Register mantissa = result2_;
+#else
+ Register exponent = result2_;
+ Register mantissa = result1_;
+#endif
+ Label not_special;
+ // Convert from Smi to integer.
+ __ mov(source_, Operand(source_, ASR, kSmiTagSize));
+ // Move sign bit from source to destination. This works because the sign bit
+ // in the exponent word of the double has the same position and polarity as
+ // the 2's complement sign bit in a Smi.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
+ // Subtract from 0 if source was negative.
+ __ rsb(source_, source_, Operand(0), LeaveCC, ne);
+
+ // We have -1, 0 or 1, which we treat specially. Register source_ contains
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ cmp(source_, Operand(1));
+ __ b(gt, &not_special);
+
+ // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+ static const uint32_t exponent_word_for_1 =
+ HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+ __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
+ // 1, 0 and -1 all have 0 for the second word.
+ __ mov(mantissa, Operand(0));
+ __ Ret();
+
+ __ bind(&not_special);
+ // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ __ CountLeadingZeros(zeros_, source_, mantissa);
+ // Compute exponent and or it into the exponent register.
+ // We use mantissa as a scratch register here. Use a fudge factor to
+ // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
+ // that fit in the ARM's constant field.
+ int fudge = 0x400;
+ __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
+ __ add(mantissa, mantissa, Operand(fudge));
+ __ orr(exponent,
+ exponent,
+ Operand(mantissa, LSL, HeapNumber::kExponentShift));
+ // Shift up the source chopping the top bit off.
+ __ add(zeros_, zeros_, Operand(1));
+ // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+ __ mov(source_, Operand(source_, LSL, zeros_));
+ // Compute lower part of fraction (last 12 bits).
+ __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
+ // And the top (top 20 bits).
+ __ orr(exponent,
+ exponent,
+ Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
+ __ Ret();
+}
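A host-side sketch (not part of this change) of the IEEE-754 layout the ConvertToDoubleStub comment describes, using the compiler's own int-to-double conversion as the reference: the high word holds the sign bit, the 11 exponent bits (biased by 1023) and the top 20 mantissa bits, and the low word holds the remaining 32 mantissa bits.

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  int32_t smi_value = -5;  // Hypothetical untagged Smi value.
  double d = static_cast<double>(smi_value);
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // Sign, exponent, top 20 mantissa bits.
  uint32_t lo = static_cast<uint32_t>(bits);        // Low 32 mantissa bits.
  uint32_t sign = hi >> 31;
  uint32_t exponent = (hi >> 20) & 0x7FF;           // Biased by 1023.
  std::printf("value=%d hi=0x%08x lo=0x%08x sign=%u biased_exp=%u\n",
              smi_value, hi, lo, sign, exponent);
  // For -5.0: sign=1, biased_exp=1025 (i.e. 2^2), hi=0xc0140000, lo=0.
  return 0;
}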
+
+
+// See comment for class.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+ Label max_negative_int;
+ // the_int_ has the answer which is a signed int32 but not a Smi.
+ // We test for the special value that has a different exponent. This test
+ // has the neat side effect of setting the flags according to the sign.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ cmp(the_int_, Operand(0x80000000u));
+ __ b(eq, &max_negative_int);
+ // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+ uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ mov(scratch_, Operand(non_smi_exponent));
+ // Set the sign bit in scratch_ if the value was negative.
+ __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
+ // Subtract from 0 if the value was negative.
+ __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
+  // We should be masking the implicit first digit of the mantissa away here,
+ // but it just ends up combining harmlessly with the last digit of the
+ // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
+ // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
+ ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
+ __ str(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kExponentOffset));
+ __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
+ __ str(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kMantissaOffset));
+ __ Ret();
+
+ __ bind(&max_negative_int);
+ // The max negative int32 is stored as a positive number in the mantissa of
+ // a double because it uses a sign bit instead of using two's complement.
+ // The actual mantissa bits stored are all 0 because the implicit most
+ // significant 1 bit is not stored.
+ non_smi_exponent += 1 << HeapNumber::kExponentShift;
+ __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+ __ mov(ip, Operand(0));
+ __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ Ret();
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan) {
+ Label not_identical;
+ Label heap_number, return_equal;
+ __ cmp(r0, r1);
+ __ b(ne, &not_identical);
+
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cc != eq || !never_nan_nan) {
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == lt || cc == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == le || cc == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
+ }
+ __ Ret();
+ }
+ }
+ }
+ }
+
+ __ bind(&return_equal);
+ if (cc == lt) {
+ __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cc == gt) {
+ __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ if (cc != eq || !never_nan_nan) {
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ __ Ret();
+ }
+ // No fall through here.
+ }
+
+ __ bind(&not_identical);
+}
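A host-side sketch (not part of this change) of the NaN test used above: a double is NaN exactly when all 11 exponent bits are set and the 52 mantissa bits are not all zero (an all-zero mantissa would be an infinity).

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// NaN iff exponent bits are all ones and the mantissa is non-zero.
static bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint64_t exponent = (bits >> 52) & 0x7FF;
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  return exponent == 0x7FF && mantissa != 0;
}

int main() {
  std::printf("NaN      -> %d\n", IsNaNBits(std::numeric_limits<double>::quiet_NaN()));  // 1
  std::printf("Infinity -> %d\n", IsNaNBits(std::numeric_limits<double>::infinity()));   // 0
  std::printf("1.5      -> %d\n", IsNaNBits(1.5));                                       // 0
  return 0;
}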
+
+
+// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* lhs_not_nan,
+ Label* slow,
+ bool strict) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ Label rhs_is_smi;
+ __ tst(rhs, Operand(kSmiTagMask));
+ __ b(eq, &rhs_is_smi);
+
+ // Lhs is a Smi. Check whether the rhs is a heap number.
+ __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If rhs is not a number and lhs is a Smi then strict equality cannot
+    // succeed. Return non-equal.
+    // If rhs is r0 then there is already a non-zero value in it.
+ if (!rhs.is(r0)) {
+ __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+ }
+ __ Ret(ne);
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ b(ne, slow);
+ }
+
+ // Lhs is a smi, rhs is a number.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert lhs to a double in d7.
+ CpuFeatures::Scope scope(VFP3);
+ __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
+ // Load the double from rhs, tagged HeapNumber r0, to d6.
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ } else {
+ __ push(lr);
+ // Convert lhs to a double in r2, r3.
+ __ mov(r7, Operand(lhs));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Load rhs to a double in r0, r1.
+ __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ pop(lr);
+ }
+
+ // We now have both loaded as doubles but we can skip the lhs nan check
+ // since it's a smi.
+ __ jmp(lhs_not_nan);
+
+ __ bind(&rhs_is_smi);
+ // Rhs is a smi. Check whether the non-smi lhs is a heap number.
+ __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If lhs is not a number and rhs is a smi then strict equality cannot
+ // succeed. Return non-equal.
+ // If lhs is r0 then there is already a non zero value in it.
+ if (!lhs.is(r0)) {
+ __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+ }
+ __ Ret(ne);
+ } else {
+ // Smi compared non-strictly with a non-smi non-heap-number. Call
+ // the runtime.
+ __ b(ne, slow);
+ }
+
+ // Rhs is a smi, lhs is a heap number.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from lhs, tagged HeapNumber r1, to d7.
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+    // Convert rhs to a double in d6.
+ __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
+ } else {
+ __ push(lr);
+ // Load lhs to a double in r2, r3.
+ __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ // Convert rhs to a double in r0, r1.
+ __ mov(r7, Operand(rhs));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+ // Fall through to both_loaded_as_doubles.
+}
+
+
+void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
+ Label one_is_nan, neither_is_nan;
+
+ __ Sbfx(r4,
+ lhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
+ __ b(ne, lhs_not_nan);
+ __ mov(r4,
+ Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ SetCC);
+ __ b(ne, &one_is_nan);
+ __ cmp(lhs_mantissa, Operand(0));
+ __ b(ne, &one_is_nan);
+
+ __ bind(lhs_not_nan);
+ __ Sbfx(r4,
+ rhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
+ __ b(ne, &neither_is_nan);
+ __ mov(r4,
+ Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ SetCC);
+ __ b(ne, &one_is_nan);
+ __ cmp(rhs_mantissa, Operand(0));
+ __ b(eq, &neither_is_nan);
+
+ __ bind(&one_is_nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in r0 to make the comparison fail.
+ if (cc == lt || cc == le) {
+ __ mov(r0, Operand(GREATER));
+ } else {
+ __ mov(r0, Operand(LESS));
+ }
+ __ Ret();
+
+ __ bind(&neither_is_nan);
+}
+
+
+// See comment at call site.
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
+
+ // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
+ if (cc == eq) {
+ // Doubles are not equal unless they have the same bit pattern.
+ // Exception: 0 and -0.
+ __ cmp(rhs_mantissa, Operand(lhs_mantissa));
+ __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
+ // Return non-zero if the numbers are unequal.
+ __ Ret(ne);
+
+ __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
+ // If exponents are equal then return 0.
+ __ Ret(eq);
+
+ // Exponents are unequal. The only way we can return that the numbers
+ // are equal is if one is -0 and the other is 0. We already dealt
+ // with the case where both are -0 or both are 0.
+ // We start by seeing if the mantissas (that are equal) or the bottom
+ // 31 bits of the rhs exponent are non-zero. If so we return not
+ // equal.
+ __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
+ __ mov(r0, Operand(r4), LeaveCC, ne);
+ __ Ret(ne);
+ // Now they are equal if and only if the lhs exponent is zero in its
+ // low 31 bits.
+ __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
+ __ Ret();
+ } else {
+ // Call a native function to do a comparison between two non-NaNs.
+ // Call C routine that may not cause GC or other trouble.
+ __ push(lr);
+ __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
+ __ CallCFunction(ExternalReference::compare_doubles(), 4);
+ __ pop(pc); // Return.
+ }
+}
+
+
+// See comment at call site.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ // If either operand is a JSObject or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into r2 and compare it with
+ // FIRST_JS_OBJECT_TYPE.
+ __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &first_non_object);
+
+ // Return non-zero (r0 is not zero)
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ Ret();
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ cmp(r2, Operand(ODDBALL_TYPE));
+ __ b(eq, &return_not_equal);
+
+ __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ cmp(r3, Operand(ODDBALL_TYPE));
+ __ b(eq, &return_not_equal);
+
+ // Now that we have the types we might as well check for symbol-symbol.
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ and_(r2, r2, Operand(r3));
+ __ tst(r2, Operand(kIsSymbolMask));
+ __ b(ne, &return_not_equal);
+}
+
+
+// See comment at call site.
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers,
+ Label* slow) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, not_heap_numbers);
+ __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ cmp(r2, r3);
+ __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ }
+ __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0)));
+
+ // r2 is object type of rhs.
+ // Ensure that no non-strings have the symbol bit set.
+ Label object_test;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ tst(r2, Operand(kIsNotStringMask));
+ __ b(ne, &object_test);
+ __ tst(r2, Operand(kIsSymbolMask));
+ __ b(eq, possible_strings);
+ __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, not_both_strings);
+ __ tst(r3, Operand(kIsSymbolMask));
+ __ b(eq, possible_strings);
+
+ // Both are symbols. We already checked they weren't the same pointer
+ // so they are not equal.
+ __ mov(r0, Operand(NOT_EQUAL));
+ __ Ret();
+
+ __ bind(&object_test);
+ __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, not_both_strings);
+ __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, not_both_strings);
+ // If both objects are undetectable, they are equal. Otherwise, they
+ // are not equal, since they are different objects and an object is not
+ // equal to undefined.
+ __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ and_(r0, r2, Operand(r3));
+ __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ Ret();
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
+ __ sub(mask, mask, Operand(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ if (!object_is_smi) {
+ __ BranchOnSmi(object, &is_smi);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ true);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ add(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+ __ eor(scratch1, scratch1, Operand(scratch2));
+ __ and_(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ add(scratch1,
+ number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ __ ldr(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ BranchOnSmi(probe, not_found);
+ __ sub(scratch2, object, Operand(kHeapObjectTag));
+ __ vldr(d0, scratch2, HeapNumber::kValueOffset);
+ __ sub(probe, probe, Operand(kHeapObjectTag));
+ __ vldr(d1, probe, HeapNumber::kValueOffset);
+ __ vcmp(d0, d1);
+ __ vmrs(pc);
+ __ b(ne, not_found); // The cache did not contain this value.
+ __ b(&load_result_from_cache);
+ } else {
+ __ b(not_found);
+ }
+ }
+
+ __ bind(&is_smi);
+ Register scratch = scratch1;
+ __ and_(scratch, mask, Operand(object, ASR, 1));
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ add(scratch,
+ number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ Register probe = mask;
+ __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ cmp(object, probe);
+ __ b(ne, not_found);
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ ldr(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native,
+ 1,
+ scratch1,
+ scratch2);
+}
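A host-side sketch (not part of this change) summarizing the cache probe generated above: each cache entry is a (number, string) pair, so the mask is derived from half the backing array length; smis hash to their untagged value, and heap numbers hash to the xor of the two 32-bit halves of the double.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Array index of the entry's key; the cached string sits at index + 1.
// cache_length is the backing array length (two slots per entry), assumed
// to be a power of two.
static uint32_t NumberStringCacheIndex(double value, bool is_smi,
                                       uint32_t cache_length) {
  uint32_t mask = cache_length / 2 - 1;
  uint32_t hash;
  if (is_smi) {
    hash = static_cast<uint32_t>(static_cast<int32_t>(value));
  } else {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  }
  return (hash & mask) * 2;
}

int main() {
  std::printf("smi 7 -> slot %u\n", NumberStringCacheIndex(7, true, 128));
  std::printf("2.5   -> slot %u\n", NumberStringCacheIndex(2.5, false, 128));
  return 0;
}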
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ ldr(r1, MemOperand(sp, 0));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
+ __ add(sp, sp, Operand(1 * kPointerSize));
+ __ Ret();
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ __ add(offset_, object_, Operand(offset_));
+ __ RecordWriteHelper(object_, offset_, scratch_);
+ __ Ret();
+}
+
+
+// On entry lhs_ and rhs_ are the values to be compared.
+// On exit r0 is 0, positive or negative to indicate the result of
+// the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles, lhs_not_nan;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ and_(r2, lhs_, Operand(rhs_));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &not_smis);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Fall through to both_loaded_as_doubles.
+ // 4) Jump to lhs_not_nan.
+ // In cases 3 and 4 we have found out we were dealing with a number-number
+ // comparison. If VFP3 is supported the double values of the numbers have
+ // been loaded into d7 and d6. Otherwise, the double values have been loaded
+ // into r0, r1, r2, and r3.
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
+
+ __ bind(&both_loaded_as_doubles);
+ // The arguments have been converted to doubles and stored in d6 and d7, if
+ // VFP3 is supported, or in r0, r1, r2, and r3.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ bind(&lhs_not_nan);
+ CpuFeatures::Scope scope(VFP3);
+ Label no_nan;
+ // ARMv7 VFP3 instructions to implement double precision comparison.
+ __ vcmp(d7, d6);
+ __ vmrs(pc); // Move vector status bits to normal status bits.
+ Label nan;
+ __ b(vs, &nan);
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ Ret();
+
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r0 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc_ == lt || cc_ == le) {
+ __ mov(r0, Operand(GREATER));
+ } else {
+ __ mov(r0, Operand(LESS));
+ }
+ __ Ret();
+ } else {
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN. Also binds lhs_not_nan.
+ EmitNanCheck(masm, &lhs_not_nan, cc_);
+ // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
+ // answer. Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+ }
+
+ __ bind(&not_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in rhs_ and lhs_.
+ if (strict_) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ }
+
+ Label check_for_symbols;
+ Label flat_string_check;
+ // Check for heap-number-heap-number comparison. Can jump to slow case,
+ // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
+ // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // In this case r2 will contain the type of rhs_. Never falls through.
+ EmitCheckForTwoHeapNumbers(masm,
+ lhs_,
+ rhs_,
+ &both_loaded_as_doubles,
+ &check_for_symbols,
+ &flat_string_check);
+
+ __ bind(&check_for_symbols);
+ // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
+ // symbols.
+ if (cc_ == eq && !strict_) {
+ // Returns an answer for two symbols or two detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that r2 is the type of rhs_ on entry.
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
+
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ lhs_,
+ rhs_,
+ r2,
+ r3,
+ r4,
+ r5);
+ // Never falls through to here.
+
+ __ bind(&slow);
+
+ __ Push(lhs_, rhs_);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc_ == lt || cc_ == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ncr = LESS;
+ }
+ __ mov(r0, Operand(Smi::FromInt(ncr)));
+ __ push(r0);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_JS);
+}
+
+
+// This stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result;
+ Label not_heap_number;
+ Register scratch = r7;
+
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, ip);
+ __ b(&not_heap_number, ne);
+
+ __ sub(ip, tos_, Operand(kHeapObjectTag));
+ __ vldr(d1, ip, HeapNumber::kValueOffset);
+ __ vcmp(d1, 0.0);
+ __ vmrs(pc);
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ mov(tos_, Operand(0), LeaveCC, eq); // for FP_ZERO
+ __ mov(tos_, Operand(0), LeaveCC, vs); // for FP_NAN
+ __ Ret();
+
+ __ bind(&not_heap_number);
+
+ // Check if the value is 'null'.
+ // 'null' => false.
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(tos_, ip);
+ __ b(&false_result, eq);
+
+ // It can be an undetectable object.
+ // Undetectable => false.
+ __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
+ __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
+ __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
+ __ b(&false_result, eq);
+
+ // JavaScript object => true.
+ __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt);
+
+ // Check for string
+ __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt);
+
+ // String value => false iff empty, i.e., length is zero
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // If length is zero, "tos_" contains zero ==> false.
+ // If length is not zero, "tos_" contains a non-zero value ==> true.
+ __ Ret();
+
+  // Return 0 in "tos_" for false.
+ __ bind(&false_result);
+ __ mov(tos_, Operand(0));
+ __ Ret();
+}
+
+
+// We fall into this code if the operands were Smis, but the result was
+// not (e.g. overflow). We branch into this code (to the not_smi label) if
+// the operands were not both Smi. The operands are in r0 and r1. In order
+// to call the C-implemented binary fp operation routines we need to end up
+// with the double precision floating point operands in r0 and r1 (for the
+// value in r1) and r2 and r3 (for the value in r0).
+void GenericBinaryOpStub::HandleBinaryOpSlowCases(
+ MacroAssembler* masm,
+ Label* not_smi,
+ Register lhs,
+ Register rhs,
+ const Builtins::JavaScript& builtin) {
+ Label slow, slow_reverse, do_the_call;
+ bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
+
+ ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
+ Register heap_number_map = r6;
+
+ if (ShouldGenerateSmiCode()) {
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // Smi-smi case (overflow).
+ // Since both are Smis there is no heap number to overwrite, so allocate.
+ // The new heap number is in r5. r3 and r7 are scratch.
+ __ AllocateHeapNumber(
+ r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
+
+ // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
+ // using registers d7 and d6 for the double values.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt_f64_s32(d7, s15);
+ __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt_f64_s32(d6, s13);
+ if (!use_fp_registers) {
+ __ vmov(r2, r3, d7);
+ __ vmov(r0, r1, d6);
+ }
+ } else {
+ // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
+ __ mov(r7, Operand(rhs));
+ ConvertToDoubleStub stub1(r3, r2, r7, r9);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
+ __ mov(r7, Operand(lhs));
+ ConvertToDoubleStub stub2(r1, r0, r7, r9);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+ __ jmp(&do_the_call); // Tail call. No return.
+ }
+
+ // We branch here if at least one of r0 and r1 is not a Smi.
+ __ bind(not_smi);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // After this point we have the left hand side in r1 and the right hand side
+ // in r0.
+ if (lhs.is(r0)) {
+ __ Swap(r0, r1, ip);
+ }
+
+ // The type transition also calculates the answer.
+ bool generate_code_to_calculate_answer = true;
+
+ if (ShouldGenerateFPCode()) {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ GenerateTypeTransition(masm); // Tail call.
+ generate_code_to_calculate_answer = false;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (generate_code_to_calculate_answer) {
+ Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
+ if (mode_ == NO_OVERWRITE) {
+ // In the case where there is no chance of an overwritable float we may
+ // as well do the allocation immediately while r0 and r1 are untouched.
+ __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
+ }
+
+ // Move r0 to a double in r2-r3.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ if (mode_ == OVERWRITE_RIGHT) {
+ __ mov(r5, Operand(r0)); // Overwrite this heap number.
+ }
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r0 to d7.
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that second double is in r2 and r3.
+ __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ }
+ __ jmp(&finished_loading_r0);
+ __ bind(&r0_is_smi);
+ if (mode_ == OVERWRITE_RIGHT) {
+ // We can't overwrite a Smi so get address of new heap number into r5.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ }
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Convert smi in r0 to double in d7.
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt_f64_s32(d7, s15);
+ if (!use_fp_registers) {
+ __ vmov(r2, r3, d7);
+ }
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub3(r3, r2, r7, r4);
+ __ push(lr);
+ __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
+ // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
+ // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
+ Label r1_is_not_smi;
+ if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(ne, &r1_is_not_smi);
+ GenerateTypeTransition(masm); // Tail call.
+ }
+
+ __ bind(&finished_loading_r0);
+
+ // Move r1 to a double in r0-r1.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
+ __ bind(&r1_is_not_smi);
+ __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ if (mode_ == OVERWRITE_LEFT) {
+ __ mov(r5, Operand(r1)); // Overwrite this heap number.
+ }
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r1 to d6.
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that first double is in r0 and r1.
+ __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ }
+ __ jmp(&finished_loading_r1);
+ __ bind(&r1_is_smi);
+ if (mode_ == OVERWRITE_LEFT) {
+ // We can't overwrite a Smi so get address of new heap number into r5.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ }
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Convert smi in r1 to double in d6.
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt_f64_s32(d6, s13);
+ if (!use_fp_registers) {
+ __ vmov(r0, r1, d6);
+ }
+ } else {
+ // Write Smi from r1 to r1 and r0 in double format.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub4(r1, r0, r7, r9);
+ __ push(lr);
+ __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
+ __ bind(&finished_loading_r1);
+ }
+
+ if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
+ __ bind(&do_the_call);
+ // If we are inlining the operation using VFP3 instructions for
+ // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement
+ // double precision, add, subtract, multiply, divide.
+
+ if (Token::MUL == op_) {
+ __ vmul(d5, d6, d7);
+ } else if (Token::DIV == op_) {
+ __ vdiv(d5, d6, d7);
+ } else if (Token::ADD == op_) {
+ __ vadd(d5, d6, d7);
+ } else if (Token::SUB == op_) {
+ __ vsub(d5, d6, d7);
+ } else {
+ UNREACHABLE();
+ }
+ __ sub(r0, r5, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ mov(pc, lr);
+ } else {
+ // If we did not inline the operation, then the arguments are in:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ // r5: Address of heap number for result.
+
+ __ push(lr); // For later.
+ __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
+ // Call C routine that may not cause GC or other trouble. r5 is callee
+ // save.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+ // Store answer in the overwritable heap number.
+ #if !defined(USE_ARM_EABI)
+ // Double returned in fp coprocessor register 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for coprocessor so we
+      // need to subtract the tag from r5.
+ __ sub(r4, r5, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
+ #else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ #endif
+ __ mov(r0, Operand(r5));
+ // And we are done.
+ __ pop(pc);
+ }
+ }
+ }
+
+ if (!generate_code_to_calculate_answer &&
+ !slow_reverse.is_linked() &&
+ !slow.is_linked()) {
+ return;
+ }
+
+ if (lhs.is(r0)) {
+ __ b(&slow);
+ __ bind(&slow_reverse);
+ __ Swap(r0, r1, ip);
+ }
+
+ heap_number_map = no_reg; // Don't use this any more from here on.
+
+ // We jump to here if something goes wrong (one param is not a number of any
+ // sort or new-space allocation fails).
+ __ bind(&slow);
+
+ // Push arguments to the stack
+ __ Push(r1, r0);
+
+ if (Token::ADD == op_) {
+ // Test for string arguments before calling runtime.
+ // r1 : first argument
+ // r0 : second argument
+ // sp[0] : second argument
+ // sp[4] : first argument
+
+ Label not_strings, not_string1, string1, string1_smi2;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &not_string1);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_string1);
+
+    // First argument is a string, test second.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &string1_smi2);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &string1);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, r0, r2, r4, r5, r6, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ str(r2, MemOperand(sp, 0));
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &not_strings);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
+
+ __ bind(&not_strings);
+ }
+
+ __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
+}
+
+
+// Tries to get a signed int32 out of a double precision floating point heap
+// number. Rounds towards 0. Fastest for doubles that are in the ranges
+// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
+// almost to the range of signed int32 values that are not Smis. Jumps to the
+// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
+// (excluding the endpoints).
+static void GetInt32(MacroAssembler* masm,
+ Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ Label* slow) {
+ Label right_exponent, done;
+ // Get exponent word.
+ __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ Ubfx(scratch2,
+ scratch,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // Load dest with zero. We use this either for the final shift or
+ // for the answer.
+ __ mov(dest, Operand(0));
+ // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+  // A non-Smi integer is 1.xxx * 2^30, so the (unbiased) exponent is 30. This
+  // is the exponent that we are fastest at and also the highest exponent we
+  // can handle here.
+ const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
+ // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
+ // split it up to avoid a constant pool entry. You can't do that in general
+ // for cmp because of the overflow flag, but we know the exponent is in the
+ // range 0-2047 so there is no overflow.
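+  // (After subtracting the fudge factor below, we compare against
+  // non_smi_exponent - 0x400 == 0x1d, which does fit in an immediate.)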
+ int fudge_factor = 0x400;
+ __ sub(scratch2, scratch2, Operand(fudge_factor));
+ __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
+ // If we have a match of the int32-but-not-Smi exponent then skip some logic.
+ __ b(eq, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ b(gt, slow);
+
+  // We know the (unbiased) exponent is smaller than 30. If it is less than
+  // 0 then the number is smaller in magnitude than 1.0 * 2^0, i.e. it rounds
+  // to zero.
+ const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
+ __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
+ // Dest already has a Smi zero.
+ __ b(lt, &done);
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
+ // get how much to shift down.
+ __ rsb(dest, scratch2, Operand(30));
+ }
+ __ bind(&right_exponent);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions implementing double precision to integer
+ // conversion using round to zero.
+ __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ __ vmov(d7, scratch2, scratch);
+ __ vcvt_s32_f64(s15, d7);
+ __ vmov(dest, s15);
+ } else {
+ // Get the top bits of the mantissa.
+ __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+    // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We just orred in the implicit bit, so that took care of one bit,
+    // and we want to leave the sign bit 0, so we subtract 2 bits from the
+    // shift distance.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
+ // Put sign in zero flag.
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the last 10 bits.
+ __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+ // Move down according to the exponent.
+ __ mov(dest, Operand(scratch, LSR, dest));
+ // Fix sign if sign bit was set.
+ __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ }
+ __ bind(&done);
+}
+
+// For bitwise ops where the inputs are not both Smis, we try to determine
+// whether both inputs are either Smis or at least heap numbers that can be
+// represented by a 32-bit signed value. We truncate towards zero as required
+// by the ES spec. If this is the case we do the bitwise op and see if the
+// result is a Smi. If so, great, otherwise we try to find a heap number to
+// write the answer into (either by allocating or by overwriting).
+// On entry the operands are in lhs and rhs. On exit the answer is in r0.
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ Label slow, result_not_a_smi;
+ Label rhs_is_smi, lhs_is_smi;
+ Label done_checking_rhs, done_checking_lhs;
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ __ tst(lhs, Operand(kSmiTagMask));
+ __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
+ __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ GetInt32(masm, lhs, r3, r5, r4, &slow);
+ __ jmp(&done_checking_lhs);
+ __ bind(&lhs_is_smi);
+ __ mov(r3, Operand(lhs, ASR, 1));
+ __ bind(&done_checking_lhs);
+
+ __ tst(rhs, Operand(kSmiTagMask));
+ __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
+ __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
+ __ b(ne, &slow);
+ GetInt32(masm, rhs, r2, r5, r4, &slow);
+ __ jmp(&done_checking_rhs);
+ __ bind(&rhs_is_smi);
+ __ mov(r2, Operand(rhs, ASR, 1));
+ __ bind(&done_checking_rhs);
+
+ ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
+
+ // r0 and r1: Original operands (Smi or heap numbers).
+ // r2 and r3: Signed int32 operands.
+ switch (op_) {
+ case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
+ case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
+ case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
+ case Token::SAR:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, ASR, r2));
+ break;
+ case Token::SHR:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSR, r2), SetCC);
+ // SHR is special because it is required to produce a positive answer.
+ // The code below for writing into heap numbers isn't capable of writing
+ // the register as an unsigned int so we go to slow case if we hit this
+ // case.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ b(mi, &result_not_a_smi);
+ } else {
+ __ b(mi, &slow);
+ }
+ break;
+ case Token::SHL:
+ // Use only the 5 least significant bits of the shift count.
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSL, r2));
+ break;
+ default: UNREACHABLE();
+ }
+  // Check that the *signed* result fits in a Smi.
+ __ add(r3, r2, Operand(0x40000000), SetCC);
+ __ b(mi, &result_not_a_smi);
+ __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+ __ Ret();
+
+ Label have_to_allocate, got_a_heap_number;
+ __ bind(&result_not_a_smi);
+ switch (mode_) {
+ case OVERWRITE_RIGHT: {
+ __ tst(rhs, Operand(kSmiTagMask));
+ __ b(eq, &have_to_allocate);
+ __ mov(r5, Operand(rhs));
+ break;
+ }
+ case OVERWRITE_LEFT: {
+ __ tst(lhs, Operand(kSmiTagMask));
+ __ b(eq, &have_to_allocate);
+ __ mov(r5, Operand(lhs));
+ break;
+ }
+ case NO_OVERWRITE: {
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ }
+ default: break;
+ }
+ __ bind(&got_a_heap_number);
+ // r2: Answer as signed int32.
+ // r5: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to r0, which is the
+ // result.
+ __ mov(r0, Operand(r5));
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r2);
+ if (op_ == Token::SHR) {
+ __ vcvt_f64_u32(d0, s0);
+ } else {
+ __ vcvt_f64_s32(d0, s0);
+ }
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ TailCallStub(&stub);
+ }
+
+ if (mode_ != NO_OVERWRITE) {
+ __ bind(&have_to_allocate);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
+ __ jmp(&got_a_heap_number);
+ }
+
+ // If all else failed then we go to the runtime system.
+ __ bind(&slow);
+ __ Push(lhs, rhs); // Restore stack.
+ switch (op_) {
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+
+
+// Multiplies source by a known small integer. The known int is also passed
+// in a register (Smi tagged) for the cases where no good shift-and-add trick
+// applies, and the result may still need shifting by *required_shift.
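+// For example, for known_int == 10 the code computes source * 5 and sets
+// *required_shift to 2; shifting the result left by 2 gives source * 20,
+// i.e. source * 10 with the Smi tag shift folded in.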
+static void MultiplyByKnownIntInStub(
+ MacroAssembler* masm,
+ Register result,
+ Register source,
+ Register known_int_register, // Smi tagged.
+ int known_int,
+ int* required_shift) { // Including Smi tag shift
+ switch (known_int) {
+ case 3:
+ __ add(result, source, Operand(source, LSL, 1));
+ *required_shift = 1;
+ break;
+ case 5:
+ __ add(result, source, Operand(source, LSL, 2));
+ *required_shift = 1;
+ break;
+ case 6:
+ __ add(result, source, Operand(source, LSL, 1));
+ *required_shift = 2;
+ break;
+ case 7:
+ __ rsb(result, source, Operand(source, LSL, 3));
+ *required_shift = 1;
+ break;
+ case 9:
+ __ add(result, source, Operand(source, LSL, 3));
+ *required_shift = 1;
+ break;
+ case 10:
+ __ add(result, source, Operand(source, LSL, 2));
+ *required_shift = 2;
+ break;
+ default:
+ ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
+ __ mul(result, source, known_int_register);
+ *required_shift = 0;
+ }
+}
+
+
+// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
+// trick. See http://en.wikipedia.org/wiki/Divisibility_rule
+// Takes the sum of the digits base (mask + 1) repeatedly until we have a
+// number from 0 to mask. On exit the 'eq' condition flags are set if the
+// answer is exactly the mask.
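+// For example, for mod 3 (mask 3, shift 2) and lhs == 11: the loop computes
+// (11 & 3) + (11 >> 2) == 5, then (5 & 3) + (5 >> 2) == 2, which is <= mask,
+// and indeed 11 % 3 == 2.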
+void IntegerModStub::DigitSum(MacroAssembler* masm,
+ Register lhs,
+ int mask,
+ int shift,
+ Label* entry) {
+ ASSERT(mask > 0);
+  ASSERT(mask <= 0xff);  // The mask must fit in an immediate; ip stays free.
+ Label loop;
+ __ bind(&loop);
+ __ and_(ip, lhs, Operand(mask));
+ __ add(lhs, ip, Operand(lhs, LSR, shift));
+ __ bind(entry);
+ __ cmp(lhs, Operand(mask));
+ __ b(gt, &loop);
+}
+
+
+void IntegerModStub::DigitSum(MacroAssembler* masm,
+ Register lhs,
+ Register scratch,
+ int mask,
+ int shift1,
+ int shift2,
+ Label* entry) {
+ ASSERT(mask > 0);
+  ASSERT(mask <= 0xff);  // The mask must fit in an immediate; ip stays free.
+ Label loop;
+ __ bind(&loop);
+ __ bic(scratch, lhs, Operand(mask));
+ __ and_(ip, lhs, Operand(mask));
+ __ add(lhs, ip, Operand(lhs, LSR, shift1));
+ __ add(lhs, lhs, Operand(scratch, LSR, shift2));
+ __ bind(entry);
+ __ cmp(lhs, Operand(mask));
+ __ b(gt, &loop);
+}
+
+
+// Splits the number into two halves (bottom half has shift bits). The top
+// half is subtracted from the bottom half. If the result is negative then
+// rhs is added.
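+// For example, for mod 5 the caller passes shift == 2: a digit sum in the
+// range 0..15 is split as hi * 4 + lo; since 4 == -1 (mod 5), lo - hi is
+// congruent mod 5, and adding 5 when it is negative puts it back in 0..4.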
+void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
+ Register lhs,
+ int shift,
+ int rhs) {
+ int mask = (1 << shift) - 1;
+ __ and_(ip, lhs, Operand(mask));
+ __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
+ __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
+}
+
+
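+// Reduces lhs (at most max on entry) modulo the denominator by repeatedly
+// subtracting power-of-two multiples of the denominator, largest first. On
+// exit lhs is in the range 0 to denominator-1.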
+void IntegerModStub::ModReduce(MacroAssembler* masm,
+ Register lhs,
+ int max,
+ int denominator) {
+ int limit = denominator;
+ while (limit * 2 <= max) limit *= 2;
+ while (limit >= denominator) {
+ __ cmp(lhs, Operand(limit));
+ __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
+ limit >>= 1;
+ }
+}
+
+
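+// Reassembles the final answer: the low bits that were masked off earlier
+// (mask_bits) plus the reduced value shifted back up by shift_distance.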
+void IntegerModStub::ModAnswer(MacroAssembler* masm,
+ Register result,
+ Register shift_distance,
+ Register mask_bits,
+ Register sum_of_digits) {
+ __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
+ __ Ret();
+}
+
+
+// See comment for class.
+void IntegerModStub::Generate(MacroAssembler* masm) {
+ __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
+ __ bic(odd_number_, odd_number_, Operand(1));
+ __ mov(odd_number_, Operand(odd_number_, LSL, 1));
+ // We now have (odd_number_ - 1) * 2 in the register.
+ // Build a switch out of branches instead of data because it avoids
+ // having to teach the assembler about intra-code-object pointers
+ // that are not in relative branch instructions.
+ Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
+ Label mod21, mod23, mod25;
+ { Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ add(pc, pc, Operand(odd_number_));
+ // When you read pc it is always 8 ahead, but when you write it you always
+ // write the actual value. So we put in two nops to take up the slack.
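+    // For example, odd_number_ == 3 arrives here as 4 and lands on the
+    // b(&mod3) entry below; each odd divisor selects the next 4-byte slot.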
+ __ nop();
+ __ nop();
+ __ b(&mod3);
+ __ b(&mod5);
+ __ b(&mod7);
+ __ b(&mod9);
+ __ b(&mod11);
+ __ b(&mod13);
+ __ b(&mod15);
+ __ b(&mod17);
+ __ b(&mod19);
+ __ b(&mod21);
+ __ b(&mod23);
+ __ b(&mod25);
+ }
+
+ // For each denominator we find a multiple that is almost only ones
+ // when expressed in binary. Then we do the sum-of-digits trick for
+ // that number. If the multiple is not 1 then we have to do a little
+  // more work afterwards to get the answer into the range 0 to
+  // denominator-1.
+ DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11.
+ __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111.
+ ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111.
+ __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111.
+ ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111.
+ ModReduce(masm, lhs_, 0x3f, 11);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111.
+ ModReduce(masm, lhs_, 0xff, 13);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111.
+ __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111.
+ ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111.
+ ModReduce(masm, lhs_, 0xff, 19);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111.
+ ModReduce(masm, lhs_, 0x3f, 21);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101.
+ ModReduce(masm, lhs_, 0xff, 23);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101.
+ ModReduce(masm, lhs_, 0x7f, 25);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ // lhs_ : x
+ // rhs_ : y
+ // r0 : result
+
+ Register result = r0;
+ Register lhs = lhs_;
+ Register rhs = rhs_;
+
+ // This code can't cope with other register allocations yet.
+ ASSERT(result.is(r0) &&
+ ((lhs.is(r0) && rhs.is(r1)) ||
+ (lhs.is(r1) && rhs.is(r0))));
+
+ Register smi_test_reg = r7;
+ Register scratch = r9;
+
+ // All ops need to know whether we are dealing with two Smis. Set up
+ // smi_test_reg to tell us that.
+ if (ShouldGenerateSmiCode()) {
+ __ orr(smi_test_reg, lhs, Operand(rhs));
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ Label not_smi;
+ // Fast path.
+ if (ShouldGenerateSmiCode()) {
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ __ b(ne, &not_smi);
+ __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
+ // Return if no overflow.
+ __ Ret(vc);
+ __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
+ }
+ HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
+ break;
+ }
+
+ case Token::SUB: {
+ Label not_smi;
+ // Fast path.
+ if (ShouldGenerateSmiCode()) {
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ __ b(ne, &not_smi);
+ if (lhs.is(r1)) {
+ __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
+ // Return if no overflow.
+ __ Ret(vc);
+ __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
+ } else {
+ __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
+ // Return if no overflow.
+ __ Ret(vc);
+ __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
+ }
+ }
+ HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
+ break;
+ }
+
+ case Token::MUL: {
+ Label not_smi, slow;
+ if (ShouldGenerateSmiCode()) {
+ STATIC_ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ Register scratch2 = smi_test_reg;
+ smi_test_reg = no_reg;
+ __ b(ne, &not_smi);
+ // Remove tag from one operand (but keep sign), so that result is Smi.
+ __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
+ // Do multiplication
+ // scratch = lower 32 bits of ip * lhs.
+ __ smull(scratch, scratch2, lhs, ip);
+ // Go slow on overflows (overflow bit is not set).
+ __ mov(ip, Operand(scratch, ASR, 31));
+ // No overflow if higher 33 bits are identical.
+ __ cmp(ip, Operand(scratch2));
+ __ b(ne, &slow);
+ // Go slow on zero result to handle -0.
+ __ tst(scratch, Operand(scratch));
+ __ mov(result, Operand(scratch), LeaveCC, ne);
+ __ Ret(ne);
+        // We need -0 if we were multiplying a negative number by 0 to get 0.
+ // We know one of them was zero.
+ __ add(scratch2, rhs, Operand(lhs), SetCC);
+ __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
+ // Slow case. We fall through here if we multiplied a negative number
+ // with 0, because that would mean we should produce -0.
+ __ bind(&slow);
+ }
+ HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
+ break;
+ }
+
+ case Token::DIV:
+ case Token::MOD: {
+ Label not_smi;
+ if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
+ Label lhs_is_unsuitable;
+ __ BranchOnNotSmi(lhs, &not_smi);
+ if (IsPowerOf2(constant_rhs_)) {
+ if (op_ == Token::MOD) {
+ __ and_(rhs,
+ lhs,
+ Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
+ SetCC);
+ // We now have the answer, but if the input was negative we also
+ // have the sign bit. Our work is done if the result is
+ // positive or zero:
+ if (!rhs.is(r0)) {
+ __ mov(r0, rhs, LeaveCC, pl);
+ }
+ __ Ret(pl);
+ // A mod of a negative left hand side must return a negative number.
+ // Unfortunately if the answer is 0 then we must return -0. And we
+ // already optimistically trashed rhs so we may need to restore it.
+ __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
+ // Next two instructions are conditional on the answer being -0.
+ __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
+ __ b(eq, &lhs_is_unsuitable);
+            // We need to subtract the divisor. E.g. -3 % 4 == -3.
+ __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
+ } else {
+ ASSERT(op_ == Token::DIV);
+ __ tst(lhs,
+ Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
+ __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder.
+ int shift = 0;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ d >>= 1;
+ shift++;
+ }
+ __ mov(r0, Operand(lhs, LSR, shift));
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ }
+ } else {
+ // Not a power of 2.
+ __ tst(lhs, Operand(0x80000000u));
+ __ b(ne, &lhs_is_unsuitable);
+ // Find a fixed point reciprocal of the divisor so we can divide by
+ // multiplying.
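+          // For example, for constant_rhs_ == 3 the loop below ends with
+          // shift == 33 and mul == 0xAAAAAAAB; since lhs is still Smi
+          // tagged (2 * x), the high word of mul * lhs, shifted right by
+          // (shift - 31), is x / 3.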
+ double divisor = 1.0 / constant_rhs_;
+ int shift = 32;
+ double scale = 4294967296.0; // 1 << 32.
+ uint32_t mul;
+ // Maximise the precision of the fixed point reciprocal.
+ while (true) {
+ mul = static_cast<uint32_t>(scale * divisor);
+ if (mul >= 0x7fffffff) break;
+ scale *= 2.0;
+ shift++;
+ }
+ mul++;
+ Register scratch2 = smi_test_reg;
+ smi_test_reg = no_reg;
+ __ mov(scratch2, Operand(mul));
+ __ umull(scratch, scratch2, scratch2, lhs);
+ __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
+ // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
+ // rhs is still the known rhs. rhs is Smi tagged.
+          // lhs is still the unknown lhs. lhs is Smi tagged.
+ int required_scratch_shift = 0; // Including the Smi tag shift of 1.
+ // scratch = scratch2 * rhs.
+ MultiplyByKnownIntInStub(masm,
+ scratch,
+ scratch2,
+ rhs,
+ constant_rhs_,
+ &required_scratch_shift);
+ // scratch << required_scratch_shift is now the Smi tagged rhs *
+ // (lhs / rhs) where / indicates integer division.
+ if (op_ == Token::DIV) {
+ __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
+ __ b(ne, &lhs_is_unsuitable); // There was a remainder.
+ __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
+ } else {
+ ASSERT(op_ == Token::MOD);
+ __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
+ }
+ }
+ __ Ret();
+ __ bind(&lhs_is_unsuitable);
+ } else if (op_ == Token::MOD &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS) {
+ // Do generate a bit of smi code for modulus even though the default for
+ // modulus is not to do it, but as the ARM processor has no coprocessor
+        // support for modulus, checking for smis makes sense. We can handle
+ // 1 to 25 times any power of 2. This covers over half the numbers from
+ // 1 to 100 including all of the first 25. (Actually the constants < 10
+ // are handled above by reciprocal multiplication. We only get here for
+ // those cases if the right hand side is not a constant or for cases
+ // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
+ // stub.)
+ Label slow;
+ Label not_power_of_2;
+ ASSERT(!ShouldGenerateSmiCode());
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
+ // Check for two positive smis.
+ __ orr(smi_test_reg, lhs, Operand(rhs));
+ __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
+ __ b(ne, &slow);
+ // Check that rhs is a power of two and not zero.
+ Register mask_bits = r3;
+ __ sub(scratch, rhs, Operand(1), SetCC);
+ __ b(mi, &slow);
+ __ and_(mask_bits, rhs, Operand(scratch), SetCC);
+ __ b(ne, &not_power_of_2);
+ // Calculate power of two modulus.
+ __ and_(result, lhs, Operand(scratch));
+ __ Ret();
+
+ __ bind(&not_power_of_2);
+ __ eor(scratch, scratch, Operand(mask_bits));
+ // At least two bits are set in the modulus. The high one(s) are in
+ // mask_bits and the low one is scratch + 1.
+ __ and_(mask_bits, scratch, Operand(lhs));
+ Register shift_distance = scratch;
+ scratch = no_reg;
+
+ // The rhs consists of a power of 2 multiplied by some odd number.
+ // The power-of-2 part we handle by putting the corresponding bits
+ // from the lhs in the mask_bits register, and the power in the
+ // shift_distance register. Shift distance is never 0 due to Smi
+ // tagging.
+ __ CountLeadingZeros(r4, shift_distance, shift_distance);
+ __ rsb(shift_distance, r4, Operand(32));
+
+ // Now we need to find out what the odd number is. The last bit is
+ // always 1.
+ Register odd_number = r4;
+ __ mov(odd_number, Operand(rhs, LSR, shift_distance));
+ __ cmp(odd_number, Operand(25));
+ __ b(gt, &slow);
+
+ IntegerModStub stub(
+ result, shift_distance, odd_number, mask_bits, lhs, r5);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call.
+
+ __ bind(&slow);
+ }
+ HandleBinaryOpSlowCases(
+ masm,
+ &not_smi,
+ lhs,
+ rhs,
+ op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ Label slow;
+ STATIC_ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ Register scratch2 = smi_test_reg;
+ smi_test_reg = no_reg;
+ switch (op_) {
+ case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
+ case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
+ case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
+ case Token::SAR:
+ // Remove tags from right operand.
+ __ GetLeastBitsFromSmi(scratch2, rhs, 5);
+ __ mov(result, Operand(lhs, ASR, scratch2));
+ // Smi tag result.
+ __ bic(result, result, Operand(kSmiTagMask));
+ break;
+ case Token::SHR:
+ // Remove tags from operands. We can't do this on a 31 bit number
+ // because then the 0s get shifted into bit 30 instead of bit 31.
+ __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
+ __ GetLeastBitsFromSmi(scratch2, rhs, 5);
+ __ mov(scratch, Operand(scratch, LSR, scratch2));
+ // Unsigned shift is not allowed to produce a negative number, so
+ // check the sign bit and the sign bit after Smi tagging.
+ __ tst(scratch, Operand(0xc0000000));
+ __ b(ne, &slow);
+ // Smi tag result.
+ __ mov(result, Operand(scratch, LSL, kSmiTagSize));
+ break;
+ case Token::SHL:
+ // Remove tags from operands.
+ __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
+ __ GetLeastBitsFromSmi(scratch2, rhs, 5);
+ __ mov(scratch, Operand(scratch, LSL, scratch2));
+ // Check that the signed result fits in a Smi.
+ __ add(scratch2, scratch, Operand(0x40000000), SetCC);
+ __ b(mi, &slow);
+ __ mov(result, Operand(scratch, LSL, kSmiTagSize));
+ break;
+ default: UNREACHABLE();
+ }
+ __ Ret();
+ __ bind(&slow);
+ HandleNonSmiBitwiseOp(masm, lhs, rhs);
+ break;
+ }
+
+ default: UNREACHABLE();
+ }
+ // This code should be unreachable.
+ __ stop("Unreachable");
+
+ // Generate an unreachable reference to the DEFAULT stub so that it can be
+ // found at the end of this stub when clearing ICs at GC.
+ // TODO(kaznacheev): Check performance impact and get rid of this.
+ if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
+ GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
+ __ CallStub(&uninit);
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ __ Push(r1, r0);
+
+ __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+ __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
+ __ Push(r2, r1, r0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Argument is a number and is on stack and in r0.
+ Label runtime_call;
+ Label input_not_smi;
+ Label loaded;
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Load argument and check if it is a smi.
+ __ BranchOnNotSmi(r0, &input_not_smi);
+
+ CpuFeatures::Scope scope(VFP3);
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into r2, r3.
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ b(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(r0,
+ r1,
+ Heap::kHeapNumberMapRootIndex,
+ &runtime_call,
+ true);
+ // Input is a HeapNumber. Load it to a double register and store the
+ // low and high words into r2, r3.
+ __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
+
+ __ bind(&loaded);
+ // r2 = low 32 bits of double value
+ // r3 = high 32 bits of double value
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ eor(r1, r2, Operand(r3));
+ __ eor(r1, r1, Operand(r1, ASR, 16));
+ __ eor(r1, r1, Operand(r1, ASR, 8));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
+
+ // r2 = low 32 bits of double value.
+ // r3 = high 32 bits of double value.
+ // r1 = TranscendentalCache::hash(double value).
+ __ mov(r0,
+ Operand(ExternalReference::transcendental_cache_array_address()));
+ // r0 points to cache array.
+ __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // r0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &runtime_call);
+
+#ifdef DEBUG
+ // Check that the layout of cache elements match expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+    CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+
+  // Find the address of the r1-th entry in the cache, i.e., &r0[r1*12].
+ __ add(r1, r1, Operand(r1, LSL, 1));
+ __ add(r0, r0, Operand(r1, LSL, 2));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
+ __ cmp(r2, r4);
+ __ b(ne, &runtime_call);
+ __ cmp(r3, r5);
+ __ b(ne, &runtime_call);
+ // Cache hit. Load result, pop argument and return.
+ __ mov(r0, Operand(r6));
+ __ pop();
+ __ Ret();
+ }
+
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Do tail-call to runtime routine. Runtime routines expect at least one
+ // argument, so give it a Smi.
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ push(r0);
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+
+ __ StubReturn(1);
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &try_float);
+
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ if (negative_zero_ == kStrictNegativeZero) {
+ // If we have to check for zero, then we can check for the max negative
+ // smi while we are at it.
+ __ bic(ip, r0, Operand(0x80000000), SetCC);
+ __ b(eq, &slow);
+ __ rsb(r0, r0, Operand(0));
+ __ StubReturn(1);
+ } else {
+ // The value of the expression is a smi and 0 is OK for -0. Try
+ // optimistic subtraction '0 - value'.
+ __ rsb(r0, r0, Operand(0), SetCC);
+ __ StubReturn(1, vc);
+ // We don't have to reverse the optimistic neg since the only case
+ // where we fall through is the minimum negative Smi, which is the case
+ // where the neg leaves the register unchanged.
+ __ jmp(&slow); // Go slow on max negative Smi.
+ }
+
+ __ bind(&try_float);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
+ __ b(ne, &slow);
+ // r0 is a heap number. Get a new heap number in r1.
+ if (overwrite_ == UNARY_OVERWRITE) {
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ } else {
+ __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+ __ mov(r0, Operand(r1));
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
+ __ b(ne, &slow);
+
+    // Convert the heap number in r0 to an untagged integer in r1.
+ GetInt32(masm, r0, r1, r2, r3, &slow);
+
+ // Do the bitwise operation (move negated) and check if the result
+ // fits in a smi.
+ Label try_float;
+ __ mvn(r1, Operand(r1));
+ __ add(r2, r1, Operand(0x40000000), SetCC);
+ __ b(mi, &try_float);
+ __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+ __ b(&done);
+
+ __ bind(&try_float);
+    if (overwrite_ != UNARY_OVERWRITE) {
+ // Allocate a fresh heap number, but don't overwrite r0 until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in r0.
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ mov(r0, Operand(r2));
+ }
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r1);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
+ } else {
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+
+ __ bind(&done);
+ __ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ push(r0);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // r0 holds the exception.
+
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(sp, MemOperand(r3));
+
+ // Restore the next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(r2);
+ __ str(r2, MemOperand(r3));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ __ cmp(fp, Operand(0));
+ // Set cp to NULL if fp is NULL.
+ __ mov(cp, Operand(0), LeaveCC, eq);
+ // Restore cp otherwise.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ pop(pc);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop sp to the top stack handler.
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(sp, MemOperand(r3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ ldr(r2, MemOperand(sp, kStateOffset));
+ __ cmp(r2, Operand(StackHandler::ENTRY));
+ __ b(eq, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ ldr(sp, MemOperand(sp, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+  // Set the top handler address to the next handler past the current ENTRY
+  // handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(r2);
+ __ str(r2, MemOperand(r3));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ mov(r0, Operand(false));
+ __ mov(r2, Operand(external_caught));
+ __ str(r0, MemOperand(r2));
+
+ // Set pending exception and r0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r0, MemOperand(r2));
+ }
+
+ // Stack layout at this point. See also StackHandlerConstants.
+ // sp -> state (ENTRY)
+ // fp
+ // lr
+
+ // Discard handler state (r2 is not used) and restore frame pointer.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ __ cmp(fp, Operand(0));
+ // Set cp to NULL if fp is NULL.
+ __ mov(cp, Operand(0), LeaveCC, eq);
+ // Restore cp otherwise.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ pop(pc);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate,
+ int frame_alignment_skew) {
+ // r0: result parameter for PerformGC, if any
+ // r4: number of arguments including receiver (C callee-saved)
+ // r5: pointer to builtin function (C callee-saved)
+ // r6: pointer to the first argument (C callee-saved)
+
+ if (do_gc) {
+ // Passing r0.
+ __ PrepareCallCFunction(1, r1);
+ __ CallCFunction(ExternalReference::perform_gc_function(), 1);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate) {
+ __ mov(r0, Operand(scope_depth));
+ __ ldr(r1, MemOperand(r0));
+ __ add(r1, r1, Operand(1));
+ __ str(r1, MemOperand(r0));
+ }
+
+ // Call C built-in.
+ // r0 = argc, r1 = argv
+ __ mov(r0, Operand(r4));
+ __ mov(r1, Operand(r6));
+
+ int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+#if defined(V8_HOST_ARCH_ARM)
+ if (FLAG_debug_code) {
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ sub(r2, sp, Operand(frame_alignment_skew));
+ __ tst(r2, Operand(frame_alignment_mask));
+ __ b(eq, &alignment_as_expected);
+ // Don't use Check here, as it will call Runtime_Abort re-entering here.
+ __ stop("Unexpected alignment");
+ __ bind(&alignment_as_expected);
+ }
+ }
+#endif
+
+  // Just before the call (jump) below, lr is pushed, so the actual skew is
+  // the current skew plus one pointer size.
+ int alignment_before_call =
+ (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
+ if (alignment_before_call > 0) {
+ // Push until the alignment before the call is met.
+ __ mov(r2, Operand(0));
+ for (int i = alignment_before_call;
+ (i & frame_alignment_mask) != 0;
+ i += kPointerSize) {
+ __ push(r2);
+ }
+ }
+
+ // TODO(1242173): To let the GC traverse the return address of the exit
+ // frames, we need to know where the return address is. Right now,
+ // we push it on the stack to be able to find it again, but we never
+ // restore from it in case of changes, which makes it impossible to
+ // support moving the C entry code stub. This should be fixed, but currently
+ // this is OK because the CEntryStub gets generated so early in the V8 boot
+ // sequence that it is not moving ever.
+ masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
+ masm->push(lr);
+ masm->Jump(r5);
+
+ // Restore sp back to before aligning the stack.
+ if (alignment_before_call > 0) {
+ __ add(sp, sp, Operand(alignment_before_call));
+ }
+
+ if (always_allocate) {
+ // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
+ // though (contain the result).
+ __ mov(r2, Operand(scope_depth));
+ __ ldr(r3, MemOperand(r2));
+ __ sub(r3, r3, Operand(1));
+ __ str(r3, MemOperand(r2));
+ }
+
+ // check for failure result
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ // Lower 2 bits of r2 are 0 iff r0 has failure tag.
+ __ add(r2, r0, Operand(1));
+ __ tst(r2, Operand(kFailureTagMask));
+ __ b(eq, &failure_returned);
+
+ // Exit C frame and return.
+ // r0:r1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ __ LeaveExitFrame();
+
+ // check if we should retry or throw exception
+ Label retry;
+ __ bind(&failure_returned);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ b(eq, &retry);
+
+ // Special handling of out of memory exceptions.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ b(eq, throw_out_of_memory_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r3, MemOperand(ip));
+ __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ ldr(r0, MemOperand(ip));
+ __ str(r3, MemOperand(ip));
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ cmp(r0, Operand(Factory::termination_exception()));
+ __ b(eq, throw_termination_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // r0: number of arguments including receiver
+ // r1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+
+ // Result returned in r0 or r0+r1 by default.
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame();
+
+ // r4: number of arguments (C callee-saved)
+ // r5: pointer to builtin function (C callee-saved)
+ // r6: pointer to first argument (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false,
+ -kPointerSize);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false,
+ 0);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true,
+ kPointerSize);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // [sp+0]: argv
+
+ Label invoke, exit;
+
+ // Called from C, so do not pop argc and args on exit (preserve sp)
+ // No need to save register-passed args
+ // Save callee-saved registers (incl. cp and fp), sp, and lr
+ __ stm(db_w, sp, kCalleeSaved | lr.bit());
+
+ // Get address of argv, see stm above.
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
+
+  // Push a frame with special values set up to mark it as an entry frame.
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // r4: argv
+ __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ mov(r7, Operand(Smi::FromInt(marker)));
+ __ mov(r6, Operand(Smi::FromInt(marker)));
+ __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ ldr(r5, MemOperand(r5));
+ __ Push(r8, r7, r6, r5);
+
+  // Set up the frame pointer for the frame to be pushed.
+ __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // Call a faked try-block that does the invoke.
+ __ bl(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ // Coming in here the fp will be invalid because the PushTryHandler below
+ // sets it to 0 to signal the existence of the JSEntry frame.
+ __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r0, MemOperand(ip));
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ b(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ // Must preserve r0-r4, r5-r7 are available.
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the bl(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r5, MemOperand(ip));
+ __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r5, MemOperand(ip));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // r4: argv
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ mov(ip, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ mov(ip, Operand(entry));
+ }
+ __ ldr(ip, MemOperand(ip)); // deref address
+
+ // Branch and link to JSEntryTrampoline. We don't use the double underscore
+ // macro for the add instruction because we don't want the coverage tool
+ // inserting instructions here after we read the pc.
+ __ mov(lr, Operand(pc));
+ masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Unlink this frame from the handler chain. When reading the
+ // address of the next handler, there is no need to use the address
+ // displacement since the current stack pointer (sp) points directly
+ // to the stack handler.
+ __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
+ __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ __ str(r3, MemOperand(ip));
+ // No need to restore registers
+ __ add(sp, sp, Operand(StackHandlerConstants::kSize));
+
+
+ __ bind(&exit); // r0 holds result
+ // Restore the top frame descriptors from the stack.
+ __ pop(r3);
+ __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ str(r3, MemOperand(ip));
+
+ // Reset the stack to the callee saved registers.
+ __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // Restore callee-saved registers and return.
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+ __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
+}
+
+
+// This stub performs an instanceof, calling the builtin function if
+// necessary. Uses r1 for the object, r0 for the function that it may
+// be an instance of (these are fetched from the stack).
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Get the object - slow case for smis (we may need to throw an exception
+ // depending on the rhs).
+ Label slow, loop, is_instance, is_not_instance;
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+ __ BranchOnSmi(r0, &slow);
+
+ // Check that the left hand is a JS object and put map in r3.
+ __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &slow);
+ __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+ __ b(gt, &slow);
+
+ // Get the prototype of the function (r4 is result, r2 is scratch).
+ __ ldr(r1, MemOperand(sp, 0));
+ // r1 is function, r3 is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &miss);
+ __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(ne, &miss);
+ __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ pop();
+ __ pop();
+ __ mov(pc, Operand(lr));
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(r1, r4, r2, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ BranchOnSmi(r4, &slow);
+ __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &slow);
+ __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
+ __ b(gt, &slow);
+
+ __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
+
+ // Register mapping: r3 is object map and r4 is function prototype.
+ // Get prototype of object into r2.
+ __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ __ bind(&loop);
+ __ cmp(r2, Operand(r4));
+ __ b(eq, &is_instance);
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r2, ip);
+ __ b(eq, &is_not_instance);
+ __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ pop();
+ __ pop();
+ __ mov(pc, Operand(lr)); // Return.
+
+ __ bind(&is_not_instance);
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ pop();
+ __ pop();
+ __ mov(pc, Operand(lr)); // Return.
+
+ // Slow-case. Tail call builtin.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+}
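+
+
+// A minimal sketch (illustrative only) of the prototype chain walk performed
+// by the loop above. ObjectSketch is a placeholder type invented for this
+// sketch; in the stub the "prototype" link is read from the object's map.
+struct ObjectSketch {
+  ObjectSketch* prototype;
+};
+static bool IsInstanceOfSketch(ObjectSketch* object,
+                               ObjectSketch* function_prototype) {
+  ObjectSketch* current = object->prototype;
+  while (current != 0) {  // The stub compares against the null value root.
+    if (current == function_prototype) return true;
+    current = current->prototype;
+  }
+  return false;
+}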
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ BranchOnNotSmi(r1, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register r0. Use unsigned comparison to get negative
+ // check for free.
+ __ cmp(r1, r0);
+ __ b(cs, &slow);
+
+ // Read the argument from the stack and return it.
+ __ sub(r3, r0, r1);
+ __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r0, MemOperand(r3, kDisplacement));
+ __ Jump(lr);
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(r1, r0);
+ __ b(cs, &slow);
+
+ // Read the argument from the adaptor frame and return it.
+ __ sub(r3, r0, r1);
+ __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r0, MemOperand(r3, kDisplacement));
+ __ Jump(lr);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(r1);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
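+
+
+// A minimal sketch (illustrative only) of the address computation above:
+// argument |index| of an |argc|-element frame lives at a fixed displacement
+// from the frame pointer of the frame that pushed the arguments (either the
+// caller's frame or an arguments adaptor frame). Parameter names are
+// placeholders mirroring the constants used by the stub.
+static char* ArgumentAddressSketch(char* frame_pointer,
+                                   int argc,
+                                   int index,
+                                   int caller_sp_offset,
+                                   int pointer_size) {
+  int displacement = caller_sp_offset - pointer_size;
+  return frame_pointer + (argc - index) * pointer_size + displacement;
+}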
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ ldr(r1, MemOperand(sp, 0));
+ __ b(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ str(r1, MemOperand(sp, 0));
+ __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ // Try the new space allocation. Start out with computing the size
+ // of the arguments object and the elements array in words.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ cmp(r1, Operand(0));
+ __ b(eq, &add_arguments_object);
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize));
+ __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ bind(&add_arguments_object);
+ __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(
+ r1,
+ r0,
+ r2,
+ r3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ ldr(r4, MemOperand(r4, offset));
+
+ // Copy the JS object part.
+ __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::arguments_callee_index == 0);
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::arguments_length_index == 1);
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ cmp(r1, Operand(0));
+ __ b(eq, &done);
+
+ // Get the parameters pointer from the stack.
+ __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
+ __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
+ __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
+
+ // Copy the fixed array slots.
+ Label loop;
+ // Setup r4 to point to the first array slot.
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ // Pre-decrement r2 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
+ // Post-increment r4 with kPointerSize on each iteration.
+ __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
+ __ sub(r1, r1, Operand(1));
+ __ cmp(r1, Operand(0));
+ __ b(ne, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
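+
+
+// A minimal sketch (illustrative only) of the size computation above: the
+// arguments object and its elements FixedArray are allocated as one chunk,
+// and the elements array is omitted entirely when there are no arguments.
+// Parameter names are placeholders for the Heap and FixedArray constants
+// used by the stub.
+static int ArgumentsSizeInWordsSketch(int argument_count,
+                                      int arguments_object_words,
+                                      int fixed_array_header_words) {
+  if (argument_count == 0) return arguments_object_words;
+  return arguments_object_words + fixed_array_header_words + argument_count;
+}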
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to runtime if native RegExp is not selected at compile
+ // time or if the regexp entry in generated code is turned off by a runtime
+ // switch or at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // sp[0]: last_match_info (expected JSArray)
+ // sp[4]: previous index
+ // sp[8]: subject string
+ // sp[12]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 0 * kPointerSize;
+ static const int kPreviousIndexOffset = 1 * kPointerSize;
+ static const int kSubjectOffset = 2 * kPointerSize;
+ static const int kJSRegExpOffset = 3 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Allocation of registers for this function. These are in callee save
+ // registers and will be preserved by the call to the native RegExp code, as
+ // this code is called using the normal C calling convention. When calling
+ // directly from generated code the native RegExp code will not do a GC and
+ // therefore the contents of these registers are safe to use after the call.
+ Register subject = r4;
+ Register regexp_data = r5;
+ Register last_match_info_elements = r6;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(r0, Operand(address_of_regexp_stack_memory_size));
+ __ ldr(r0, MemOperand(r0, 0));
+ __ tst(r0, Operand(r0));
+ __ b(eq, &runtime);
+
+ // Check that the first argument is a JSRegExp object.
+ __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+ __ b(ne, &runtime);
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ tst(regexp_data, Operand(kSmiTagMask));
+ __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
+ __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
+ __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ b(ne, &runtime);
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ ldr(r2,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2. This
+ // uses the assumption that smis are 2 * their untagged value.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(r2, r2, Operand(2)); // r2 was a smi.
+ // Check that the static offsets vector buffer is large enough.
+ __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+ __ b(hi, &runtime);
+
+ // r2: Number of capture registers
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the second argument is a string.
+ __ ldr(subject, MemOperand(sp, kSubjectOffset));
+ __ tst(subject, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ Condition is_string = masm->IsObjectStringType(subject, r0);
+ __ b(NegateCondition(is_string), &runtime);
+ // Get the length of the string to r3.
+ __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
+
+ // r2: Number of capture registers
+ // r3: Length of subject string as a smi
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &runtime);
+ __ cmp(r3, Operand(r0));
+ __ b(ls, &runtime);
+
+ // r2: Number of capture registers
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the fourth object is a JSArray object.
+ __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+ __ b(ne, &runtime);
+ // Check that the JSArray is in fast case.
+ __ ldr(last_match_info_elements,
+ FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ ldr(r0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
+ __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
+ __ b(gt, &runtime);
+
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_string;
+ __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ // First check for flat string.
+ __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ __ b(eq, &seq_string);
+
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
+ __ b(ne, &runtime);
+ __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
+ __ cmp(r0, r1);
+ __ b(ne, &runtime);
+ __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+ __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ // Is first part a flat string?
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(r0, Operand(kStringRepresentationMask));
+ __ b(nz, &runtime);
+
+ __ bind(&seq_string);
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // r0: Instance type of subject string
+ STATIC_ASSERT(4 == kAsciiStringTag);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // Find the code object based on the assumptions above.
+ __ and_(r0, r0, Operand(kStringEncodingMask));
+ __ mov(r3, Operand(r0, ASR, 2), SetCC);
+ __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+ __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains the hole.
+ __ CompareObjectType(r7, r0, r0, CODE_TYPE);
+ __ b(ne, &runtime);
+
+ // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r7: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
+ __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+
+ // r1: previous index
+ // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r7: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ push(lr);
+ __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
+
+ // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
+ __ mov(r0, Operand(1));
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+
+ // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
+ __ mov(r0, Operand(address_of_regexp_stack_memory_address));
+ __ ldr(r0, MemOperand(r0, 0));
+ __ mov(r2, Operand(address_of_regexp_stack_memory_size));
+ __ ldr(r2, MemOperand(r2, 0));
+ __ add(r0, r0, Operand(r2));
+ __ str(r0, MemOperand(sp, 1 * kPointerSize));
+
+ // Argument 5 (sp[0]): static offsets vector buffer.
+ __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
+ __ str(r0, MemOperand(sp, 0 * kPointerSize));
+
+ // For arguments 4 and 3 get string length, calculate start of string data and
+ // calculate the shift of the index (0 for ASCII and 1 for two byte).
+ __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ eor(r3, r3, Operand(1));
+ // Argument 4 (r3): End of string data
+ // Argument 3 (r2): Start of string data
+ __ add(r2, r9, Operand(r1, LSL, r3));
+ __ add(r3, r9, Operand(r0, LSL, r3));
+
+ // Argument 2 (r1): Previous index.
+ // Already there
+
+ // Argument 1 (r0): Subject string.
+ __ mov(r0, subject);
+
+ // Locate the code entry and call it.
+ __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r7, kRegExpExecuteArguments);
+ __ pop(lr);
+
+ // r0: result
+ // subject: subject string (callee saved)
+ // regexp_data: RegExp data (callee saved)
+ // last_match_info_elements: Last match info elements (callee saved)
+
+ // Check the result.
+ Label success;
+ __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ __ b(eq, &success);
+ Label failure;
+ __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
+ __ b(eq, &failure);
+ __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ // If not exception it can only be retry. Handle that in the runtime system.
+ __ b(ne, &runtime);
+ // Result must now be exception. If there is no pending exception already, a
+ // stack overflow (on the backtrack stack) was detected in RegExp code but
+ // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r0, MemOperand(r0, 0));
+ __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ ldr(r1, MemOperand(r1, 0));
+ __ cmp(r0, r1);
+ __ b(eq, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ mov(r0, Operand(Factory::null_value()));
+ __ add(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Process the result from the native regexp code.
+ __ bind(&success);
+ __ ldr(r1,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(r1, r1, Operand(2)); // r1 was a smi.
+
+ // r1: number of capture registers
+ // r4: subject string
+ // Store the capture count.
+ __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
+ __ str(r2, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
+ __ str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+ __ str(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ mov(r3, last_match_info_elements);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector();
+ __ mov(r2, Operand(address_of_static_offsets_vector));
+
+ // r1: number of capture registers
+ // r2: offsets vector
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ add(r0,
+ last_match_info_elements,
+ Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+ __ bind(&next_capture);
+ __ sub(r1, r1, Operand(1), SetCC);
+ __ b(mi, &done);
+ // Read the value from the static offsets vector buffer.
+ __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
+ // Store the smi value in the last match info.
+ __ mov(r3, Operand(r3, LSL, kSmiTagSize));
+ __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+ __ add(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean) check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // function, receiver [, arguments]
+ Label receiver_is_value, receiver_is_js_object;
+ __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ BranchOnSmi(r1, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(r1);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ LeaveInternalFrame();
+ __ str(r0, MemOperand(sp, argc_ * kPointerSize));
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
+ // function, receiver [, arguments]
+ __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+ // Check that the function is really a JavaScript function.
+ // r1: pushed function (to be verified)
+ __ BranchOnSmi(r1, &slow);
+ // Get the map of the function object.
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &slow);
+
+ // Fast-case: Invoke the function now.
+ // r1: pushed function
+ ParameterCount actual(argc_);
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+ __ mov(r0, Operand(argc_)); // Setup the number of arguments.
+ __ mov(r2, Operand(0));
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case lt: cc_name = "LT"; break;
+ case gt: cc_name = "GT"; break;
+ case le: cc_name = "LE"; break;
+ case ge: cc_name = "GE"; break;
+ case eq: cc_name = "EQ"; break;
+ case ne: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
+ const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == eq || cc_ == ne)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s%s%s",
+ cc_name,
+ lhs_name,
+ rhs_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name);
+ return name_;
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the parameters in a unique 16 bit value. To avoid duplicate
+ // stubs the never NaN NaN condition is only taken into account if the
+ // condition is equals.
+ ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+ return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+ | RegisterField::encode(lhs_.is(r0))
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_);
+}
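+
+
+// A minimal sketch (illustrative only) of the kind of bit packing MinorKey
+// performs: every stub parameter gets its own field so distinct parameter
+// combinations produce distinct keys. The shifts and widths below are made
+// up for the sketch and do not match the real ConditionField/RegisterField/
+// StrictField/... encodings.
+static int CompareStubKeySketch(unsigned condition_bits,
+                                bool lhs_is_r0,
+                                bool strict,
+                                bool never_nan_nan,
+                                bool include_number_compare) {
+  return static_cast<int>(((condition_bits & 0xF) << 4) |
+                          (lhs_is_r0 ? 1u << 3 : 0u) |
+                          (strict ? 1u << 2 : 0u) |
+                          (never_nan_nan ? 1u << 1 : 0u) |
+                          (include_number_compare ? 1u << 0 : 0u));
+}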
+
+
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ __ BranchOnSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ tst(result_, Operand(kIsNotStringMask));
+ __ b(ne, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ BranchOnNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
+ __ cmp(ip, Operand(scratch_));
+ __ b(ls, index_out_of_range_);
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(eq, &flat_string);
+
+ // Handle non-flat strings.
+ __ tst(result_, Operand(kIsConsStringMask));
+ __ b(eq, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+ __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+ __ cmp(result_, Operand(ip));
+ __ b(ne, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(nz, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ tst(result_, Operand(kStringEncodingMask));
+ __ b(nz, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register. We can
+ // add without shifting since the smi tag size is the log2 of the
+ // number of bytes in a two-byte character.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ add(scratch_, object_, Operand(scratch_));
+ __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
+ __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+ __ bind(&got_char_code);
+ __ mov(result_, Operand(result_, LSL, kSmiTagSize));
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ scratch_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ true);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ Move(scratch_, r0);
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ BranchOnNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ Move(result_, r0);
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ __ tst(code_,
+ Operand(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ b(nz, &slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point code register contains smi tagged ascii char code.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result_, Operand(ip));
+ __ b(eq, &slow_case_);
+ __ bind(&exit_);
+}
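+
+
+// A minimal sketch (illustrative only) of the fast path above: character
+// codes up to String::kMaxAsciiCharCode are looked up in the single character
+// string cache, and a missing (undefined) entry means the slow case must
+// build the string. Types and names below are placeholders for this sketch.
+static void* CharFromCodeSketch(int char_code,
+                                void** single_char_string_cache,
+                                int max_ascii_char_code,
+                                void* undefined_sentinel) {
+  if (char_code < 0 || char_code > max_ascii_char_code) return 0;  // Slow case.
+  void* cached = single_char_string_cache[char_code];
+  if (cached == undefined_sentinel) return 0;  // Slow case: not cached yet.
+  return cached;
+}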
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Move(result_, r0);
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+}
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersLong adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
+
+
+ // Probe the symbol table for a two character string. If the string is
+ // not found by probing, a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found, the code falls through with the string in register r0.
+ // Contents of both c1 and c2 registers are modified. At the exit c1 is
+ // guaranteed to contain halfword with low and high bytes equal to
+ // initial contents of c1 and c2 respectively.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ Label done;
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (!ascii) {
+ __ add(count, count, Operand(count), SetCC);
+ } else {
+ __ cmp(count, Operand(0));
+ }
+ __ b(eq, &done);
+
+ __ bind(&loop);
+ __ ldrb(scratch, MemOperand(src, 1, PostIndex));
+ // Perform sub between load and dependent store to get the load time to
+ // complete.
+ __ sub(count, count, Operand(1), SetCC);
+ __ strb(scratch, MemOperand(dest, 1, PostIndex));
+ // last iteration.
+ __ b(gt, &loop);
+
+ __ bind(&done);
+}
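+
+
+// A minimal sketch (illustrative only) of the simple copy loop above. For
+// two-byte strings the count is doubled first (the add with SetCC above), so
+// the loop always moves bytes.
+static void CopyCharactersSketch(unsigned char* dest,
+                                 const unsigned char* src,
+                                 int char_count,
+                                 bool ascii) {
+  int byte_count = ascii ? char_count : char_count * 2;
+  for (int i = 0; i < byte_count; i++) {
+    dest[i] = src[i];
+  }
+}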
+
+
+enum CopyCharactersFlags {
+ COPY_ASCII = 1,
+ DEST_ALWAYS_ALIGNED = 2
+};
+
+
+void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags) {
+ bool ascii = (flags & COPY_ASCII) != 0;
+ bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+ if (dest_always_aligned && FLAG_debug_code) {
+ // Check that destination is actually word aligned if the flag says
+ // that it is.
+ __ tst(dest, Operand(kPointerAlignmentMask));
+ __ Check(eq, "Destination of copy not aligned.");
+ }
+
+ const int kReadAlignment = 4;
+ const int kReadAlignmentMask = kReadAlignment - 1;
+ // Ensure that reading an entire aligned word containing the last character
+ // of a string will not read outside the allocated area (because we pad up
+ // to kObjectAlignment).
+ STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
+ // Assumes word reads and writes are little endian.
+ // Nothing to do for zero characters.
+ Label done;
+ if (!ascii) {
+ __ add(count, count, Operand(count), SetCC);
+ } else {
+ __ cmp(count, Operand(0));
+ }
+ __ b(eq, &done);
+
+ // Assume that you cannot read (or write) unaligned.
+ Label byte_loop;
+ // Must copy at least eight bytes, otherwise just do it one byte at a time.
+ __ cmp(count, Operand(8));
+ __ add(count, dest, Operand(count));
+ Register limit = count; // Read until src equals this.
+ __ b(lt, &byte_loop);
+
+ if (!dest_always_aligned) {
+ // Align dest by byte copying. Copies between zero and three bytes.
+ __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
+ Label dest_aligned;
+ __ b(eq, &dest_aligned);
+ __ cmp(scratch4, Operand(2));
+ __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
+ __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
+ __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+ __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
+ __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
+ __ bind(&dest_aligned);
+ }
+
+ Label simple_loop;
+
+ __ sub(scratch4, dest, Operand(src));
+ __ and_(scratch4, scratch4, Operand(0x03), SetCC);
+ __ b(eq, &simple_loop);
+ // Shift register is number of bits in a source word that
+ // must be combined with bits in the next source word in order
+ // to create a destination word.
+
+ // Complex loop for src/dst that are not aligned the same way.
+ {
+ Label loop;
+ __ mov(scratch4, Operand(scratch4, LSL, 3));
+ Register left_shift = scratch4;
+ __ and_(src, src, Operand(~3)); // Round down to load previous word.
+ __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+ // Store the "shift" most significant bits of scratch in the least
+ // significant bits (i.e., shift down by (32-shift)).
+ __ rsb(scratch2, left_shift, Operand(32));
+ Register right_shift = scratch2;
+ __ mov(scratch1, Operand(scratch1, LSR, right_shift));
+
+ __ bind(&loop);
+ __ ldr(scratch3, MemOperand(src, 4, PostIndex));
+ __ sub(scratch5, limit, Operand(dest));
+ __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
+ __ str(scratch1, MemOperand(dest, 4, PostIndex));
+ __ mov(scratch1, Operand(scratch3, LSR, right_shift));
+ // Loop if four or more bytes left to copy.
+ // Compare to eight, because we did the subtract before increasing dst.
+ __ sub(scratch5, scratch5, Operand(8), SetCC);
+ __ b(ge, &loop);
+ }
+ // There is now between zero and three bytes left to copy (negative that
+ // number is in scratch5), and between one and three bytes already read into
+ // scratch1 (eight times that number in scratch4). We may have read past
+ // the end of the string, but because objects are aligned, we have not read
+ // past the end of the object.
+ // Find the minimum of remaining characters to move and preloaded characters
+ // and write those as bytes.
+ __ add(scratch5, scratch5, Operand(4), SetCC);
+ __ b(eq, &done);
+ __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+ // Move minimum of bytes read and bytes left to copy to scratch4.
+ __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
+ // Between one and three (value in scratch5) characters already read into
+ // scratch ready to write.
+ __ cmp(scratch5, Operand(2));
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+ __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
+ __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
+ // Copy any remaining bytes.
+ __ b(&byte_loop);
+
+ // Simple loop.
+ // Copy words from src to dst, until less than four bytes left.
+ // Both src and dest are word aligned.
+ __ bind(&simple_loop);
+ {
+ Label loop;
+ __ bind(&loop);
+ __ ldr(scratch1, MemOperand(src, 4, PostIndex));
+ __ sub(scratch3, limit, Operand(dest));
+ __ str(scratch1, MemOperand(dest, 4, PostIndex));
+ // Compare to 8, not 4, because we do the subtraction before increasing
+ // dest.
+ __ cmp(scratch3, Operand(8));
+ __ b(ge, &loop);
+ }
+
+ // Copy bytes from src to dst until dst hits limit.
+ __ bind(&byte_loop);
+ __ cmp(dest, Operand(limit));
+ __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
+ __ b(ge, &done);
+ __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+ __ b(&byte_loop);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits, as such strings have a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ sub(scratch, c1, Operand(static_cast<int>('0')));
+ __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+ __ b(hi, &not_array_index);
+ __ sub(scratch, c2, Operand(static_cast<int>('0')));
+ __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+
+ // If the check failed, combine both characters into a single halfword.
+ // This is required by the contract of the method: code at the
+ // not_found branch expects this combination in the c1 register.
+ __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
+ __ b(ls, not_found);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ StringHelper::GenerateHashInit(masm, hash, c1);
+ StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+ StringHelper::GenerateHashGetHash(masm, hash);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load symbol table
+ // Load address of first element of the symbol table.
+ Register symbol_table = c2;
+ __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+ // Load undefined value
+ Register undefined = scratch4;
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ mov(mask, Operand(mask, ASR, 1));
+ __ sub(mask, mask, Operand(1));
+
+ // Calculate untagged address of the first element of the symbol table.
+ Register first_symbol_table_element = symbol_table;
+ __ add(first_symbol_table_element, symbol_table,
+ Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // mask: capacity mask
+ // first_symbol_table_element: address of the first element of
+ // the symbol table
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes];
+ for (int i = 0; i < kProbes; i++) {
+ Register candidate = scratch5; // Scratch register contains candidate.
+
+ // Calculate entry in symbol table.
+ if (i > 0) {
+ __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ } else {
+ __ mov(candidate, hash);
+ }
+
+ __ and_(candidate, candidate, Operand(mask));
+
+ // Load the entry from the symbol table.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ ldr(candidate,
+ MemOperand(first_symbol_table_element,
+ candidate,
+ LSL,
+ kPointerSizeLog2));
+
+ // If entry is undefined no string with this hash can be found.
+ __ cmp(candidate, undefined);
+ __ b(eq, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+ __ cmp(scratch, Operand(Smi::FromInt(2)));
+ __ b(ne, &next_probe[i]);
+
+ // Check that the candidate is a non-external ascii string.
+ __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
+ &next_probe[i]);
+
+ // Check if the two characters match.
+ // Assumes that word load is little endian.
+ __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ cmp(chars, scratch);
+ __ b(eq, &found_in_symbol_table);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ __ Move(r0, result);
+}
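+
+
+// A minimal sketch (illustrative only) of the probing scheme above: at most
+// kProbes slots of the symbol table are inspected, and giving up after that
+// is conservative (the string may still be in the table). The types, the
+// linear probe offsets and the matches() callback are placeholders for this
+// sketch; the real code uses SymbolTable::GetProbeOffset(i).
+struct SymbolTableSketch {
+  void** entries;
+  unsigned capacity_mask;  // capacity - 1, with capacity a power of two.
+  void* undefined_sentinel;
+};
+static void* TwoCharacterProbeSketch(const SymbolTableSketch& table,
+                                     unsigned hash,
+                                     bool (*matches)(void* candidate)) {
+  const int kProbesSketch = 4;
+  for (int i = 0; i < kProbesSketch; i++) {
+    unsigned index = (hash + i) & table.capacity_mask;
+    void* candidate = table.entries[index];
+    if (candidate == table.undefined_sentinel) return 0;  // Definitely absent.
+    if (matches(candidate)) return candidate;             // Two-char match.
+  }
+  return 0;  // Not found by probing; the caller treats this as "not found".
+}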
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash = character + (character << 10);
+ __ add(hash, character, Operand(character, LSL, 10));
+ // hash ^= hash >> 6;
+ __ eor(hash, hash, Operand(hash, ASR, 6));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash += character;
+ __ add(hash, hash, Operand(character));
+ // hash += hash << 10;
+ __ add(hash, hash, Operand(hash, LSL, 10));
+ // hash ^= hash >> 6;
+ __ eor(hash, hash, Operand(hash, ASR, 6));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash) {
+ // hash += hash << 3;
+ __ add(hash, hash, Operand(hash, LSL, 3));
+ // hash ^= hash >> 11;
+ __ eor(hash, hash, Operand(hash, ASR, 11));
+ // hash += hash << 15;
+ __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
+
+ // if (hash == 0) hash = 27;
+ __ mov(hash, Operand(27), LeaveCC, nz);
+}
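+
+
+// A minimal sketch (illustrative only) of the hash the three helpers above
+// compute, written over a whole character sequence in plain C++. It mirrors
+// the formulas in the comments; the assembly processes one character per
+// call and keeps the running hash in a register.
+static unsigned StringHashSketch(const unsigned char* chars, int length) {
+  unsigned hash = 0;
+  for (int i = 0; i < length; i++) {
+    // GenerateHashInit is the first iteration (hash starts at zero);
+    // GenerateHashAddCharacter is every following one.
+    hash += chars[i];
+    hash += hash << 10;
+    hash ^= hash >> 6;
+  }
+  // GenerateHashGetHash.
+  hash += hash << 3;
+  hash ^= hash >> 11;
+  hash += hash << 15;
+  if (hash == 0) hash = 27;
+  return hash;
+}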
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // lr: return address
+ // sp[0]: to
+ // sp[4]: from
+ // sp[8]: string
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length.
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+
+ // Check bounds and smi-ness.
+ __ ldr(r7, MemOperand(sp, kToOffset));
+ __ ldr(r6, MemOperand(sp, kFromOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ // I.e., arithmetic shift right by one un-smi-tags.
+ __ mov(r2, Operand(r7, ASR, 1), SetCC);
+ __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
+ // If either to (r7) or from (r6) had the smi tag bit set, carry is set now.
+ __ b(cs, &runtime); // Either "from" or "to" is not a smi.
+ __ b(mi, &runtime); // From is negative.
+
+ __ sub(r2, r2, Operand(r3), SetCC);
+ __ b(mi, &runtime); // Fail if from > to.
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked for in the symbol cache.
+ __ cmp(r2, Operand(2));
+ __ b(lt, &runtime);
+
+ // r2: length
+ // r3: from index (untagged smi)
+ // r6: from (smi)
+ // r7: to (smi)
+
+ // Make sure first argument is a sequential (or flat) string.
+ __ ldr(r5, MemOperand(sp, kStringOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r5, Operand(kSmiTagMask));
+ __ b(eq, &runtime);
+ Condition is_string = masm->IsObjectStringType(r5, r1);
+ __ b(NegateCondition(is_string), &runtime);
+
+ // r1: instance type
+ // r2: length
+ // r3: from index (untagged smi)
+ // r5: string
+ // r6: from (smi)
+ // r7: to (smi)
+ Label seq_string;
+ __ and_(r4, r1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+ __ cmp(r4, Operand(kConsStringTag));
+ __ b(gt, &runtime); // External strings go to runtime.
+ __ b(lt, &seq_string); // Sequential strings are handled directly.
+
+ // Cons string. Try to recurse (once) on the first substring.
+ // (This adds a little more generality than necessary to handle flattened
+ // cons strings, but not much).
+ __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
+ __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ tst(r1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ b(ne, &runtime); // Cons and External strings go to runtime.
+
+ // Definitely a sequential string.
+ __ bind(&seq_string);
+
+ // r1: instance type.
+ // r2: length
+ // r3: from index (untagged smi)
+ // r5: string
+ // r6: from (smi)
+ // r7: to (smi)
+ __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
+ __ cmp(r4, Operand(r7));
+ __ b(lt, &runtime); // Fail if to > length.
+
+ // r1: instance type.
+ // r2: result string length.
+ // r3: from index (untagged smi)
+ // r5: string.
+ // r6: from offset (smi)
+ // Check for flat ascii string.
+ Label non_ascii_flat;
+ __ tst(r1, Operand(kStringEncodingMask));
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ b(eq, &non_ascii_flat);
+
+ Label result_longer_than_two;
+ __ cmp(r2, Operand(2));
+ __ b(gt, &result_longer_than_two);
+
+ // Sub string of length 2 requested.
+ // Get the two characters forming the sub string.
+ __ add(r5, r5, Operand(r3));
+ __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
+ __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // r2: result string length.
+ // r3: two characters combined into halfword in little endian byte order.
+ __ bind(&make_two_character_string);
+ __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
+ __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&result_longer_than_two);
+
+ // Allocate the result.
+ __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
+
+ // r0: result string.
+ // r2: result string length.
+ // r5: string.
+ // r6: from offset (smi)
+ // Locate first character of result.
+ __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, Operand(r6, ASR, 1));
+
+ // r0: result string.
+ // r1: first character of result string.
+ // r2: result string length.
+ // r5: first character of sub string to copy.
+ STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_flat);
+ // r2: result string length.
+ // r5: string.
+ // r6: from offset (smi)
+ // Check for flat two byte string.
+
+ // Allocate the result.
+ __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
+
+ // r0: result string.
+ // r2: result string length.
+ // r5: string.
+ // Locate first character of result.
+ __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // As "from" is a smi it is 2 times the value which matches the size of a two
+ // byte character.
+ __ add(r5, r5, Operand(r6));
+
+ // r0: result string.
+ // r1: first character of result.
+ // r2: result length.
+ // r5: first character of string to copy.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Label compare_lengths;
+ // Find minimum length and length difference.
+ __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
+ Register length_delta = scratch3;
+ __ mov(scratch1, scratch2, LeaveCC, gt);
+ Register min_length = scratch1;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(min_length, Operand(min_length));
+ __ b(eq, &compare_lengths);
+
+ // Untag smi.
+ __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
+
+ // Setup registers so that we only need to increment one register
+ // in the loop.
+ __ add(scratch2, min_length,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(left, left, Operand(scratch2));
+ __ add(right, right, Operand(scratch2));
+  // Registers left and right point to the min_length character of the strings.
+ __ rsb(min_length, min_length, Operand(-1));
+ Register index = min_length;
+ // Index starts at -min_length.
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ add(index, index, Operand(1), SetCC);
+ __ ldrb(scratch2, MemOperand(left, index), ne);
+ __ ldrb(scratch4, MemOperand(right, index), ne);
+ // Skip to compare lengths with eq condition true.
+ __ b(eq, &compare_lengths);
+ __ cmp(scratch2, scratch4);
+ __ b(eq, &loop);
+ // Fallthrough with eq condition false.
+ }
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Use zero length_delta as result.
+ __ mov(r0, Operand(length_delta), SetCC, eq);
+ // Fall through to here if characters compare not-equal.
+ __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
+ __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
+ __ Ret();
+}
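For illustration only (a sketch, not code from this change): the comparison the stub above implements can be summarized in plain C++. The function name and signature below are hypothetical; only the LESS/EQUAL/GREATER semantics are taken from the stub.

#include <cstddef>

// Compare the common prefix byte by byte, then fall back to the length
// difference, mirroring GenerateCompareFlatAsciiStrings.
static int CompareFlatAsciiReference(const unsigned char* left, size_t left_len,
                                     const unsigned char* right, size_t right_len) {
  size_t min_length = left_len < right_len ? left_len : right_len;
  for (size_t i = 0; i < min_length; i++) {
    if (left[i] != right[i]) {
      return left[i] < right[i] ? -1 : 1;  // LESS / GREATER
    }
  }
  if (left_len < right_len) return -1;     // LESS
  if (left_len > right_len) return 1;      // GREATER
  return 0;                                // EQUAL
}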
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[4]: left string
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(r0, r1);
+ __ b(ne, &not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+
+ // Compare flat ascii strings natively. Remove arguments from stack first.
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+ // Stack on entry:
+ // sp[0]: second argument.
+ // sp[4]: first argument.
+
+ // Load the two arguments.
+ __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
+ // Load instance types.
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kStringTag == 0);
+ // If either is not a string, go to runtime.
+ __ tst(r4, Operand(kIsNotStringMask));
+ __ tst(r5, Operand(kIsNotStringMask), eq);
+ __ b(ne, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // r0: first string
+ // r1: second string
+ // r4: first string instance type (if string_check_)
+ // r5: second string instance type (if string_check_)
+ {
+ Label strings_not_empty;
+    // Check if either of the strings is empty. In that case return the other.
+ __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
+ __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
+ __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
+ STATIC_ASSERT(kSmiTag == 0);
+ // Else test if second string is empty.
+ __ cmp(r3, Operand(Smi::FromInt(0)), ne);
+ __ b(ne, &strings_not_empty); // If either string was empty, return r0.
+
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&strings_not_empty);
+ }
+
+ __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+ __ mov(r3, Operand(r3, ASR, kSmiTagSize));
+ // Both strings are non-empty.
+ // r0: first string
+ // r1: second string
+ // r2: length of first string
+ // r3: length of second string
+ // r4: first string instance type (if string_check_)
+ // r5: second string instance type (if string_check_)
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result, longer_than_two;
+ // Adding two lengths can't overflow.
+ STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
+ __ add(r6, r2, Operand(r3));
+ // Use the runtime system when adding two one character strings, as it
+ // contains optimizations for this specific case using the symbol table.
+ __ cmp(r6, Operand(2));
+ __ b(ne, &longer_than_two);
+
+ // Check that both strings are non-external ascii strings.
+ if (!string_check_) {
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ }
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
+ &string_add_runtime);
+
+  // Get the two characters forming the new string.
+ __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+
+  // Try to look up the two character string in the symbol table. If it is
+  // not found, just allocate a new one.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&make_two_character_string);
+  // The resulting string has length 2 and the first characters of the two
+  // strings have been combined into a single halfword in the r2 register.
+  // This means the resulting string can be filled with a single halfword
+  // store instruction instead of two loops (which assumes the processor is
+  // in little-endian mode).
+ __ mov(r6, Operand(2));
+ __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
+ __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
+ __ cmp(r6, Operand(String::kMinNonFlatLength));
+ __ b(lt, &string_add_flat_result);
+ // Handle exceptionally long strings in the runtime system.
+ STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+ ASSERT(IsPowerOf2(String::kMaxLength + 1));
+  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
+ __ cmp(r6, Operand(String::kMaxLength + 1));
+ __ b(hs, &string_add_runtime);
+
+ // If result is not supposed to be flat, allocate a cons string object.
+ // If both strings are ascii the result is an ascii cons string.
+ if (!string_check_) {
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ }
+ Label non_ascii, allocated, ascii_data;
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ tst(r4, Operand(kStringEncodingMask));
+ __ tst(r5, Operand(kStringEncodingMask), ne);
+ __ b(eq, &non_ascii);
+
+ // Allocate an ASCII cons string.
+ __ bind(&ascii_data);
+ __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
+ __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+ __ mov(r0, Operand(r7));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // r4: first instance type.
+ // r5: second instance type.
+ __ tst(r4, Operand(kAsciiDataHintMask));
+ __ tst(r5, Operand(kAsciiDataHintMask), ne);
+ __ b(ne, &ascii_data);
+ __ eor(r4, r4, Operand(r5));
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ b(eq, &ascii_data);
+
+ // Allocate a two byte cons string.
+ __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are
+ // sequential and that they have the same encoding.
+ // r0: first string
+ // r1: second string
+ // r2: length of first string
+ // r3: length of second string
+ // r4: first string instance type (if string_check_)
+ // r5: second string instance type (if string_check_)
+ // r6: sum of lengths.
+ __ bind(&string_add_flat_result);
+ if (!string_check_) {
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ }
+ // Check that both strings are sequential.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(r4, Operand(kStringRepresentationMask));
+ __ tst(r5, Operand(kStringRepresentationMask), eq);
+ __ b(ne, &string_add_runtime);
+ // Now check if both strings have the same encoding (ASCII/Two-byte).
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+  // r6: sum of lengths.
+ Label non_ascii_string_add_flat_result;
+ ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
+ __ eor(r7, r4, Operand(r5));
+ __ tst(r7, Operand(kStringEncodingMask));
+ __ b(ne, &string_add_runtime);
+ // And see if it's ASCII or two-byte.
+ __ tst(r4, Operand(kStringEncodingMask));
+ __ b(eq, &non_ascii_string_add_flat_result);
+
+ // Both strings are sequential ASCII strings. We also know that they are
+ // short (since the sum of the lengths is less than kMinNonFlatLength).
+ // r6: length of resulting flat string
+ __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
+ // Locate first character of result.
+ __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // r0: first character of first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: first character of result.
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
+
+ // Load second argument and locate first character.
+ __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // r1: first character of second string.
+ // r3: length of second string.
+ // r6: next character of result.
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+ __ mov(r0, Operand(r7));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_string_add_flat_result);
+ // Both strings are sequential two byte strings.
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: sum of length of strings.
+ __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
+ // r0: first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r7: result string.
+
+ // Locate first character of result.
+ __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r0: first character of first string.
+ // r1: second string.
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: first character of result.
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
+
+ // Locate first character of second argument.
+ __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // r1: first character of second string.
+ // r3: length of second string.
+ // r6: next character of result (after copy of first string).
+ // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+
+ __ mov(r0, Operand(r7));
+ __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+}
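For illustration only (a sketch, not code from this change): the dispatch order of the fast path above, ignoring the encoding and sequential-string checks. The thresholds mirror the stub; the enum and function name below are hypothetical.

#include <cstddef>

enum AddStrategy { kReturnOtherOperand, kTwoCharacterProbe, kFlatCopy,
                   kConsString, kRuntime };

static AddStrategy ChooseAddStrategy(size_t len1, size_t len2,
                                     size_t min_non_flat_length,
                                     size_t max_length) {
  if (len1 == 0 || len2 == 0) return kReturnOtherOperand;  // Return the other string.
  size_t sum = len1 + len2;
  if (sum == 2) return kTwoCharacterProbe;          // Symbol table lookup first.
  if (sum < min_non_flat_length) return kFlatCopy;  // Short result: allocate and copy.
  if (sum > max_length) return kRuntime;            // Exceptionally long strings.
  return kConsString;                               // Otherwise build a cons string.
}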
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
new file mode 100644
index 00000000..2e07e3b5
--- /dev/null
+++ b/src/arm/code-stubs-arm.h
@@ -0,0 +1,491 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_CODE_STUBS_ARM_H_
+#define V8_ARM_CODE_STUBS_ARM_H_
+
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register tos_;
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return tos_.code(); }
+};
+
+
+class GenericBinaryOpStub : public CodeStub {
+ public:
+ static const int kUnknownIntValue = -1;
+
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ Register lhs,
+ Register rhs,
+ int constant_rhs = kUnknownIntValue)
+ : op_(op),
+ mode_(mode),
+ lhs_(lhs),
+ rhs_(rhs),
+ constant_rhs_(constant_rhs),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+ runtime_operands_type_(BinaryOpIC::DEFAULT),
+ name_(NULL) { }
+
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ lhs_(LhsRegister(RegisterBits::decode(key))),
+ rhs_(RhsRegister(RegisterBits::decode(key))),
+ constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
+ runtime_operands_type_(type_info),
+ name_(NULL) { }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ Register lhs_;
+ Register rhs_;
+ int constant_rhs_;
+ bool specialized_on_rhs_;
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+ char* name_;
+
+ static const int kMaxKnownRhs = 0x40000000;
+ static const int kKnownRhsKeyBits = 6;
+
+ // Minor key encoding in 17 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 6> {};
+ class TypeInfoBits: public BitField<int, 8, 2> {};
+ class RegisterBits: public BitField<bool, 10, 1> {};
+ class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+    // Encode the parameters in a unique 17 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | KnownIntBits::encode(MinorKeyForKnownInt())
+ | TypeInfoBits::encode(runtime_operands_type_)
+ | RegisterBits::encode(lhs_.is(r0));
+ }
+
+ void Generate(MacroAssembler* masm);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+ void HandleBinaryOpSlowCases(MacroAssembler* masm,
+ Label* not_smi,
+ Register lhs,
+ Register rhs,
+ const Builtins::JavaScript& builtin);
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+ if (constant_rhs == kUnknownIntValue) return false;
+ if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+ if (op == Token::MOD) {
+ if (constant_rhs <= 1) return false;
+ if (constant_rhs <= 10) return true;
+ if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+ return false;
+ }
+ return false;
+ }
+
+ int MinorKeyForKnownInt() {
+ if (!specialized_on_rhs_) return 0;
+ if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+ ASSERT(IsPowerOf2(constant_rhs_));
+ int key = 12;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ key++;
+ d >>= 1;
+ }
+ ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
+ return key;
+ }
+
+ int KnownBitsForMinorKey(int key) {
+ if (!key) return 0;
+ if (key <= 11) return key - 1;
+ int d = 1;
+ while (key != 12) {
+ key--;
+ d <<= 1;
+ }
+ return d;
+ }
+
+ Register LhsRegister(bool lhs_is_r0) {
+ return lhs_is_r0 ? r0 : r1;
+ }
+
+ Register RhsRegister(bool lhs_is_r0) {
+ return lhs_is_r0 ? r1 : r0;
+ }
+
+ bool ShouldGenerateSmiCode() {
+ return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ bool ShouldGenerateFPCode() {
+ return runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(runtime_operands_type_);
+ }
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ if (!specialized_on_rhs_) {
+ PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+ } else {
+ PrintF("GenericBinaryOpStub (%s by %d)\n",
+ Token::String(op_),
+ constant_rhs_);
+ }
+ }
+#endif
+};
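For illustration only (a sketch, not code from this change): the round trip between a specialized right-hand-side constant and the 6-bit KnownIntBits field, mirroring MinorKeyForKnownInt() and KnownBitsForMinorKey() above. The function names below are hypothetical.

// Constants 2..10 map to keys 3..11; powers of two above 10 map to
// 12 + their bit position; key 0 means "not specialized on the rhs".
static int KeyForKnownConstant(int constant_rhs) {
  if (constant_rhs <= 10) return constant_rhs + 1;
  int key = 12;
  while ((constant_rhs & 1) == 0) { key++; constant_rhs >>= 1; }
  return key;
}

static int KnownConstantForKey(int key) {
  if (key == 0) return 0;
  if (key <= 11) return key - 1;
  return 1 << (key - 12);  // Inverse of the power-of-two case.
}

// For example: KeyForKnownConstant(7) == 8 and KnownConstantForKey(8) == 7;
// KeyForKnownConstant(1 << 20) == 32 and KnownConstantForKey(32) == 1 << 20.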
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ StringCompareStub() { }
+
+  // Compare two flat ASCII strings and return the result in r0.
+ // Does not use the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+// This stub can do a fast mod operation without using fp.
+// It is tail called from the GenericBinaryOpStub and it always
+// returns an answer. It never causes GC so it doesn't need a real frame.
+//
+// The inputs are always positive Smis. This is never called
+// where the denominator is a power of 2. We handle that separately.
+//
+// If we consider the denominator as an odd number multiplied by a power of 2,
+// then:
+// * The exponent (power of 2) is in the shift_distance register.
+// * The odd number is in the odd_number register. It is always in the range
+// of 3 to 25.
+// * The bits from the numerator that are to be copied to the answer (there are
+// shift_distance of them) are in the mask_bits register.
+// * The other bits of the numerator have been shifted down and are in the lhs
+// register.
+class IntegerModStub : public CodeStub {
+ public:
+ IntegerModStub(Register result,
+ Register shift_distance,
+ Register odd_number,
+ Register mask_bits,
+ Register lhs,
+ Register scratch)
+ : result_(result),
+ shift_distance_(shift_distance),
+ odd_number_(odd_number),
+ mask_bits_(mask_bits),
+ lhs_(lhs),
+ scratch_(scratch) {
+ // We don't code these in the minor key, so they should always be the same.
+ // We don't really want to fix that since this stub is rather large and we
+ // don't want many copies of it.
+ ASSERT(shift_distance_.is(r9));
+ ASSERT(odd_number_.is(r4));
+ ASSERT(mask_bits_.is(r3));
+ ASSERT(scratch_.is(r5));
+ }
+
+ private:
+ Register result_;
+ Register shift_distance_;
+ Register odd_number_;
+ Register mask_bits_;
+ Register lhs_;
+ Register scratch_;
+
+ // Minor key encoding in 16 bits.
+ class ResultRegisterBits: public BitField<int, 0, 4> {};
+ class LhsRegisterBits: public BitField<int, 4, 4> {};
+
+ Major MajorKey() { return IntegerMod; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return ResultRegisterBits::encode(result_.code())
+ | LhsRegisterBits::encode(lhs_.code());
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "IntegerModStub"; }
+
+ // Utility functions.
+ void DigitSum(MacroAssembler* masm,
+ Register lhs,
+ int mask,
+ int shift,
+ Label* entry);
+ void DigitSum(MacroAssembler* masm,
+ Register lhs,
+ Register scratch,
+ int mask,
+ int shift1,
+ int shift2,
+ Label* entry);
+ void ModGetInRangeBySubtraction(MacroAssembler* masm,
+ Register lhs,
+ int shift,
+ int rhs);
+ void ModReduce(MacroAssembler* masm,
+ Register lhs,
+ int max,
+ int denominator);
+ void ModAnswer(MacroAssembler* masm,
+ Register result,
+ Register shift_distance,
+ Register mask_bits,
+ Register sum_of_digits);
+
+
+#ifdef DEBUG
+ void Print() { PrintF("IntegerModStub\n"); }
+#endif
+};
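For illustration only (a sketch, not code from this change): the identity the register assignment above reflects. If the denominator is odd_number * 2^shift_distance, the low shift_distance bits of the numerator (mask_bits) pass straight through to the answer, and only the remaining high bits (lhs) need reducing modulo the odd factor. A scalar reference version, with hypothetical names:

static unsigned ModByOddTimesPowerOfTwo(unsigned numerator,
                                        unsigned odd_number,
                                        int shift_distance) {
  unsigned mask_bits = numerator & ((1u << shift_distance) - 1);  // Copied to the answer.
  unsigned lhs = numerator >> shift_distance;                     // Remaining high bits.
  return ((lhs % odd_number) << shift_distance) | mask_bits;
}

// For example, ModByOddTimesPowerOfTwo(1234, 25, 2) == 1234 % 100 == 34.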
+
+
+// This stub can convert a signed int32 to a heap number (double). It does
+// not work for int32s that are in Smi range! No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+ WriteInt32ToHeapNumberStub(Register the_int,
+ Register the_heap_number,
+ Register scratch)
+ : the_int_(the_int),
+ the_heap_number_(the_heap_number),
+ scratch_(scratch) { }
+
+ private:
+ Register the_int_;
+ Register the_heap_number_;
+ Register scratch_;
+
+ // Minor key encoding in 16 bits.
+ class IntRegisterBits: public BitField<int, 0, 4> {};
+ class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+ class ScratchRegisterBits: public BitField<int, 8, 4> {};
+
+ Major MajorKey() { return WriteInt32ToHeapNumber; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return IntRegisterBits::encode(the_int_.code())
+ | HeapNumberRegisterBits::encode(the_heap_number_.code())
+ | ScratchRegisterBits::encode(scratch_.code());
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the object register is found in the cache the generated code falls through
+  // with the result in the result register. The object and the result register
+  // can be the same. If the number is not found in the cache the code jumps to
+  // the label not_found; only the content of the object register is guaranteed
+  // to be unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+};
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register offset, Register scratch)
+ : object_(object), offset_(offset), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register offset_;
+ Register scratch_;
+
+ // Minor key encoding in 12 bits. 4 bits for each of the three
+ // registers (object, offset and scratch) OOOOAAAASSSS.
+ class ScratchBits: public BitField<uint32_t, 0, 4> {};
+ class OffsetBits: public BitField<uint32_t, 4, 4> {};
+ class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ OffsetBits::encode(offset_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
+ " (scratch reg %d)\n",
+ object_.code(), offset_.code(), scratch_.code());
+ }
+#endif
+};
+
+
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+ RegExpCEntryStub() {}
+ virtual ~RegExpCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return RegExpCEntry; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "RegExpCEntryStub"; }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index e20be008..08a8da0f 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -49,27 +50,6 @@ namespace v8 {
namespace internal {
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc,
- bool never_nan_nan);
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-static void MultiplyByKnownInt(MacroAssembler* masm,
- Register source,
- Register destination,
- int known_int);
-static bool IsEasyToMultiplyBy(int x);
-
-
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
@@ -1049,6 +1029,43 @@ static int BitPosition(unsigned x) {
}
+// Can we multiply by x with at most two shifts and an add?
+// This answers yes to all integers from 2 to 10.
+static bool IsEasyToMultiplyBy(int x) {
+ if (x < 2) return false; // Avoid special cases.
+ if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
+ if (IsPowerOf2(x)) return true; // Simple shift.
+ if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
+ if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
+ return false;
+}
+
+
+// Can multiply by anything that IsEasyToMultiplyBy returns true for.
+// Source and destination may be the same register. This routine does
+// not set carry and overflow the way a mul instruction would.
+static void InlineMultiplyByKnownInt(MacroAssembler* masm,
+ Register source,
+ Register destination,
+ int known_int) {
+ if (IsPowerOf2(known_int)) {
+ masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
+ } else if (PopCountLessThanEqual2(known_int)) {
+ int first_bit = BitPosition(known_int);
+ int second_bit = BitPosition(known_int ^ (1 << first_bit));
+ masm->add(destination, source,
+ Operand(source, LSL, second_bit - first_bit));
+ if (first_bit != 0) {
+ masm->mov(destination, Operand(destination, LSL, first_bit));
+ }
+ } else {
+ ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
+ int the_bit = BitPosition(known_int + 1);
+ masm->rsb(destination, source, Operand(source, LSL, the_bit));
+ }
+}
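For illustration only (a sketch, not code from this change): the same three decompositions in plain C++, reusing the helpers referenced above (IsPowerOf2, PopCountLessThanEqual2, BitPosition). The function name below is hypothetical.

static int MultiplyByKnownIntReference(int x, int known_int) {
  if (IsPowerOf2(known_int)) {
    return x << BitPosition(known_int);                          // e.g. x * 8  == x << 3
  } else if (PopCountLessThanEqual2(known_int)) {
    int first_bit = BitPosition(known_int);
    int second_bit = BitPosition(known_int ^ (1 << first_bit));
    // Shift-and-add, then shift: e.g. x * 10 == (x + (x << 2)) << 1.
    return (x + (x << (second_bit - first_bit))) << first_bit;
  } else {
    // Patterns like 0b111...1: e.g. x * 7 == (x << 3) - x.
    return (x << BitPosition(known_int + 1)) - x;
  }
}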
+
+
void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
@@ -1118,7 +1135,8 @@ void CodeGenerator::SmiOperation(Token::Value op,
frame_->EmitPush(lhs, TypeInfo::Smi());
TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
frame_->EmitPush(rhs, t);
- GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
+ GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
+ GenericBinaryOpStub::kUnknownIntValue);
}
return;
}
@@ -1359,7 +1377,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
// brevity to comprehensiveness.
__ tst(tos, Operand(mask));
deferred->Branch(ne);
- MultiplyByKnownInt(masm_, tos, tos, int_value);
+ InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
deferred->BindExit();
frame_->EmitPush(tos);
break;
@@ -3533,9 +3551,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
// Perform the binary operation.
Literal* literal = node->value()->AsLiteral();
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(),
literal->handle(),
@@ -3633,9 +3649,7 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
// Perform the binary operation.
Literal* literal = node->value()->AsLiteral();
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(),
literal->handle(),
@@ -3749,9 +3763,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
// Perform the binary operation.
Literal* literal = node->value()->AsLiteral();
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(),
literal->handle(),
@@ -4179,11 +4191,10 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// actual function to call is resolved after the arguments have been
// evaluated.
- // Compute function to call and use the global object as the
- // receiver. There is no need to use the global proxy here because
- // it will always be replaced with a newly allocated object.
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
Load(node->expression());
- LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
@@ -4192,21 +4203,21 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
Load(args->at(i));
}
+ // Spill everything from here to simplify the implementation.
VirtualFrame::SpilledScope spilled_scope(frame_);
- // r0: the number of arguments.
+ // Load the argument count into r0 and the function into r1 as per
+ // calling convention.
__ mov(r0, Operand(arg_count));
- // Load the function into r1 as per calling convention.
- __ ldr(r1, frame_->ElementAt(arg_count + 1));
+ __ ldr(r1, frame_->ElementAt(arg_count));
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
+ frame_->EmitPush(r0);
- // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
- __ str(r0, frame_->Top());
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -5295,6 +5306,13 @@ void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
__ cmp(r1, Operand(ip));
__ b(ne, &done);
+ if (FLAG_debug_code) {
+ __ LoadRoot(r2, Heap::kEmptyFixedArrayRootIndex);
+ __ ldr(ip, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ cmp(ip, r2);
+ __ Check(eq, "JSRegExpResult: default map but non-empty properties.");
+ }
+
// All set, copy the contents to a new object.
__ AllocateInNewSpace(JSRegExpResult::kSize,
r2,
@@ -5310,7 +5328,6 @@ void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
__ ldm(ib, r0, r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit());
__ stm(ia, r2,
r1.bit() | r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit());
- ASSERT(!Heap::InNewSpace(Heap::fixed_cow_array_map()));
ASSERT(JSRegExp::kElementsOffset == 2 * kPointerSize);
// Check whether elements array is empty fixed array, and otherwise make
// it copy-on-write (it never should be empty unless someone is messing
@@ -5620,6 +5637,27 @@ void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Register value = frame_->PopToRegister();
+ Register tmp = frame_->scratch0();
+ __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
+ __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
+ cc_reg_ = eq;
+}
+
+
+void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Register value = frame_->PopToRegister();
+
+ __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
+ __ IndexFromHash(value, value);
+ frame_->EmitPush(value);
+}
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
@@ -5733,9 +5771,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0); // r0 has result
} else {
- bool can_overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = node->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
@@ -6059,12 +6095,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Literal* rliteral = node->right()->AsLiteral();
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
- bool overwrite_left =
- (node->left()->AsBinaryOperation() != NULL &&
- node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
- bool overwrite_right =
- (node->right()->AsBinaryOperation() != NULL &&
- node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_left = node->left()->ResultOverwriteAllowed();
+ bool overwrite_right = node->right()->ResultOverwriteAllowed();
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
@@ -6133,47 +6165,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Expression* right = node->right();
Token::Value op = node->op();
- // To make null checks efficient, we check if either left or right is the
- // literal 'null'. If so, we optimize the code by inlining a null check
- // instead of calling the (very) general runtime routine for checking
- // equality.
- if (op == Token::EQ || op == Token::EQ_STRICT) {
- bool left_is_null =
- left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
- bool right_is_null =
- right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
- // The 'null' value can only be equal to 'null' or 'undefined'.
- if (left_is_null || right_is_null) {
- Load(left_is_null ? right : left);
- Register tos = frame_->PopToRegister();
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos, ip);
-
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- if (op != Token::EQ_STRICT) {
- true_target()->Branch(eq);
-
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, Operand(ip));
- true_target()->Branch(eq);
-
- __ tst(tos, Operand(kSmiTagMask));
- false_target()->Branch(eq);
-
- // It can be an undetectable object.
- __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
- __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
- __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
- __ cmp(tos, Operand(1 << Map::kIsUndetectable));
- }
-
- cc_reg_ = eq;
- ASSERT(has_cc() && frame_->height() == original_height);
- return;
- }
- }
-
// To make typeof testing for natives implemented in JavaScript really
// efficient, we generate special code for expressions of the form:
// 'typeof <expression> == <string>'.
@@ -6334,6 +6325,40 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
+void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ Comment cmnt(masm_, "[ CompareToNull");
+
+ Load(node->expression());
+ Register tos = frame_->PopToRegister();
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(tos, ip);
+
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ if (!node->is_strict()) {
+ true_target()->Branch(eq);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(tos, Operand(ip));
+ true_target()->Branch(eq);
+
+ __ tst(tos, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
+ // It can be an undetectable object.
+ __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
+ __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
+ __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
+ __ cmp(tos, Operand(1 << Map::kIsUndetectable));
+ }
+
+ cc_reg_ = eq;
+ ASSERT(has_cc() && frame_->height() == original_height);
+}
+
+
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
explicit DeferredReferenceGetNamedValue(Register receiver,
@@ -7058,1911 +7083,6 @@ void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(r3);
-
- // Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
- __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ Push(cp, r3);
- __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Setup the object header.
- __ LoadRoot(r2, Heap::kContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // Setup the fixed slots.
- __ mov(r1, Operand(Smi::FromInt(0)));
- __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
- // Copy the global object from the surrounding context.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ pop();
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, ip);
- __ b(eq, &slow_case);
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(r3);
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, expected_map_index);
- __ cmp(r3, ip);
- __ Assert(eq, message);
- __ pop(r3);
- }
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- &slow_case,
- TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
- }
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
- public:
- ConvertToDoubleStub(Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return ConvertToDouble; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "ConvertToDoubleStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("ConvertToDoubleStub\n"); }
-#endif
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
- Register exponent = result1_;
- Register mantissa = result2_;
-#else
- Register exponent = result2_;
- Register mantissa = result1_;
-#endif
- Label not_special;
- // Convert from Smi to integer.
- __ mov(source_, Operand(source_, ASR, kSmiTagSize));
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
- // Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand(0), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(source_, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
- static const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
- // 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand(0));
- __ Ret();
-
- __ bind(&not_special);
- // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ CountLeadingZeros(zeros_, source_, mantissa);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here. Use a fudge factor to
- // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
- // that fit in the ARM's constant field.
- int fudge = 0x400;
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
- __ add(mantissa, mantissa, Operand(fudge));
- __ orr(exponent,
- exponent,
- Operand(mantissa, LSL, HeapNumber::kExponentShift));
- // Shift up the source chopping the top bit off.
- __ add(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ mov(source_, Operand(source_, LSL, zeros_));
- // Compute lower part of fraction (last 12 bits).
- __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
- // And the top (top 20 bits).
- __ orr(exponent,
- exponent,
- Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
- __ Ret();
-}
-
-
-// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- Label max_negative_int;
- // the_int_ has the answer which is a signed int32 but not a Smi.
- // We test for the special value that has a different exponent. This test
- // has the neat side effect of setting the flags according to the sign.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int_, Operand(0x80000000u));
- __ b(eq, &max_negative_int);
- // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
- uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ mov(scratch_, Operand(non_smi_exponent));
- // Set the sign bit in scratch_ if the value was negative.
- __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
- // Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
- // We should be masking the implict first digit of the mantissa away here,
- // but it just ends up combining harmlessly with the last digit of the
- // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
- // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kExponentOffset));
- __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kMantissaOffset));
- __ Ret();
-
- __ bind(&max_negative_int);
- // The max negative int32 is stored as a positive number in the mantissa of
- // a double because it uses a sign bit instead of using two's complement.
- // The actual mantissa bits stored are all 0 because the implicit most
- // significant 1 bit is not stored.
- non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(ip, Operand(0));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
- __ Ret();
-}
-
-
-// Handle the case where the lhs and rhs are the same object.
-// Equality is almost reflexive (everything but NaN), so this is a test
-// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cc,
- bool never_nan_nan) {
- Label not_identical;
- Label heap_number, return_equal;
- __ cmp(r0, r1);
- __ b(ne, &not_identical);
-
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cc != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cc == lt || cc == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
- __ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == le || cc == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cc == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- }
- }
- }
- }
-
- __ bind(&return_equal);
- if (cc == lt) {
- __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
- } else if (cc == gt) {
- __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
- } else {
- __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
- }
- __ Ret();
-
- if (cc != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cc == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
- }
- __ Ret();
- }
- // No fall through here.
- }
-
- __ bind(&not_identical);
-}
-
-
-// See comment at call site.
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- Label rhs_is_smi;
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi);
-
- // Lhs is a Smi. Check whether the rhs is a heap number.
- __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If rhs is not a number and lhs is a Smi then strict equality cannot
- // succeed. Return non-equal.
- // If rhs is r0 then there is already a non-zero value in it.
- if (!rhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP3);
- __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
- // Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- __ push(lr);
- // Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ pop(lr);
- }
-
- // We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a smi.
- __ jmp(lhs_not_nan);
-
- __ bind(&rhs_is_smi);
- // Rhs is a smi. Check whether the non-smi lhs is a heap number.
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If lhs is not a number and rhs is a smi then strict equality cannot
- // succeed. Return non-equal.
- // If lhs is r0 then there is already a non-zero value in it.
- if (!lhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- // Convert rhs to a double in d6.
- __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
- } else {
- __ push(lr);
- // Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- // Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- // Fall through to both_loaded_as_doubles.
-}
-
-
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
- Label one_is_nan, neither_is_nan;
-
- __ Sbfx(r4,
- lhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, lhs_not_nan);
- __ mov(r4,
- Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand(0));
- __ b(ne, &one_is_nan);
-
- __ bind(lhs_not_nan);
- __ Sbfx(r4,
- rhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, &neither_is_nan);
- __ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0));
- __ b(eq, &neither_is_nan);
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in r0 to make the comparison fail.
- if (cc == lt || cc == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
-
- // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cc == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- __ cmp(rhs_mantissa, Operand(lhs_mantissa));
- __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
- // Return non-zero if the numbers are unequal.
- __ Ret(ne);
-
- __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
- // If exponents are equal then return 0.
- __ Ret(eq);
-
- // Exponents are unequal. The only way we can return that the numbers
- // are equal is if one is -0 and the other is 0. We already dealt
- // with the case where both are -0 or both are 0.
- // We start by seeing if the mantissas (that are equal) or the bottom
- // 31 bits of the lhs exponent are non-zero. If so we return not
- // equal.
- __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
- __ mov(r0, Operand(r4), LeaveCC, ne);
- __ Ret(ne);
- // Now they are equal if and only if the rhs exponent is zero in its
- // low 31 bits.
- __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ Ret();
- } else {
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- __ push(lr);
- __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments.
- __ CallCFunction(ExternalReference::compare_doubles(), 4);
- __ pop(pc); // Return.
- }
-}
-
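- // Illustrative sketch, not part of the original stub code: the bitwise
- // equality test used above for the cc == eq case. Two non-NaN doubles are
- // equal exactly when their bit patterns match, except that +0 and -0 compare
- // equal even though their sign bits differ.
- static inline bool SketchNonNanDoublesEqual(double a, double b) {
-   union { double d; unsigned long long u; } x, y;
-   x.d = a;
-   y.d = b;
-   if (x.u == y.u) return true;  // Identical bit patterns.
-   // The only remaining equal pair is +0 and -0: apart from the sign bit both
-   // words are zero, so oring the patterns and clearing the sign bit leaves
-   // zero exactly in that case.
-   return ((x.u | y.u) & ~(1ULL << 63)) == 0;
- }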
-
-// See comment at call site.
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // If either operand is a JSObject or an oddball value, then they are
- // not equal since their pointers are different.
- // There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- Label first_non_object;
- // Get the type of the first operand into r2 and compare it with
- // FIRST_JS_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &first_non_object);
-
- // Return non-zero (r0 is not zero)
- Label return_not_equal;
- __ bind(&return_not_equal);
- __ Ret();
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r2, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
- __ b(ge, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r3, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- // Now that we have the types we might as well check for symbol-symbol.
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ and_(r2, r2, Operand(r3));
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(ne, &return_not_equal);
-}
-
-
-// See comment at call site.
-static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* both_loaded_as_doubles,
- Label* not_heap_numbers,
- Label* slow) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
- __ b(ne, not_heap_numbers);
- __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ cmp(r2, r3);
- __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
-
- // Both are heap numbers. Load them up then jump to the code we have
- // for that.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- }
- __ jmp(both_loaded_as_doubles);
-}
-
-
-// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // r2 is object type of rhs.
- // Ensure that no non-strings have the symbol bit set.
- Label object_test;
- STATIC_ASSERT(kSymbolTag != 0);
- __ tst(r2, Operand(kIsNotStringMask));
- __ b(ne, &object_test);
- __ tst(r2, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
- __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, not_both_strings);
- __ tst(r3, Operand(kIsSymbolMask));
- __ b(eq, possible_strings);
-
- // Both are symbols. We already checked they weren't the same pointer
- // so they are not equal.
- __ mov(r0, Operand(NOT_EQUAL));
- __ Ret();
-
- __ bind(&object_test);
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ and_(r0, r2, Operand(r3));
- __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ Ret();
-}
-
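- // Illustrative sketch, not part of the original stub code: the bit trick
- // used at the end of EmitCheckForSymbolsOrObjects above. ANDing the two map
- // bit fields keeps the undetectable bit only if both objects are
- // undetectable; XORing with the bit then yields 0 (EQUAL) in that case and a
- // non-zero value (not equal) otherwise.
- static inline int SketchUndetectableEqualityResult(int bit_field_lhs,
-                                                    int bit_field_rhs) {
-   const int kUndetectableBit = 1 << Map::kIsUndetectable;
-   int both = bit_field_lhs & bit_field_rhs & kUndetectableBit;
-   return both ^ kUndetectableBit;
- }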
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ BranchOnSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- true);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ BranchOnSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ vcmp(d0, d1);
- __ vmrs(pc);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
- } else {
- __ b(not_found);
- }
- }
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- Register probe = mask;
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native,
- 1,
- scratch1,
- scratch2);
-}
-
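- // Illustrative sketch, not part of the original stub code: the hash used by
- // the cache lookup above. The cache stores (number, string) pairs and its
- // capacity is a power of two, so 'mask' is the entry count minus one. For a
- // Smi the hash is just the Smi value ANDed with the mask; the heap number
- // case, shown below, XORs the two 32-bit halves of the double.
- static inline int SketchNumberStringCacheHash(double heap_number_value,
-                                               int mask) {
-   union { double d; unsigned u[2]; } bits;
-   bits.d = heap_number_value;
-   return static_cast<int>(
-       (bits.u[0] ^ bits.u[1]) & static_cast<unsigned>(mask));
- }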
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- __ add(offset_, object_, Operand(offset_));
- __ RecordWriteHelper(object_, offset_, scratch_);
- __ Ret();
-}
-
-
-// On entry lhs_ and rhs_ are the values to be compared.
-// On exit r0 is 0, positive or negative to indicate the result of
-// the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
-
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles, lhs_not_nan;
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Handle the case where the objects are identical. Either returns the answer
- // or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
-
- // If either is a Smi (we know that not both are), then they can only
- // be strictly equal if the other is a HeapNumber.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &not_smis);
- // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
- // 1) Return the answer.
- // 2) Go to slow.
- // 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to lhs_not_nan.
- // In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison. If VFP3 is supported the double values of the numbers have
- // been loaded into d7 and d6. Otherwise, the double values have been loaded
- // into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
-
- __ bind(&both_loaded_as_doubles);
- // The arguments have been converted to doubles and stored in d6 and d7, if
- // VFP3 is supported, or in r0, r1, r2, and r3.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP3);
- Label no_nan;
- // ARMv7 VFP3 instructions to implement double precision comparison.
- __ vcmp(d7, d6);
- __ vmrs(pc); // Move vector status bits to normal status bits.
- Label nan;
- __ b(vs, &nan);
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
-
- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r0 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc_ == lt || cc_ == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- } else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
- }
-
- __ bind(&not_smis);
- // At this point we know we are dealing with two different objects,
- // and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
- // This returns non-equal for some object types, or falls through if it
- // was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
- }
-
- Label check_for_symbols;
- Label flat_string_check;
- // Check for heap-number-heap-number comparison. Can jump to slow case,
- // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to check_for_symbols.
- // In this case r2 will contain the type of rhs_. Never falls through.
- EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
- &both_loaded_as_doubles,
- &check_for_symbols,
- &flat_string_check);
-
- __ bind(&check_for_symbols);
- // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
- // symbols.
- if (cc_ == eq && !strict_) {
- // Returns an answer for two symbols or two detectable objects.
- // Otherwise jumps to string case or not both strings case.
- // Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
- }
-
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
- __ bind(&flat_string_check);
-
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
-
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
- r2,
- r3,
- r4,
- r5);
- // Never falls through to here.
-
- __ bind(&slow);
-
- __ Push(lhs_, rhs_);
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- native = Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
- ncr = GREATER;
- } else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
- ncr = LESS;
- }
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- __ push(r0);
- }
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_JS);
-}
-
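- // Illustrative note, not part of the original stub code: why the slow path
- // above pushes GREATER for < and <= but LESS for > and >= as the NaN compare
- // result (ncr). Any comparison involving NaN must evaluate to false, so the
- // canned result is chosen so that the final condition test fails:
- //   NaN <  x : result GREATER, and (GREATER <  0) is false.
- //   NaN <= x : result GREATER, and (GREATER <= 0) is false.
- //   NaN >  x : result LESS,    and (LESS    >  0) is false.
- //   NaN >= x : result LESS,    and (LESS    >= 0) is false.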
-
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
-// The stub returns zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result;
- Label not_heap_number;
- Register scratch0 = VirtualFrame::scratch0();
-
- // HeapNumber => false iff +0, -0, or NaN.
- __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch0, ip);
- __ b(&not_heap_number, ne);
-
- __ sub(ip, tos_, Operand(kHeapObjectTag));
- __ vldr(d1, ip, HeapNumber::kValueOffset);
- __ vcmp(d1, 0.0);
- __ vmrs(pc);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand(0), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand(0), LeaveCC, vs); // for FP_NAN
- __ Ret();
-
- __ bind(&not_heap_number);
-
- // Check if the value is 'null'.
- // 'null' => false.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(tos_, ip);
- __ b(&false_result, eq);
-
- // It can be an undetectable object.
- // Undetectable => false.
- __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(ip, Map::kBitFieldOffset));
- __ and_(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch0, Operand(1 << Map::kIsUndetectable));
- __ b(&false_result, eq);
-
- // JavaScript object => true.
- __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // Check for string
- __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_NONSTRING_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // String value => false iff empty, i.e., length is zero
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- // If length is zero, "tos_" contains zero ==> false.
- // If length is not zero, "tos_" contains a non-zero value ==> true.
- __ Ret();
-
- // Return 0 in "tos_" for false.
- __ bind(&false_result);
- __ mov(tos_, Operand(0));
- __ Ret();
-}
-
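- // Illustrative sketch, not part of the original stub code: the truthiness
- // rules that ToBooleanStub::Generate implements above for the non-inlined
- // cases. The flag parameters stand in for the type checks the stub performs
- // on the map of the value.
- static inline bool SketchToBoolean(bool is_heap_number, double number_value,
-                                    bool is_null, bool is_undetectable,
-                                    bool is_string, int string_length) {
-   if (is_heap_number) {
-     // +0, -0 and NaN are false; every other number is true.
-     return number_value != 0.0 && number_value == number_value;
-   }
-   if (is_null || is_undetectable) return false;
-   if (is_string) return string_length != 0;  // The empty string is false.
-   return true;  // Any other JavaScript object is true.
- }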
-
-// We fall into this code if the operands were Smis, but the result was
- // not (e.g. overflow). We branch into this code (to the not_smi label) if
- // the operands were not both Smis. The operands are in r0 and r1. In order
-// to call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in r0 and r1 (for the
-// value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(
- MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin) {
- Label slow, slow_reverse, do_the_call;
- bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
-
- ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
- Register heap_number_map = r6;
-
- if (ShouldGenerateSmiCode()) {
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // Smi-smi case (overflow).
- // Since both are Smis there is no heap number to overwrite, so allocate.
- // The new heap number is in r5. r3 and r7 are scratch.
- __ AllocateHeapNumber(
- r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
-
- // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
- // using registers d7 and d6 for the double values.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r9);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r9);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- __ jmp(&do_the_call); // Tail call. No return.
- }
-
- // We branch here if at least one of r0 and r1 is not a Smi.
- __ bind(not_smi);
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- // After this point we have the left hand side in r1 and the right hand side
- // in r0.
- if (lhs.is(r0)) {
- __ Swap(r0, r1, ip);
- }
-
- // The type transition also calculates the answer.
- bool generate_code_to_calculate_answer = true;
-
- if (ShouldGenerateFPCode()) {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm); // Tail call.
- generate_code_to_calculate_answer = false;
- break;
-
- default:
- break;
- }
- }
-
- if (generate_code_to_calculate_answer) {
- Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
- if (mode_ == NO_OVERWRITE) {
- // In the case where there is no chance of an overwritable float we may
- // as well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
- }
-
- // Move r0 to a double in r2-r3.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_RIGHT) {
- __ mov(r5, Operand(r0)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r0 to d7.
- __ sub(r7, r0, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that second double is in r2 and r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r0);
- __ bind(&r0_is_smi);
- if (mode_ == OVERWRITE_RIGHT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r0 to double in d7.
- __ mov(r7, Operand(r0, ASR, kSmiTagSize));
- __ vmov(s15, r7);
- __ vcvt_f64_s32(d7, s15);
- if (!use_fp_registers) {
- __ vmov(r2, r3, d7);
- }
- } else {
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r4);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-
- // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
- // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
- Label r1_is_not_smi;
- if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &r1_is_not_smi);
- GenerateTypeTransition(masm); // Tail call.
- }
-
- __ bind(&finished_loading_r0);
-
- // Move r1 to a double in r0-r1.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
- __ bind(&r1_is_not_smi);
- __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- if (mode_ == OVERWRITE_LEFT) {
- __ mov(r5, Operand(r1)); // Overwrite this heap number.
- }
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // Load the double from tagged HeapNumber r1 to d6.
- __ sub(r7, r1, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- // Calling convention says that first double is in r0 and r1.
- __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
- }
- __ jmp(&finished_loading_r1);
- __ bind(&r1_is_smi);
- if (mode_ == OVERWRITE_LEFT) {
- // We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Convert smi in r1 to double in d6.
- __ mov(r7, Operand(r1, ASR, kSmiTagSize));
- __ vmov(s13, r7);
- __ vcvt_f64_s32(d6, s13);
- if (!use_fp_registers) {
- __ vmov(r0, r1, d6);
- }
- } else {
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r9);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
-
- __ bind(&finished_loading_r1);
- }
-
- if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
- __ bind(&do_the_call);
- // If we are inlining the operation using VFP3 instructions for
- // add, subtract, multiply, or divide, the arguments are in d6 and d7.
- if (use_fp_registers) {
- CpuFeatures::Scope scope(VFP3);
- // ARMv7 VFP3 instructions to implement
- // double precision, add, subtract, multiply, divide.
-
- if (Token::MUL == op_) {
- __ vmul(d5, d6, d7);
- } else if (Token::DIV == op_) {
- __ vdiv(d5, d6, d7);
- } else if (Token::ADD == op_) {
- __ vadd(d5, d6, d7);
- } else if (Token::SUB == op_) {
- __ vsub(d5, d6, d7);
- } else {
- UNREACHABLE();
- }
- __ sub(r0, r5, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ mov(pc, lr);
- } else {
- // If we did not inline the operation, then the arguments are in:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
-
- __ push(lr); // For later.
- __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
- // Call C routine that may not cause GC or other trouble. r5 is callee
- // save.
- __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
- // Store answer in the overwritable heap number.
- #if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for the coprocessor so we
- // need to subtract the tag from r5.
- __ sub(r4, r5, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
- #else
- // Double returned in registers 0 and 1.
- __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
- #endif
- __ mov(r0, Operand(r5));
- // And we are done.
- __ pop(pc);
- }
- }
- }
-
- if (!generate_code_to_calculate_answer &&
- !slow_reverse.is_linked() &&
- !slow.is_linked()) {
- return;
- }
-
- if (lhs.is(r0)) {
- __ b(&slow);
- __ bind(&slow_reverse);
- __ Swap(r0, r1, ip);
- }
-
- heap_number_map = no_reg; // Don't use this any more from here on.
-
- // We jump to here if something goes wrong (one param is not a number of any
- // sort or new-space allocation fails).
- __ bind(&slow);
-
- // Push arguments to the stack
- __ Push(r1, r0);
-
- if (Token::ADD == op_) {
- // Test for string arguments before calling runtime.
- // r1 : first argument
- // r0 : second argument
- // sp[0] : second argument
- // sp[4] : first argument
-
- Label not_strings, not_string1, string1, string1_smi2;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &not_string1);
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_string1);
-
- // First argument is a string, test second.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &string1_smi2);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, r0, r2, r4, r5, r6, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ str(r2, MemOperand(sp, 0));
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &not_strings);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
-
- __ bind(&not_strings);
- }
-
- __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
-}
-
-
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Fastest for doubles that are in the ranges
-// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
-// almost to the range of signed int32 values that are not Smis. Jumps to the
-// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
-// (excluding the endpoints).
-static void GetInt32(MacroAssembler* masm,
- Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- Label* slow) {
- Label right_exponent, done;
- // Get exponent word.
- __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- __ mov(dest, Operand(0));
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
- // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
- // split it up to avoid a constant pool entry. You can't do that in general
- // for cmp because of the overflow flag, but we know the exponent is in the
- // range 0-2047 so there is no overflow.
- int fudge_factor = 0x400;
- __ sub(scratch2, scratch2, Operand(fudge_factor));
- __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
- // If we have a match of the int32-but-not-Smi exponent then skip some logic.
- __ b(eq, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ b(gt, slow);
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
- __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
- // Dest already has a Smi zero.
- __ b(lt, &done);
- if (!CpuFeatures::IsSupported(VFP3)) {
- // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
- // get how much to shift down.
- __ rsb(dest, scratch2, Operand(30));
- }
- __ bind(&right_exponent);
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // ARMv7 VFP3 instructions implementing double precision to integer
- // conversion using round to zero.
- __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- __ vmov(d7, scratch2, scratch);
- __ vcvt_s32_f64(s15, d7);
- __ vmov(dest, s15);
- } else {
- // Get the top bits of the mantissa.
- __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- __ mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- __ rsb(dest, dest, Operand(0), LeaveCC, ne);
- }
- __ bind(&done);
-}
-
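- // Illustrative sketch, not part of the original stub code: the
- // exponent/mantissa manipulation performed by GetInt32 above for the case
- // where no VFP hardware is available. Assumes |value| < 2^31, since the stub
- // jumps to the slow case for anything outside that range.
- static inline int SketchDoubleToInt32RoundToZero(double value) {
-   union { double d; unsigned long long u; } bits;
-   bits.d = value;
-   int exponent = static_cast<int>((bits.u >> 52) & 0x7ff) - 1023;  // Unbias.
-   if (exponent < 0) return 0;  // |value| < 1.0 truncates to zero.
-   // Restore the implicit leading 1 and keep the top 31 bits of the mantissa;
-   // the discarded low bits cannot survive the shift below anyway.
-   unsigned top_bits = static_cast<unsigned>(
-       ((bits.u & 0xfffffffffffffULL) >> 22) | (1u << 30));
-   // Shift down so that exactly exponent + 1 significant bits remain.
-   int result = static_cast<int>(top_bits >> (30 - exponent));
-   return (bits.u >> 63) != 0 ? -result : result;  // Reapply the sign.
- }
-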
- // For bitwise ops where the inputs are not both Smis, here we try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value. We truncate towards zero as required
-// by the ES spec. If this is the case we do the bitwise op and see if the
-// result is a Smi. If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs and rhs. On exit the answer is in r0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- Label slow, result_not_a_smi;
- Label rhs_is_smi, lhs_is_smi;
- Label done_checking_rhs, done_checking_lhs;
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- GetInt32(masm, lhs, r3, r5, r4, &slow);
- __ jmp(&done_checking_lhs);
- __ bind(&lhs_is_smi);
- __ mov(r3, Operand(lhs, ASR, 1));
- __ bind(&done_checking_lhs);
-
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
- __ cmp(r4, heap_number_map);
- __ b(ne, &slow);
- GetInt32(masm, rhs, r2, r5, r4, &slow);
- __ jmp(&done_checking_rhs);
- __ bind(&rhs_is_smi);
- __ mov(r2, Operand(rhs, ASR, 1));
- __ bind(&done_checking_rhs);
-
- ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
-
- // r0 and r1: Original operands (Smi or heap numbers).
- // r2 and r3: Signed int32 operands.
- switch (op_) {
- case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
- case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
- case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of writing
- // the register as an unsigned int so we go to slow case if we hit this
- // case.
- if (CpuFeatures::IsSupported(VFP3)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, &slow);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default: UNREACHABLE();
- }
- // Check that the *signed* result fits in a Smi.
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ mov(r0, Operand(r2, LSL, kSmiTagSize));
- __ Ret();
-
- Label have_to_allocate, got_a_heap_number;
- __ bind(&result_not_a_smi);
- switch (mode_) {
- case OVERWRITE_RIGHT: {
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(rhs));
- break;
- }
- case OVERWRITE_LEFT: {
- __ tst(lhs, Operand(kSmiTagMask));
- __ b(eq, &have_to_allocate);
- __ mov(r5, Operand(lhs));
- break;
- }
- case NO_OVERWRITE: {
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- }
- default: break;
- }
- __ bind(&got_a_heap_number);
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r2);
- if (op_ == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
-
- if (mode_ != NO_OVERWRITE) {
- __ bind(&have_to_allocate);
- // Get a new heap number in r5. r4 and r7 are scratch.
- __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
- __ jmp(&got_a_heap_number);
- }
-
- // If all else failed then we go to the runtime system.
- __ bind(&slow);
- __ Push(lhs, rhs); // Restore stack.
- switch (op_) {
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
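- // Illustrative sketch, not part of the original stub code: the overall shape
- // of the non-Smi bitwise path above, once both operands have been truncated
- // to signed 32-bit integers. Shown for BIT_OR; the other operators only
- // differ in the middle line.
- static inline int SketchBitwiseOr(int lhs_int32, int rhs_int32,
-                                   bool* fits_in_smi) {
-   int result = lhs_int32 | rhs_int32;
-   // A value fits in a Smi if it survives the one-bit tag shift, i.e. it lies
-   // in [-2^30, 2^30 - 1]; otherwise the stub stores it in a heap number.
-   *fits_in_smi = result >= -0x40000000 && result <= 0x3fffffff;
-   return result;
- }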
-
- // Can we multiply by x with at most two shifts and an add?
-// This answers yes to all integers from 2 to 10.
-static bool IsEasyToMultiplyBy(int x) {
- if (x < 2) return false; // Avoid special cases.
- if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
- if (IsPowerOf2(x)) return true; // Simple shift.
- if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
- if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
- return false;
-}
-
-
-// Can multiply by anything that IsEasyToMultiplyBy returns true for.
-// Source and destination may be the same register. This routine does
-// not set carry and overflow the way a mul instruction would.
-static void MultiplyByKnownInt(MacroAssembler* masm,
- Register source,
- Register destination,
- int known_int) {
- if (IsPowerOf2(known_int)) {
- __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
- } else if (PopCountLessThanEqual2(known_int)) {
- int first_bit = BitPosition(known_int);
- int second_bit = BitPosition(known_int ^ (1 << first_bit));
- __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
- if (first_bit != 0) {
- __ mov(destination, Operand(destination, LSL, first_bit));
- }
- } else {
- ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
- int the_bit = BitPosition(known_int + 1);
- __ rsb(destination, source, Operand(source, LSL, the_bit));
- }
-}
-
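- // Illustrative sketch, not part of the original stub code: concrete
- // instances of the three instruction shapes MultiplyByKnownInt emits above.
- static inline int SketchTimes8(int x) { return x << 3; }  // Power of two.
- // Two bits set (10 = (1 + 4) * 2): shift-and-add, then shift.
- static inline int SketchTimes10(int x) { return (x + (x << 2)) << 1; }
- // All-ones pattern (7 = 8 - 1): one shift and a reverse subtract.
- static inline int SketchTimes7(int x) { return (x << 3) - x; }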
-
- // This function (as opposed to MultiplyByKnownInt) takes the known int in
- // a register for the cases where it doesn't know a good trick, and may
- // deliver a result that needs shifting.
-static void MultiplyByKnownInt2(
- MacroAssembler* masm,
- Register result,
- Register source,
- Register known_int_register, // Smi tagged.
- int known_int,
- int* required_shift) { // Including Smi tag shift
- switch (known_int) {
- case 3:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 1;
- break;
- case 5:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 1;
- break;
- case 6:
- __ add(result, source, Operand(source, LSL, 1));
- *required_shift = 2;
- break;
- case 7:
- __ rsb(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 9:
- __ add(result, source, Operand(source, LSL, 3));
- *required_shift = 1;
- break;
- case 10:
- __ add(result, source, Operand(source, LSL, 2));
- *required_shift = 2;
- break;
- default:
- ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
- __ mul(result, source, known_int_register);
- *required_shift = 0;
- }
-}
-
-
-// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
-// trick. See http://en.wikipedia.org/wiki/Divisibility_rule
- // Takes the sum of the digits in base (mask + 1) repeatedly until we have a
-// number from 0 to mask. On exit the 'eq' condition flags are set if the
-// answer is exactly the mask.
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // This ensures we don't need ip to use it.
- Label loop;
- __ bind(&loop);
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
-}
-
-
-void IntegerModStub::DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry) {
- ASSERT(mask > 0);
- ASSERT(mask <= 0xff); // This ensures we don't need ip to use it.
- Label loop;
- __ bind(&loop);
- __ bic(scratch, lhs, Operand(mask));
- __ and_(ip, lhs, Operand(mask));
- __ add(lhs, ip, Operand(lhs, LSR, shift1));
- __ add(lhs, lhs, Operand(scratch, LSR, shift2));
- __ bind(entry);
- __ cmp(lhs, Operand(mask));
- __ b(gt, &loop);
-}
-
-
-// Splits the number into two halves (bottom half has shift bits). The top
-// half is subtracted from the bottom half. If the result is negative then
-// rhs is added.
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs) {
- int mask = (1 << shift) - 1;
- __ and_(ip, lhs, Operand(mask));
- __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
- __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
-}
-
-
-void IntegerModStub::ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator) {
- int limit = denominator;
- while (limit * 2 <= max) limit *= 2;
- while (limit >= denominator) {
- __ cmp(lhs, Operand(limit));
- __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
- limit >>= 1;
- }
-}
-
-
-void IntegerModStub::ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits) {
- __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
- __ Ret();
-}
-
-
-// See comment for class.
-void IntegerModStub::Generate(MacroAssembler* masm) {
- __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
- __ bic(odd_number_, odd_number_, Operand(1));
- __ mov(odd_number_, Operand(odd_number_, LSL, 1));
- // We now have (odd_number_ - 1) * 2 in the register.
- // Build a switch out of branches instead of data because it avoids
- // having to teach the assembler about intra-code-object pointers
- // that are not in relative branch instructions.
- Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
- Label mod21, mod23, mod25;
- { Assembler::BlockConstPoolScope block_const_pool(masm);
- __ add(pc, pc, Operand(odd_number_));
- // When you read pc it is always 8 ahead, but when you write it you always
- // write the actual value. So we put in two nops to take up the slack.
- __ nop();
- __ nop();
- __ b(&mod3);
- __ b(&mod5);
- __ b(&mod7);
- __ b(&mod9);
- __ b(&mod11);
- __ b(&mod13);
- __ b(&mod15);
- __ b(&mod17);
- __ b(&mod19);
- __ b(&mod21);
- __ b(&mod23);
- __ b(&mod25);
- }
-
- // For each denominator we find a multiple that is almost only ones
- // when expressed in binary. Then we do the sum-of-digits trick for
- // that number. If the multiple is not 1 then we have to do a little
- // more work afterwards to get the answer into the range 0 to
- // denominator - 1.
- DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11.
- __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111.
- ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111.
- __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111.
- ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111.
- ModReduce(masm, lhs_, 0x3f, 11);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111.
- ModReduce(masm, lhs_, 0xff, 13);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111.
- __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111.
- ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111.
- ModReduce(masm, lhs_, 0xff, 19);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111.
- ModReduce(masm, lhs_, 0x3f, 21);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101.
- ModReduce(masm, lhs_, 0xff, 23);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
- DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101.
- ModReduce(masm, lhs_, 0x7f, 25);
- ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-}
-
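- // Illustrative sketch, not part of the original stub code: the digit-sum
- // trick above, written out for modulus 3 on a non-negative input. Summing
- // the base-4 digits of x preserves x modulo 3 because 4 == 1 (mod 3), just
- // as summing decimal digits preserves a number modulo 9. For example,
- // SketchMod3(100) folds 100 -> 25 -> 7 -> 4 -> 1, and 100 % 3 == 1.
- static inline int SketchMod3(unsigned x) {
-   while (x > 3) {
-     x = (x & 3) + (x >> 2);  // Sum of digits in base 4.
-   }
-   return (x == 3) ? 0 : static_cast<int>(x);  // 3 itself reduces to 0.
- }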
-
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int len = 100;
@@ -8987,2787 +7107,6 @@ const char* GenericBinaryOpStub::GetName() {
}
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- // lhs_ : x
- // rhs_ : y
- // r0 : result
-
- Register result = r0;
- Register lhs = lhs_;
- Register rhs = rhs_;
-
- // This code can't cope with other register allocations yet.
- ASSERT(result.is(r0) &&
- ((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0))));
-
- Register smi_test_reg = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
-
- // All ops need to know whether we are dealing with two Smis. Set up
- // smi_test_reg to tell us that.
- if (ShouldGenerateSmiCode()) {
- __ orr(smi_test_reg, lhs, Operand(rhs));
- }
-
- switch (op_) {
- case Token::ADD: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
- break;
- }
-
- case Token::SUB: {
- Label not_smi;
- // Fast path.
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
- if (lhs.is(r1)) {
- __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
- } else {
- __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically.
- // Return if no overflow.
- __ Ret(vc);
- __ add(r0, r0, Operand(r1)); // Revert optimistic subtract.
- }
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
- break;
- }
-
- case Token::MUL: {
- Label not_smi, slow;
- if (ShouldGenerateSmiCode()) {
- STATIC_ASSERT(kSmiTag == 0); // adjust code below
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ b(ne, &not_smi);
- // Remove tag from one operand (but keep sign), so that result is Smi.
- __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
- // Do multiplication
- // scratch = lower 32 bits of ip * lhs.
- __ smull(scratch, scratch2, lhs, ip);
- // Go slow on overflows (overflow bit is not set).
- __ mov(ip, Operand(scratch, ASR, 31));
- // No overflow if higher 33 bits are identical.
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &slow);
- // Go slow on zero result to handle -0.
- __ tst(scratch, Operand(scratch));
- __ mov(result, Operand(scratch), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, rhs, Operand(lhs), SetCC);
- __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
- // Slow case. We fall through here if we multiplied a negative number
- // with 0, because that would mean we should produce -0.
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
- break;
- }
-
- case Token::DIV:
- case Token::MOD: {
- Label not_smi;
- if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
- Label lhs_is_unsuitable;
- __ BranchOnNotSmi(lhs, &not_smi);
- if (IsPowerOf2(constant_rhs_)) {
- if (op_ == Token::MOD) {
- __ and_(rhs,
- lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
- SetCC);
- // We now have the answer, but if the input was negative we also
- // have the sign bit. Our work is done if the result is
- // positive or zero:
- if (!rhs.is(r0)) {
- __ mov(r0, rhs, LeaveCC, pl);
- }
- __ Ret(pl);
- // A mod of a negative left hand side must return a negative number.
- // Unfortunately if the answer is 0 then we must return -0. And we
- // already optimistically trashed rhs so we may need to restore it.
- __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
- // Next two instructions are conditional on the answer being -0.
- __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
- __ b(eq, &lhs_is_unsuitable);
- // We need to subtract the divisor. E.g. -3 % 4 == -3.
- __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
- } else {
- ASSERT(op_ == Token::DIV);
- __ tst(lhs,
- Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
- __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder.
- int shift = 0;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- d >>= 1;
- shift++;
- }
- __ mov(r0, Operand(lhs, LSR, shift));
- __ bic(r0, r0, Operand(kSmiTagMask));
- }
- } else {
- // Not a power of 2.
- __ tst(lhs, Operand(0x80000000u));
- __ b(ne, &lhs_is_unsuitable);
- // Find a fixed point reciprocal of the divisor so we can divide by
- // multiplying.
- double divisor = 1.0 / constant_rhs_;
- int shift = 32;
- double scale = 4294967296.0; // 1 << 32.
- uint32_t mul;
- // Maximise the precision of the fixed point reciprocal.
- while (true) {
- mul = static_cast<uint32_t>(scale * divisor);
- if (mul >= 0x7fffffff) break;
- scale *= 2.0;
- shift++;
- }
- mul++;
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- __ mov(scratch2, Operand(mul));
- __ umull(scratch, scratch2, scratch2, lhs);
- __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
- // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
- // rhs is still the known rhs. rhs is Smi tagged.
- // lhs is still the unknown lhs. lhs is Smi tagged.
- int required_scratch_shift = 0; // Including the Smi tag shift of 1.
- // scratch = scratch2 * rhs.
- MultiplyByKnownInt2(masm,
- scratch,
- scratch2,
- rhs,
- constant_rhs_,
- &required_scratch_shift);
- // scratch << required_scratch_shift is now the Smi tagged rhs *
- // (lhs / rhs) where / indicates integer division.
- if (op_ == Token::DIV) {
- __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
- __ b(ne, &lhs_is_unsuitable); // There was a remainder.
- __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
- } else {
- ASSERT(op_ == Token::MOD);
- __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
- }
- }
- __ Ret();
- __ bind(&lhs_is_unsuitable);
- } else if (op_ == Token::MOD &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS) {
- // Do generate a bit of smi code for modulus even though the default for
- // modulus is not to do it, but since the ARM processor has no coprocessor
- // support for modulus, checking for smis makes sense. We can handle
- // 1 to 25 times any power of 2. This covers over half the numbers from
- // 1 to 100 including all of the first 25. (Actually the constants < 10
- // are handled above by reciprocal multiplication. We only get here for
- // those cases if the right hand side is not a constant or for cases
- // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
- // stub.)
- Label slow;
- Label not_power_of_2;
- ASSERT(!ShouldGenerateSmiCode());
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
- // Check for two positive smis.
- __ orr(smi_test_reg, lhs, Operand(rhs));
- __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
- __ b(ne, &slow);
- // Check that rhs is a power of two and not zero.
- Register mask_bits = r3;
- __ sub(scratch, rhs, Operand(1), SetCC);
- __ b(mi, &slow);
- __ and_(mask_bits, rhs, Operand(scratch), SetCC);
- __ b(ne, &not_power_of_2);
- // Calculate power of two modulus.
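// (For non-negative values and a power-of-two modulus m, x % m == x & (m - 1);
// scratch already holds rhs - 1 from the power-of-two check above, and the
// zero Smi tag keeps the identity valid on the tagged values.)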
- __ and_(result, lhs, Operand(scratch));
- __ Ret();
-
- __ bind(&not_power_of_2);
- __ eor(scratch, scratch, Operand(mask_bits));
- // At least two bits are set in the modulus. The high one(s) are in
- // mask_bits and the low one is scratch + 1.
- __ and_(mask_bits, scratch, Operand(lhs));
- Register shift_distance = scratch;
- scratch = no_reg;
-
- // The rhs consists of a power of 2 multiplied by some odd number.
- // The power-of-2 part we handle by putting the corresponding bits
- // from the lhs in the mask_bits register, and the power in the
- // shift_distance register. Shift distance is never 0 due to Smi
- // tagging.
- __ CountLeadingZeros(r4, shift_distance, shift_distance);
- __ rsb(shift_distance, r4, Operand(32));
-
- // Now we need to find out what the odd number is. The last bit is
- // always 1.
- Register odd_number = r4;
- __ mov(odd_number, Operand(rhs, LSR, shift_distance));
- __ cmp(odd_number, Operand(25));
- __ b(gt, &slow);
-
- IntegerModStub stub(
- result, shift_distance, odd_number, mask_bits, lhs, r5);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call.
-
- __ bind(&slow);
- }
- HandleBinaryOpSlowCases(
- masm,
- &not_smi,
- lhs,
- rhs,
- op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label slow;
- STATIC_ASSERT(kSmiTag == 0); // adjust code below
- __ tst(smi_test_reg, Operand(kSmiTagMask));
- __ b(ne, &slow);
- Register scratch2 = smi_test_reg;
- smi_test_reg = no_reg;
- switch (op_) {
- case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
- case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
- case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(result, Operand(lhs, ASR, scratch2));
- // Smi tag result.
- __ bic(result, result, Operand(kSmiTagMask));
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch, Operand(0xc0000000));
- __ b(ne, &slow);
- // Smi tag result.
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x
- __ GetLeastBitsFromSmi(scratch2, rhs, 5);
- __ mov(scratch, Operand(scratch, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch, Operand(0x40000000), SetCC);
- __ b(mi, &slow);
- __ mov(result, Operand(scratch, LSL, kSmiTagSize));
- break;
- default: UNREACHABLE();
- }
- __ Ret();
- __ bind(&slow);
- HandleNonSmiBitwiseOp(masm, lhs, rhs);
- break;
- }
-
- default: UNREACHABLE();
- }
- // This code should be unreachable.
- __ stop("Unreachable");
-
- // Generate an unreachable reference to the DEFAULT stub so that it can be
- // found at the end of this stub when clearing ICs at GC.
- // TODO(kaznacheev): Check performance impact and get rid of this.
- if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
- GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
- __ CallStub(&uninit);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
- __ Push(r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Argument is a number and is on the stack and in r0.
- Label runtime_call;
- Label input_not_smi;
- Label loaded;
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Load argument and check if it is a smi.
- __ BranchOnNotSmi(r0, &input_not_smi);
-
- CpuFeatures::Scope scope(VFP3);
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &runtime_call,
- true);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
-
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- __ mov(r0,
- Operand(ExternalReference::transcendental_cache_array_address()));
- // r0 points to cache array.
- __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(r0, Operand(0));
- __ b(eq, &runtime_call);
-
-#ifdef DEBUG
- // Check that the layout of cache elements matches expectations.
- { TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(r0, r0, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ b(ne, &runtime_call);
- __ cmp(r3, r5);
- __ b(ne, &runtime_call);
- // Cache hit. Load result, pop argument and return.
- __ mov(r0, Operand(r6));
- __ pop();
- __ Ret();
- }
-
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
-}
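// A small host-side sketch (assumed helper name, not a V8 API) of the cache
// hash computed above: split the double into its two 32-bit halves (low word
// first on a little-endian target such as ARM), mix them with the same
// arithmetic shifts the stub uses, and mask with the cache size, which the
// ASSERT above requires to be a power of two. Each entry found this way is
// 12 bytes wide (two uint32_t inputs plus a pointer), which is why the stub
// indexes it as &cache[hash * 12].
#include <cstdint>
#include <cstring>

static inline uint32_t TranscendentalHashSketch(double value,
                                                uint32_t cache_size) {
  uint32_t halves[2];
  std::memcpy(halves, &value, sizeof(halves));  // halves[0]: low, halves[1]: high.
  int32_t h = static_cast<int32_t>(halves[0] ^ halves[1]);
  h ^= h >> 16;  // Signed right shifts, matching the ASR operands above.
  h ^= h >> 8;
  return static_cast<uint32_t>(h) & (cache_size - 1);
}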
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Do tail-call to runtime routine. Runtime routines expect at least one
- // argument, so give it a Smi.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-
- __ StubReturn(1);
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &try_float);
-
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- if (negative_zero_ == kStrictNegativeZero) {
- // If we have to check for zero, then we can check for the max negative
- // smi while we are at it.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, &slow);
- __ rsb(r0, r0, Operand(0));
- __ StubReturn(1);
- } else {
- // The value of the expression is a smi and 0 is OK for -0. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r0, r0, Operand(0), SetCC);
- __ StubReturn(1, vc);
- // We don't have to reverse the optimistic neg since the only case
- // where we fall through is the minimum negative Smi, which is the case
- // where the neg leaves the register unchanged.
- __ jmp(&slow); // Go slow on max negative Smi.
- }
-
- __ bind(&try_float);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
-
- // Convert the heap number in r0 to an untagged integer in r1.
- GetInt32(masm, r0, r1, r2, r3, &slow);
-
- // Do the bitwise operation (move negated) and check if the result
- // fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ add(r2, r1, Operand(0x40000000), SetCC);
- __ b(mi, &try_float);
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
- __ b(&done);
-
- __ bind(&try_float);
- if (overwrite_ != UNARY_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite r0 until
- // we're sure we can do it without going through the slow case
- // that needs the value in r0.
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ mov(r0, Operand(r2));
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r2, HeapNumber::kValueOffset);
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- } else {
- UNIMPLEMENTED();
- }
-
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
-}
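// A standalone sketch (assumed helper name, not V8 code) of the heap-number
// negation fast path above: flipping the IEEE-754 sign bit is all the eor
// with HeapNumber::kSignMask on the exponent word does, so no arithmetic on
// the mantissa is needed and -0 falls out naturally.
#include <cstdint>
#include <cstring>

static inline double NegateDoubleBySignFlip(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= uint64_t{1} << 63;  // Flip only the sign bit.
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}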
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // r0 holds the exception.
-
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
- __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
- __ ldr(sp, MemOperand(r3));
-
- // Restore the next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(r2);
- __ str(r2, MemOperand(r3));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of a
- // JS entry frame.
- __ cmp(fp, Operand(0));
- // Set cp to NULL if fp is NULL.
- __ mov(cp, Operand(0), LeaveCC, eq);
- // Restore cp otherwise.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ pop(pc);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop sp to the top stack handler.
- __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
- __ ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ ldr(r2, MemOperand(sp, kStateOffset));
- __ cmp(r2, Operand(StackHandler::ENTRY));
- __ b(eq, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ ldr(sp, MemOperand(sp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to the next handler past the current ENTRY handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(r2);
- __ str(r2, MemOperand(r3));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ mov(r0, Operand(false));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ str(r0, MemOperand(r2));
- }
-
- // Stack layout at this point. See also StackHandlerConstants.
- // sp -> state (ENTRY)
- // fp
- // lr
-
- // Discard handler state (r2 is not used) and restore frame pointer.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of a
- // JS entry frame.
- __ cmp(fp, Operand(0));
- // Set cp to NULL if fp is NULL.
- __ mov(cp, Operand(0), LeaveCC, eq);
- // Restore cp otherwise.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ pop(pc);
-}
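// A plain-C++ sketch of the stack handler layout that the two unwinding
// routines above rely on. The offsets come from the STATIC_ASSERTs and the
// kStateOffset load; the field names are assumptions made for illustration.
struct StackHandlerLayoutSketch {
  void* next;   // StackHandlerConstants::kNextOffset == 0.
  void* state;  // kStateOffset, inferred to be 1 * kPointerSize.
  void* fp;     // kFPOffset == 2 * kPointerSize.
  void* pc;     // kPCOffset == 3 * kPointerSize.
};  // sizeof == 4 * kPointerSize, matching the asserted kSize.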
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate,
- int frame_alignment_skew) {
- // r0: result parameter for PerformGC, if any
- // r4: number of arguments including receiver (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to the first argument (C callee-saved)
-
- if (do_gc) {
- // Passing r0.
- __ PrepareCallCFunction(1, r1);
- __ CallCFunction(ExternalReference::perform_gc_function(), 1);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate) {
- __ mov(r0, Operand(scope_depth));
- __ ldr(r1, MemOperand(r0));
- __ add(r1, r1, Operand(1));
- __ str(r1, MemOperand(r0));
- }
-
- // Call C built-in.
- // r0 = argc, r1 = argv
- __ mov(r0, Operand(r4));
- __ mov(r1, Operand(r6));
-
- int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
-#if defined(V8_HOST_ARCH_ARM)
- if (FLAG_debug_code) {
- if (frame_alignment > kPointerSize) {
- Label alignment_as_expected;
- ASSERT(IsPowerOf2(frame_alignment));
- __ sub(r2, sp, Operand(frame_alignment_skew));
- __ tst(r2, Operand(frame_alignment_mask));
- __ b(eq, &alignment_as_expected);
- // Don't use Check here, as it will call Runtime_Abort, re-entering here.
- __ stop("Unexpected alignment");
- __ bind(&alignment_as_expected);
- }
- }
-#endif
-
- // Just before the call (jump) below, lr is pushed, so the actual skew at the
- // call is the current skew plus one pointer size.
- int alignment_before_call =
- (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
- if (alignment_before_call > 0) {
- // Push until the alignment before the call is met.
- __ mov(r2, Operand(0));
- for (int i = alignment_before_call;
- (i & frame_alignment_mask) != 0;
- i += kPointerSize) {
- __ push(r2);
- }
- }
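// (Worked example, assuming 4-byte pointers and the usual 8-byte EABI
// activation frame alignment: the three GenerateCore calls below pass skews
// of -kPointerSize, 0 and kPointerSize, giving alignment_before_call values
// of 0, 4 and 0, so only the second call pushes a padding word before lr.)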
-
- // TODO(1242173): To let the GC traverse the return address of the exit
- // frames, we need to know where the return address is. Right now,
- // we push it on the stack to be able to find it again, but we never
- // restore from it in case of changes, which makes it impossible to
- // support moving the C entry code stub. This should be fixed, but currently
- // this is OK because the CEntryStub gets generated so early in the V8 boot
- // sequence that it is not moving ever.
- masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
- masm->push(lr);
- masm->Jump(r5);
-
- // Restore sp back to before aligning the stack.
- if (alignment_before_call > 0) {
- __ add(sp, sp, Operand(alignment_before_call));
- }
-
- if (always_allocate) {
- // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
- // though (they contain the result).
- __ mov(r2, Operand(scope_depth));
- __ ldr(r3, MemOperand(r2));
- __ sub(r3, r3, Operand(1));
- __ str(r3, MemOperand(r2));
- }
-
- // check for failure result
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- // Lower 2 bits of r2 are 0 iff r0 has failure tag.
- __ add(r2, r0, Operand(1));
- __ tst(r2, Operand(kFailureTagMask));
- __ b(eq, &failure_returned);
-
- // Exit C frame and return.
- // r0:r1: result
- // sp: stack pointer
- // fp: frame pointer
- __ LeaveExitFrame(mode_);
-
- // check if we should retry or throw exception
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ b(eq, &retry);
-
- // Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ b(eq, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
- __ ldr(r3, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ ldr(r0, MemOperand(ip));
- __ str(r3, MemOperand(ip));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(r0, Operand(Factory::termination_exception()));
- __ b(eq, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
-}
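// Standalone sketch of the failure-tag test above (the tag width is assumed
// from the "Lower 2 bits" comment and the STATIC_ASSERT): a failure object
// keeps kFailureTag in its low bits, so adding one clears exactly those bits.
#include <cstdint>

static inline bool LooksLikeFailureSketch(intptr_t returned_value) {
  const intptr_t kAssumedFailureTagMask = 3;  // Assumption: kFailureTagSize == 2.
  return ((returned_value + 1) & kAssumedFailureTagMask) == 0;
}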
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // r0: number of arguments including receiver
- // r1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- // Result returned in r0 or r0+r1 by default.
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_);
-
- // r4: number of arguments (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to first argument (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false,
- -kPointerSize);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false,
- 0);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true,
- kPointerSize);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // [sp+0]: argv
-
- Label invoke, exit;
-
- // Called from C, so do not pop argc and args on exit (preserve sp)
- // No need to save register-passed args
- // Save callee-saved registers (incl. cp and fp), sp, and lr
- __ stm(db_w, sp, kCalleeSaved | lr.bit());
-
- // Get address of argv, see stm above.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
-
- // Push a frame with special values setup to mark it as an entry frame.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
- __ mov(r6, Operand(Smi::FromInt(marker)));
- __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
- __ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
-
- // Setup frame pointer for the frame to be pushed.
- __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Call a faked try-block that does the invoke.
- __ bl(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ str(r0, MemOperand(ip));
- __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bl(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
- __ ldr(r5, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ str(r5, MemOperand(ip));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ mov(ip, Operand(construct_entry));
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ mov(ip, Operand(entry));
- }
- __ ldr(ip, MemOperand(ip)); // deref address
-
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc.
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Unlink this frame from the handler chain. When reading the
- // address of the next handler, there is no need to use the address
- // displacement since the current stack pointer (sp) points directly
- // to the stack handler.
- __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
- __ str(r3, MemOperand(ip));
- // No need to restore registers
- __ add(sp, sp, Operand(StackHandlerConstants::kSize));
-
-
- __ bind(&exit); // r0 holds result
- // Restore the top frame descriptors from the stack.
- __ pop(r3);
- __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
- __ str(r3, MemOperand(ip));
-
- // Reset the stack to the callee saved registers.
- __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Restore callee-saved registers and return.
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
- __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
-}
-
-
-// This stub performs an instanceof, calling the builtin function if
- // necessary. Uses r0 for the object, r1 for the function that it may
-// be an instance of (these are fetched from the stack).
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Get the object - slow case for smis (we may need to throw an exception
- // depending on the rhs).
- Label slow, loop, is_instance, is_not_instance;
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ BranchOnSmi(r0, &slow);
-
- // Check that the left hand is a JS object and put map in r3.
- __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &slow);
- __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
- __ b(gt, &slow);
-
- // Get the prototype of the function (r4 is result, r2 is scratch).
- __ ldr(r1, MemOperand(sp, 0));
- // r1 is function, r3 is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &miss);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ pop();
- __ pop();
- __ mov(pc, Operand(lr));
-
- __ bind(&miss);
- __ TryGetFunctionPrototype(r1, r4, r2, &slow);
-
- // Check that the function prototype is a JS object.
- __ BranchOnSmi(r4, &slow);
- __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &slow);
- __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
- __ b(gt, &slow);
-
- __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
-
- // Register mapping: r3 is object map and r4 is function prototype.
- // Get prototype of object into r2.
- __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- __ bind(&loop);
- __ cmp(r2, Operand(r4));
- __ b(eq, &is_instance);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r2, ip);
- __ b(eq, &is_not_instance);
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ pop();
- __ pop();
- __ mov(pc, Operand(lr)); // Return.
-
- __ bind(&is_not_instance);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ pop();
- __ pop();
- __ mov(pc, Operand(lr)); // Return.
-
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
-}
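// Conceptual sketch of the prototype-chain walk above, with plain pointers
// and an assumed accessor rather than the V8 object model: follow prototypes
// from the object's map until the function's prototype (an instance) or null
// (not an instance) is reached. Note that the stub itself stores Smi 0 for
// "is an instance" and Smi 1 for "is not", matching what its callers expect.
#include <functional>

static bool IsInstanceOfSketch(
    const void* prototype,
    const void* function_prototype,
    const void* null_value,
    const std::function<const void*(const void*)>& get_prototype) {
  while (prototype != null_value) {
    if (prototype == function_prototype) return true;
    prototype = get_prototype(prototype);  // Assumed stand-in for map->prototype.
  }
  return false;
}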
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- static const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ BranchOnNotSmi(r1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register r0. Use unsigned comparison to get negative
- // check for free.
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the stack and return it.
- __ sub(r3, r0, r1);
- __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the adaptor frame and return it.
- __ sub(r3, r0, r1);
- __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(r1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
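// Minimal sketch of the bounds check used twice above: comparing the
// smi-tagged index against the smi-tagged count with an unsigned branch (cs)
// rejects out-of-range and negative indices in a single test, since a
// negative smi becomes a very large unsigned value.
#include <cstdint>

static inline bool SmiIndexInRangeSketch(int32_t smi_index, int32_t smi_count) {
  return static_cast<uint32_t>(smi_index) < static_cast<uint32_t>(smi_count);
}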
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-
- // Get the length from the frame.
- __ ldr(r1, MemOperand(sp, 0));
- __ b(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r1, MemOperand(sp, 0));
- __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ cmp(r1, Operand(0));
- __ b(eq, &add_arguments_object);
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
- __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(
- r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
- __ ldr(r4, MemOperand(r4, offset));
-
- // Copy the JS object part.
- __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
-
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::arguments_callee_index == 0);
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::arguments_length_index == 1);
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
-
- // If there are no actual arguments, we're done.
- Label done;
- __ cmp(r1, Operand(0));
- __ b(eq, &done);
-
- // Get the parameters pointer from the stack.
- __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
- __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
-
- // Copy the fixed array slots.
- Label loop;
- // Setup r4 to point to the first array slot.
- __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement r2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
- // Post-increment r4 with kPointerSize on each iteration.
- __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
- __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand(0));
- __ b(ne, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
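// Sketch of the allocation-size computation above, in words, with the heap
// constants passed in as parameters since their values are not spelled out
// here: an empty arguments object allocates only the JSObject part, while a
// non-empty one also needs a FixedArray header plus one word per argument.
static inline int ArgumentsAllocationWordsSketch(int argc,
                                                 int fixed_array_header_words,
                                                 int arguments_object_words) {
  int words = (argc == 0) ? 0 : argc + fixed_array_header_words;
  return words + arguments_object_words;
}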
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if the regexp entry in generated code is turned off by the runtime
- // switch or at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // sp[0]: last_match_info (expected JSArray)
- // sp[4]: previous index
- // sp[8]: subject string
- // sp[12]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 0 * kPointerSize;
- static const int kPreviousIndexOffset = 1 * kPointerSize;
- static const int kSubjectOffset = 2 * kPointerSize;
- static const int kJSRegExpOffset = 3 * kPointerSize;
-
- Label runtime, invoke_regexp;
-
- // Allocation of registers for this function. These are in callee save
- // registers and will be preserved by the call to the native RegExp code, as
- // this code is called using the normal C calling convention. When calling
- // directly from generated code the native RegExp code will not do a GC and
- // therefore the contents of these registers are safe to use after the call.
- Register subject = r4;
- Register regexp_data = r5;
- Register last_match_info_elements = r6;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(r0, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r0, MemOperand(r0, 0));
- __ tst(r0, Operand(r0));
- __ b(eq, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- __ b(ne, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ tst(regexp_data, Operand(kSmiTagMask));
- __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
- __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
- __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // regexp_data: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
- __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ b(ne, &runtime);
-
- // regexp_data: RegExp data (FixedArray)
- // Check that the number of captures fits in the static offsets vector buffer.
- __ ldr(r2,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the assumption that smis are 2 * their untagged value.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r2, r2, Operand(2)); // r2 was a smi.
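// (A smi already encodes 2 * number_of_captures, so adding 2 directly yields
// (number_of_captures + 1) * 2 registers without any untagging.)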
- // Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
- __ b(hi, &runtime);
-
- // r2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
- __ ldr(subject, MemOperand(sp, kSubjectOffset));
- __ tst(subject, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- Condition is_string = masm->IsObjectStringType(subject, r0);
- __ b(NegateCondition(is_string), &runtime);
- // Get the length of the string to r3.
- __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
-
- // r2: Number of capture registers
- // r3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &runtime);
- __ cmp(r3, Operand(r0));
- __ b(ls, &runtime);
-
- // r2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- __ b(ne, &runtime);
- // Check that the JSArray is in fast case.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r0, ip);
- __ b(ne, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ ldr(r0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
- __ b(gt, &runtime);
-
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string.
- __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ b(eq, &seq_string);
-
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
- __ b(ne, &runtime);
- __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
- __ cmp(r0, r1);
- __ b(ne, &runtime);
- __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // Is first part a flat string?
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r0, Operand(kStringRepresentationMask));
- __ b(nz, &runtime);
-
- __ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r0: Instance type of subject string
- STATIC_ASSERT(4 == kAsciiStringTag);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
- __ and_(r0, r0, Operand(kStringEncodingMask));
- __ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
-
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object; otherwise it contains
- // the hole.
- __ CompareObjectType(r7, r0, r0, CODE_TYPE);
- __ b(ne, &runtime);
-
- // r3: encoding of subject string (1 if ascii, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
-
- // r1: previous index
- // r3: encoding of subject string (1 if ascii, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
-
- static const int kRegExpExecuteArguments = 7;
- __ push(lr);
- __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
-
- // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
- __ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
-
- // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
- __ mov(r0, Operand(address_of_regexp_stack_memory_address));
- __ ldr(r0, MemOperand(r0, 0));
- __ mov(r2, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r2, MemOperand(r2, 0));
- __ add(r0, r0, Operand(r2));
- __ str(r0, MemOperand(sp, 1 * kPointerSize));
-
- // Argument 5 (sp[0]): static offsets vector buffer.
- __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
- __ str(r0, MemOperand(sp, 0 * kPointerSize));
-
- // For arguments 4 and 3 get string length, calculate start of string data and
- // calculate the shift of the index (0 for ASCII and 1 for two byte).
- __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ eor(r3, r3, Operand(1));
- // Argument 4 (r3): End of string data
- // Argument 3 (r2): Start of string data
- __ add(r2, r9, Operand(r1, LSL, r3));
- __ add(r3, r9, Operand(r0, LSL, r3));
-
- // Argument 2 (r1): Previous index.
- // Already there
-
- // Argument 1 (r0): Subject string.
- __ mov(r0, subject);
-
- // Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r7, kRegExpExecuteArguments);
- __ pop(lr);
-
- // r0: result
- // subject: subject string (callee saved)
- // regexp_data: RegExp data (callee saved)
- // last_match_info_elements: Last match info elements (callee saved)
-
- // Check the result.
- Label success;
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
- __ b(eq, &success);
- Label failure;
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
- __ b(eq, &failure);
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
- // If not an exception, it can only be retry. Handle that in the runtime system.
- __ b(ne, &runtime);
- // Result must now be exception. If there is no pending exception already, a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // the exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
- __ ldr(r0, MemOperand(r0, 0));
- __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ ldr(r1, MemOperand(r1, 0));
- __ cmp(r0, r1);
- __ b(eq, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(r0, Operand(Factory::null_value()));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Process the result from the native regexp code.
- __ bind(&success);
- __ ldr(r1,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r1, r1, Operand(2)); // r1 was a smi.
-
- // r1: number of capture registers
- // r4: subject string
- // Store the capture count.
- __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
- __ str(r2, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
- // Store last subject and last input.
- __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
- __ mov(r3, last_match_info_elements);
- __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector();
- __ mov(r2, Operand(address_of_static_offsets_vector));
-
- // r1: number of capture registers
- // r2: offsets vector
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wrapping after zero.
- __ add(r0,
- last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
- __ bind(&next_capture);
- __ sub(r1, r1, Operand(1), SetCC);
- __ b(mi, &done);
- // Read the value from the static offsets vector buffer.
- __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
- // Store the smi value in the last match info.
- __ mov(r3, Operand(r3, LSL, kSmiTagSize));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- Label receiver_is_value, receiver_is_js_object;
- __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ BranchOnSmi(r1, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
- __ b(ge, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(r1);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
- __ LeaveInternalFrame();
- __ str(r0, MemOperand(sp, argc_ * kPointerSize));
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ BranchOnSmi(r1, &slow);
- // Get the map of the function object.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc_);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Setup the number of arguments.
- __ mov(r2, Operand(0));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
- const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
-
- const char* strict_name = "";
- if (strict_ && (cc_ == eq || cc_ == ne)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s%s%s",
- cc_name,
- lhs_name,
- rhs_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name);
- return name_;
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
-}
-
-
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi trigger the non-string case.
- __ BranchOnSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ tst(result_, Operand(kIsNotStringMask));
- __ b(ne, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- __ BranchOnNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(scratch_));
- __ b(ls, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(eq, &flat_string);
-
- // Handle non-flat strings.
- __ tst(result_, Operand(kIsConsStringMask));
- __ b(eq, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result_, Operand(ip));
- __ b(ne, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result_, Operand(kStringRepresentationMask));
- __ b(nz, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ tst(result_, Operand(kStringEncodingMask));
- __ b(nz, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register. We can
- // add without shifting since the smi tag size is the log2 of the
- // number of bytes in a two-byte character.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ add(scratch_, object_, Operand(scratch_));
- __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
- __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
-
- __ bind(&got_char_code);
- __ mov(result_, Operand(result_, LSL, kSmiTagSize));
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- scratch_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- true);
- call_helper.BeforeCall(masm);
- __ Push(object_, index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ Move(scratch_, r0);
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ BranchOnNotSmi(scratch_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code for getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ tst(code_,
- Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ b(nz, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point code register contains smi tagged ascii char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result_, Operand(ip));
- __ b(eq, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one byte at a time, as it is only used for very
- // short strings.
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand(0));
- }
- __ b(eq, &done);
-
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex));
- // Perform the sub between the load and the dependent store to give the load
- // time to complete.
- __ sub(count, count, Operand(1), SetCC);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
- // Branch back unless that was the last iteration.
- __ b(gt, &loop);
-
- __ bind(&done);
-}
-
-
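The loop above behaves like the following plain C++ (a sketch only): the byte count is doubled for two-byte strings, and the copy then proceeds one byte at a time with post-incremented pointers, exactly as the ldrb/strb pair in the stub does.

#include <cstdint>

void CopyCharacters(uint8_t* dest, const uint8_t* src, int count, bool ascii) {
  if (!ascii) count += count;   // Two bytes per character.
  while (count-- > 0) {
    *dest++ = *src++;           // ldrb/strb with post-index in the stub.
  }
}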
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
-
-
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
- __ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, "Destination of copy not aligned.");
- }
-
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand(0));
- }
- __ b(eq, &done);
-
- // Assume that you cannot read (or write) unaligned.
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ cmp(count, Operand(8));
- __ add(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ b(lt, &byte_loop);
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
- Label dest_aligned;
- __ b(eq, &dest_aligned);
- __ cmp(scratch4, Operand(2));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
- __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
- __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
- __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
- __ bind(&dest_aligned);
- }
-
- Label simple_loop;
-
- __ sub(scratch4, dest, Operand(src));
- __ and_(scratch4, scratch4, Operand(0x03), SetCC);
- __ b(eq, &simple_loop);
- // Shift register is number of bits in a source word that
- // must be combined with bits in the next source word in order
- // to create a destination word.
-
- // Complex loop for src/dst that are not aligned the same way.
- {
- Label loop;
- __ mov(scratch4, Operand(scratch4, LSL, 3));
- Register left_shift = scratch4;
- __ and_(src, src, Operand(~3)); // Round down to load previous word.
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- // Store the "shift" most significant bits of scratch in the least
- // significant bits (i.e., shift down by (32-shift)).
- __ rsb(scratch2, left_shift, Operand(32));
- Register right_shift = scratch2;
- __ mov(scratch1, Operand(scratch1, LSR, right_shift));
-
- __ bind(&loop);
- __ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
- __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- __ mov(scratch1, Operand(scratch3, LSR, right_shift));
- // Loop if four or more bytes left to copy.
- // Compare to eight, because we did the subtract before increasing dst.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
- __ b(ge, &loop);
- }
- // There are now between zero and three bytes left to copy (scratch5 holds the
- // negative of that number), and between one and three bytes already read into
- // scratch1 (scratch4 holds eight times that number). We may have read past
- // the end of the string, but because objects are aligned, we have not read
- // past the end of the object.
- // Find the minimum of remaining characters to move and preloaded characters
- // and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
- __ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
- // Move minimum of bytes read and bytes left to copy to scratch4.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
- // scratch ready to write.
- __ cmp(scratch5, Operand(2));
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
- // Copy any remaining bytes.
- __ b(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dst, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- __ sub(scratch3, limit, Operand(dest));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- // Compare to 8, not 4, because we do the subtraction before increasing
- // dest.
- __ cmp(scratch3, Operand(8));
- __ b(ge, &loop);
- }
-
- // Copy bytes from src to dst until dst hits limit.
- __ bind(&byte_loop);
- __ cmp(dest, Operand(limit));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
- __ b(ge, &done);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ b(&byte_loop);
-
- __ bind(&done);
-}
-
-
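The misaligned path above can be pictured as the following C++ (a conceptual sketch only; little endian, with src already rounded down to a word boundary): each output word is stitched together from two adjacent input words using a left/right shift pair that sums to 32.

#include <cstdint>

void CopyMisalignedWords(uint32_t* dest, const uint32_t* src, int words,
                         int left_shift) {
  int right_shift = 32 - left_shift;
  uint32_t carry = *src++ >> right_shift;   // Bits carried over from the
                                            // previous source word.
  while (words-- > 0) {
    uint32_t next = *src++;
    *dest++ = carry | (next << left_shift); // Combine old and new bits.
    carry = next >> right_shift;
  }
}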
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
- __ sub(scratch, c1, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
- __ b(hi, &not_array_index);
- __ sub(scratch, c2, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-
- // If the check failed, combine both characters into a single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in the c1 register.
- __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
- __ b(ls, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
-
- // Load undefined value
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, 1));
- __ sub(mask, mask, Operand(1));
-
- // Calculate untagged address of the first element of the symbol table.
- Register first_symbol_table_element = symbol_table;
- __ add(first_symbol_table_element, symbol_table,
- Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_symbol_table_element: address of the first element of
- // the symbol table
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes];
- for (int i = 0; i < kProbes; i++) {
- Register candidate = scratch5; // Scratch register contains candidate.
-
- // Calculate entry in symbol table.
- if (i > 0) {
- __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ and_(candidate, candidate, Operand(mask));
-
- // Load the entry from the symbol table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ ldr(candidate,
- MemOperand(first_symbol_table_element,
- candidate,
- LSL,
- kPointerSizeLog2));
-
- // If entry is undefined no string with this hash can be found.
- __ cmp(candidate, undefined);
- __ b(eq, not_found);
-
- // If length is not 2 the string is not a candidate.
- __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(2)));
- __ b(ne, &next_probe[i]);
-
- // Check that the candidate is a non-external ascii string.
- __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
- &next_probe[i]);
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
- __ cmp(chars, scratch);
- __ b(eq, &found_in_symbol_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- __ Move(r0, result);
-}
-
-
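A toy C++ model of the probe loop above (illustration only; the probe offsets, capacity handling and instance-type checks are simplified away, and the std::string table is a stand-in for V8's SymbolTable):

#include <cstdint>
#include <string>
#include <vector>

// Returns the matching table index, or -1 when the stub would jump to
// not_found (the slot is "undefined", or no probe matched).
int ProbeTwoCharString(const std::vector<std::string>& table, uint32_t hash,
                       char c1, char c2, int probes = 4) {
  uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // Power-of-two capacity.
  for (int i = 0; i < probes; i++) {
    uint32_t entry = (hash + static_cast<uint32_t>(i)) & mask;  // Stand-in for GetProbeOffset(i).
    const std::string& candidate = table[entry];
    if (candidate.empty()) return -1;        // Undefined slot: give up.
    if (candidate.size() == 2 && candidate[0] == c1 && candidate[1] == c2) {
      return static_cast<int>(entry);        // Found: the stub falls through with r0.
    }
  }
  return -1;                                 // No probe matched.
}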
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = character + (character << 10);
- __ add(hash, character, Operand(character, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ add(hash, hash, Operand(character));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, ASR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ add(hash, hash, Operand(hash, LSL, 3));
- // hash ^= hash >> 11;
- __ eor(hash, hash, Operand(hash, ASR, 11));
- // hash += hash << 15;
- __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
-
- // if (hash == 0) hash = 27;
- __ mov(hash, Operand(27), LeaveCC, nz);
-}
-
-
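The three hash helpers above compute the same value as this plain C++ (a sketch for illustration; the stub emits the same arithmetic as ARM instructions):

#include <cstdint>

uint32_t HashInit(uint32_t character) {
  uint32_t hash = character + (character << 10);
  return hash ^ (hash >> 6);
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;
  hash += hash << 10;
  return hash ^ (hash >> 6);
}

uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;   // Never return a zero hash.
}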
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- static const int kToOffset = 0 * kPointerSize;
- static const int kFromOffset = 1 * kPointerSize;
- static const int kStringOffset = 2 * kPointerSize;
-
-
- // Check bounds and smi-ness.
- __ ldr(r7, MemOperand(sp, kToOffset));
- __ ldr(r6, MemOperand(sp, kFromOffset));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- // I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(r7, ASR, 1), SetCC);
- __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
- // If either r7 or r6 had the smi tag bit set, then carry is set now.
- __ b(cs, &runtime); // Either "from" or "to" is not a smi.
- __ b(mi, &runtime); // From is negative.
-
- __ sub(r2, r2, Operand(r3), SetCC);
- __ b(mi, &runtime); // Fail if from > to.
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked up in the symbol table.
- __ cmp(r2, Operand(2));
- __ b(lt, &runtime);
-
- // r2: length
- // r3: from index (untagged smi)
- // r6: from (smi)
- // r7: to (smi)
-
- // Make sure first argument is a sequential (or flat) string.
- __ ldr(r5, MemOperand(sp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r5, Operand(kSmiTagMask));
- __ b(eq, &runtime);
- Condition is_string = masm->IsObjectStringType(r5, r1);
- __ b(NegateCondition(is_string), &runtime);
-
- // r1: instance type
- // r2: length
- // r3: from index (untagged smi)
- // r5: string
- // r6: from (smi)
- // r7: to (smi)
- Label seq_string;
- __ and_(r4, r1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kSeqStringTag < kConsStringTag);
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- __ cmp(r4, Operand(kConsStringTag));
- __ b(gt, &runtime); // External strings go to runtime.
- __ b(lt, &seq_string); // Sequential strings are handled directly.
-
- // Cons string. Try to recurse (once) on the first substring.
- // (This adds a little more generality than necessary to handle flattened
- // cons strings, but not much).
- __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
- __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ tst(r1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kSeqStringTag == 0);
- __ b(ne, &runtime); // Cons and External strings go to runtime.
-
- // Definitely a sequential string.
- __ bind(&seq_string);
-
- // r1: instance type.
- // r2: length
- // r3: from index (untagged smi)
- // r5: string
- // r6: from (smi)
- // r7: to (smi)
- __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
- __ cmp(r4, Operand(r7));
- __ b(lt, &runtime); // Fail if to > length.
-
- // r1: instance type.
- // r2: result string length.
- // r3: from index (untagged smi)
- // r5: string.
- // r6: from offset (smi)
- // Check for flat ascii string.
- Label non_ascii_flat;
- __ tst(r1, Operand(kStringEncodingMask));
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ b(eq, &non_ascii_flat);
-
- Label result_longer_than_two;
- __ cmp(r2, Operand(2));
- __ b(gt, &result_longer_than_two);
-
- // Sub string of length 2 requested.
- // Get the two characters forming the sub string.
- __ add(r5, r5, Operand(r3));
- __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
- __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
-
- // Try to lookup two character string in symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // r2: result string length.
- // r3: two characters combined into halfword in little endian byte order.
- __ bind(&make_two_character_string);
- __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
- __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&result_longer_than_two);
-
- // Allocate the result.
- __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
-
- // r0: result string.
- // r2: result string length.
- // r5: string.
- // r6: from offset (smi)
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate 'from' character of string.
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r6, ASR, 1));
-
- // r0: result string.
- // r1: first character of result string.
- // r2: result string length.
- // r5: first character of sub string to copy.
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- COPY_ASCII | DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_flat);
- // r2: result string length.
- // r5: string.
- // r6: from offset (smi)
- // Check for flat two byte string.
-
- // Allocate the result.
- __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
-
- // r0: result string.
- // r2: result string length.
- // r5: string.
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate 'from' character of string.
- __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // As "from" is a smi it is 2 times the value which matches the size of a two
- // byte character.
- __ add(r5, r5, Operand(r6));
-
- // r0: result string.
- // r1: first character of result.
- // r2: result length.
- // r5: first character of string to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- DEST_ALWAYS_ALIGNED);
- __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
-
-
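The guards in SubStringStub::Generate above amount to the following predicate (a C++ sketch under the assumptions listed in the comment); any failure tail-calls Runtime::kSubString:

bool SubStringFastPathApplies(bool to_is_smi, bool from_is_smi,
                              int from, int to, int string_length) {
  if (!to_is_smi || !from_is_smi) return false;  // Both indices must be smis.
  if (from < 0 || to < from) return false;       // Require 0 <= from <= to.
  if (to - from < 2) return false;               // Lengths 0 and 1 go to runtime.
  return to <= string_length;                    // And to <= string.length.
}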
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- Label compare_lengths;
- // Find minimum length and length difference.
- __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
- Register length_delta = scratch3;
- __ mov(scratch1, scratch2, LeaveCC, gt);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(min_length, Operand(min_length));
- __ b(eq, &compare_lengths);
-
- // Untag smi.
- __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
-
- // Setup registers so that we only need to increment one register
- // in the loop.
- __ add(scratch2, min_length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, Operand(scratch2));
- __ add(right, right, Operand(scratch2));
- // Registers left and right point to the min_length character of the strings.
- __ rsb(min_length, min_length, Operand(-1));
- Register index = min_length;
- // Index starts at -min_length.
-
- {
- // Compare loop.
- Label loop;
- __ bind(&loop);
- // Compare characters.
- __ add(index, index, Operand(1), SetCC);
- __ ldrb(scratch2, MemOperand(left, index), ne);
- __ ldrb(scratch4, MemOperand(right, index), ne);
- // Skip to compare lengths with eq condition true.
- __ b(eq, &compare_lengths);
- __ cmp(scratch2, scratch4);
- __ b(eq, &loop);
- // Fallthrough with eq condition false.
- }
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use zero length_delta as result.
- __ mov(r0, Operand(length_delta), SetCC, eq);
- // Fall through to here if characters compare not-equal.
- __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
- __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
- __ Ret();
-}
-
-
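Behaviourally, GenerateCompareFlatAsciiStrings above is the following C++ (a sketch only; the stub returns the ordering as a smi LESS, EQUAL or GREATER):

int CompareFlatAscii(const unsigned char* left, int left_length,
                     const unsigned char* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;  // LESS/GREATER
  }
  if (left_length == right_length) return 0;                      // EQUAL
  return left_length < right_length ? -1 : 1;                     // Order by length.
}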
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right
-
- Label not_same;
- __ cmp(r0, r1);
- __ b(ne, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
-
- // Compare flat ascii strings natively. Remove arguments from stack first.
- __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
- // Stack on entry:
- // sp[0]: second argument.
- // sp[4]: first argument.
-
- // Load the two arguments.
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
- // Load instance types.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ tst(r4, Operand(kIsNotStringMask));
- __ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &string_add_runtime);
- }
-
- // Both arguments are strings.
- // r0: first string
- // r1: second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
- {
- Label strings_not_empty;
- // Check if either of the strings are empty. In that case return the other.
- __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
- __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
- __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- STATIC_ASSERT(kSmiTag == 0);
- // Else test if second string is empty.
- __ cmp(r3, Operand(Smi::FromInt(0)), ne);
- __ b(ne, &strings_not_empty); // If either string was empty, return r0.
-
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&strings_not_empty);
- }
-
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize));
- // Both strings are non-empty.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ add(r6, r2, Operand(r3));
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
- __ cmp(r6, Operand(2));
- __ b(ne, &longer_than_two);
-
- // Check that both strings are non-external ascii strings.
- if (!string_check_) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
- &string_add_runtime);
-
- // Get the two characters forming the new string.
- __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
-
- // Try to lookup two character string in symbol table. If it is not found
- // just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&make_two_character_string);
- // The resulting string has length 2 and the first chars of the two strings
- // are combined into a single halfword in the r2 register.
- // So we can fill the resulting string with a single halfword store
- // instruction (which assumes that the processor is in little endian mode).
- __ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(r6, Operand(String::kMinNonFlatLength));
- __ b(lt, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
- __ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &string_add_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ascii the result is an ascii cons string.
- if (!string_check_) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r4, Operand(kStringEncodingMask));
- __ tst(r5, Operand(kStringEncodingMask), ne);
- __ b(eq, &non_ascii);
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
- // r4: first instance type.
- // r5: second instance type.
- __ tst(r4, Operand(kAsciiDataHintMask));
- __ tst(r5, Operand(kAsciiDataHintMask), ne);
- __ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
- __ b(eq, &ascii_data);
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are
- // sequential and that they have the same encoding.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
- // r6: sum of lengths.
- __ bind(&string_add_flat_result);
- if (!string_check_) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- // Check that both strings are sequential.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r4, Operand(kStringRepresentationMask));
- __ tst(r5, Operand(kStringRepresentationMask), eq);
- __ b(ne, &string_add_runtime);
- // Now check if both strings have the same encoding (ASCII/Two-byte).
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of lengths.
- Label non_ascii_string_add_flat_result;
- ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
- __ b(ne, &string_add_runtime);
- // And see if it's ASCII or two-byte.
- __ tst(r4, Operand(kStringEncodingMask));
- __ b(eq, &non_ascii_string_add_flat_result);
-
- // Both strings are sequential ASCII strings. We also know that they are
- // short (since the sum of the lengths is less than kMinNonFlatLength).
- // r6: length of resulting flat string
- __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
- // Locate first character of result.
- __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r0: first character of first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
-
- // Load second argument and locate first character.
- __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r1: first character of second string.
- // r3: length of second string.
- // r6: next character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
- __ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are sequential two byte strings.
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of length of strings.
- __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r7: result string.
-
- // Locate first character of result.
- __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: first character of first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
-
- // Locate first character of second argument.
- __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r1: first character of second string.
- // r3: length of second string.
- // r6: next character of result (after copy of first string).
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
-
- __ mov(r0, Operand(r7));
- __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-}
-
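The overall dispatch in StringAddStub::Generate above can be summarised by this C++ sketch (illustration only; min_non_flat_length and max_length correspond to String::kMinNonFlatLength and String::kMaxLength):

enum AddStringsPath {
  RETURN_OTHER,      // One argument is empty: return the other one.
  TWO_CHAR_LOOKUP,   // Both are one-character strings: probe the symbol table.
  CONS_STRING,       // Long enough to be worth a cons string.
  FLAT_COPY,         // Short: allocate a flat string and copy both.
  RUNTIME            // Exceptionally long: let the runtime deal with it.
};

AddStringsPath SelectPath(int length1, int length2,
                          int min_non_flat_length, int max_length) {
  if (length1 == 0 || length2 == 0) return RETURN_OTHER;
  int sum = length1 + length2;
  if (sum == 2) return TWO_CHAR_LOOKUP;
  if (sum < min_non_flat_length) return FLAT_COPY;
  if (sum > max_length) return RUNTIME;
  return CONS_STRING;
}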
-
#undef __
} } // namespace v8::internal
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index e550a62c..c522154a 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -28,8 +28,9 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-#include "ic-inl.h"
#include "ast.h"
+#include "code-stubs-arm.h"
+#include "ic-inl.h"
namespace v8 {
namespace internal {
@@ -270,8 +271,6 @@ class CodeGenerator: public AstVisitor {
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
- static const int kUnknownIntValue = -1;
-
// If the name is an inline runtime function call return the number of
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
@@ -420,7 +419,8 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
GenerateInlineSmi inline_smi,
- int known_rhs = kUnknownIntValue);
+ int known_rhs =
+ GenericBinaryOpStub::kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
@@ -550,6 +550,9 @@ class CodeGenerator: public AstVisitor {
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+ void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+ void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -612,510 +615,6 @@ class CodeGenerator: public AstVisitor {
};
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
-class GenericBinaryOpStub : public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- Register lhs,
- Register rhs,
- int constant_rhs = CodeGenerator::kUnknownIntValue)
- : op_(op),
- mode_(mode),
- lhs_(lhs),
- rhs_(rhs),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
- name_(NULL) { }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- lhs_(LhsRegister(RegisterBits::decode(key))),
- rhs_(RhsRegister(RegisterBits::decode(key))),
- constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
- runtime_operands_type_(type_info),
- name_(NULL) { }
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- Register lhs_;
- Register rhs_;
- int constant_rhs_;
- bool specialized_on_rhs_;
- BinaryOpIC::TypeInfo runtime_operands_type_;
- char* name_;
-
- static const int kMaxKnownRhs = 0x40000000;
- static const int kKnownRhsKeyBits = 6;
-
- // Minor key encoding in 17 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class TypeInfoBits: public BitField<int, 8, 2> {};
- class RegisterBits: public BitField<bool, 10, 1> {};
- class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- // Encode the parameters in a unique 17 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt())
- | TypeInfoBits::encode(runtime_operands_type_)
- | RegisterBits::encode(lhs_.is(r0));
- }
-
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs);
- void HandleBinaryOpSlowCases(MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
- }
-
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
- return key;
- }
-
- int KnownBitsForMinorKey(int key) {
- if (!key) return 0;
- if (key <= 11) return key - 1;
- int d = 1;
- while (key != 12) {
- key--;
- d <<= 1;
- }
- return d;
- }
-
- Register LhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r0 : r1;
- }
-
- Register RhsRegister(bool lhs_is_r0) {
- return lhs_is_r0 ? r1 : r0;
- }
-
- bool ShouldGenerateSmiCode() {
- return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
- }
-#endif
-};
-
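The known-RHS key produced by MinorKeyForKnownInt and decoded by KnownBitsForMinorKey above round-trips as in this standalone C++ sketch (illustration only): constants 0..10 map to keys 1..11, powers of two above 10 map to 12 + log2(value), and key 0 means no specialization.

// Encode: mirrors MinorKeyForKnownInt.
int EncodeKnownInt(int constant_rhs, bool specialized_on_rhs) {
  if (!specialized_on_rhs) return 0;
  if (constant_rhs <= 10) return constant_rhs + 1;
  int key = 12;
  while ((constant_rhs & 1) == 0) {   // constant_rhs is a power of two here.
    key++;
    constant_rhs >>= 1;
  }
  return key;
}

// Decode: the inverse mapping, as KnownBitsForMinorKey computes with a loop.
int DecodeKnownInt(int key) {
  if (key == 0) return 0;
  if (key <= 11) return key - 1;
  return 1 << (key - 12);
}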
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing, a jump to the label not_found is performed. This
- // jump does not guarantee that the string is not in the symbol table. If the
- // string is found, the code falls through with the string in register r0.
- // The contents of both the c1 and c2 registers are modified. On exit c1 is
- // guaranteed to contain a halfword with its low and high bytes equal to the
- // initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
-
- void Generate(MacroAssembler* masm);
-
- // Should the stub check whether arguments are strings?
- bool string_check_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-
-class StringCompareStub: public CodeStub {
- public:
- StringCompareStub() { }
-
- // Compare two flat ASCII strings and return the result in r0.
- // Does not use the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-// This stub can do a fast mod operation without using fp.
-// It is tail called from the GenericBinaryOpStub and it always
-// returns an answer. It never causes GC so it doesn't need a real frame.
-//
-// The inputs are always positive Smis. This is never called
-// where the denominator is a power of 2. We handle that separately.
-//
-// If we consider the denominator as an odd number multiplied by a power of 2,
-// then:
-// * The exponent (power of 2) is in the shift_distance register.
-// * The odd number is in the odd_number register. It is always in the range
-// of 3 to 25.
-// * The bits from the numerator that are to be copied to the answer (there are
-// shift_distance of them) are in the mask_bits register.
-// * The other bits of the numerator have been shifted down and are in the lhs
-// register.
-class IntegerModStub : public CodeStub {
- public:
- IntegerModStub(Register result,
- Register shift_distance,
- Register odd_number,
- Register mask_bits,
- Register lhs,
- Register scratch)
- : result_(result),
- shift_distance_(shift_distance),
- odd_number_(odd_number),
- mask_bits_(mask_bits),
- lhs_(lhs),
- scratch_(scratch) {
- // We don't code these in the minor key, so they should always be the same.
- // We don't really want to fix that since this stub is rather large and we
- // don't want many copies of it.
- ASSERT(shift_distance_.is(r9));
- ASSERT(odd_number_.is(r4));
- ASSERT(mask_bits_.is(r3));
- ASSERT(scratch_.is(r5));
- }
-
- private:
- Register result_;
- Register shift_distance_;
- Register odd_number_;
- Register mask_bits_;
- Register lhs_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class ResultRegisterBits: public BitField<int, 0, 4> {};
- class LhsRegisterBits: public BitField<int, 4, 4> {};
-
- Major MajorKey() { return IntegerMod; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return ResultRegisterBits::encode(result_.code())
- | LhsRegisterBits::encode(lhs_.code());
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "IntegerModStub"; }
-
- // Utility functions.
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- int mask,
- int shift,
- Label* entry);
- void DigitSum(MacroAssembler* masm,
- Register lhs,
- Register scratch,
- int mask,
- int shift1,
- int shift2,
- Label* entry);
- void ModGetInRangeBySubtraction(MacroAssembler* masm,
- Register lhs,
- int shift,
- int rhs);
- void ModReduce(MacroAssembler* masm,
- Register lhs,
- int max,
- int denominator);
- void ModAnswer(MacroAssembler* masm,
- Register result,
- Register shift_distance,
- Register mask_bits,
- Register sum_of_digits);
-
-
-#ifdef DEBUG
- void Print() { PrintF("IntegerModStub\n"); }
-#endif
-};
-
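As a worked example of the decomposition described above (assuming the register contract in the comment): with numerator 4660 and denominator 12 = 3 * 2^2, shift_distance is 2 and odd_number is 3; the two low numerator bits (00) are the ones copied straight into the answer, the remaining bits 4660 >> 2 = 1165 are reduced modulo 3, giving 1, and the answer is (1 << 2) | 0 = 4, which is indeed 4660 mod 12.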
-
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
- public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch) { }
-
- private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the object register is found in the cache, the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache, the code jumps to
- // the label not_found with only the content of the object register unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-
-class RecordWriteStub : public CodeStub {
- public:
- RecordWriteStub(Register object, Register offset, Register scratch)
- : object_(object), offset_(offset), scratch_(scratch) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register object_;
- Register offset_;
- Register scratch_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
- " (scratch reg %d)\n",
- object_.code(), offset_.code(), scratch_.code());
- }
-#endif
-
- // Minor key encoding in 12 bits. 4 bits for each of the three
- // registers (object, offset and scratch) OOOOAAAASSSS.
- class ScratchBits: public BitField<uint32_t, 0, 4> {};
- class OffsetBits: public BitField<uint32_t, 4, 4> {};
- class ObjectBits: public BitField<uint32_t, 8, 4> {};
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- // Encode the registers.
- return ObjectBits::encode(object_.code()) |
- OffsetBits::encode(offset_.code()) |
- ScratchBits::encode(scratch_.code());
- }
-};
-
-
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 2ac9a413..b2b5cb56 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -194,6 +194,13 @@ enum SoftwareInterruptCodes {
};
+// Type of VFP register. Determines register encoding.
+enum VFPRegPrecision {
+ kSinglePrecision = 0,
+ kDoublePrecision = 1
+};
+
+
typedef int32_t instr_t;
@@ -269,6 +276,15 @@ class Instr {
inline int VCField() const { return Bit(8); }
inline int VAField() const { return Bits(23, 21); }
inline int VBField() const { return Bits(6, 5); }
+ inline int VFPNRegCode(VFPRegPrecision pre) {
+ return VFPGlueRegCode(pre, 16, 7);
+ }
+ inline int VFPMRegCode(VFPRegPrecision pre) {
+ return VFPGlueRegCode(pre, 0, 5);
+ }
+ inline int VFPDRegCode(VFPRegPrecision pre) {
+ return VFPGlueRegCode(pre, 12, 22);
+ }
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
@@ -343,6 +359,17 @@ class Instr {
static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
private:
+ // Join split register codes, depending on single or double precision.
+ // four_bit is the position of the least-significant bit of the four
+ // bit specifier. one_bit is the position of the additional single bit
+ // specifier.
+ inline int VFPGlueRegCode(VFPRegPrecision pre, int four_bit, int one_bit) {
+ if (pre == kSinglePrecision) {
+ return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
+ }
+ return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
+ }
+
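A worked example of the register-code join performed by VFPGlueRegCode above (illustration only, not V8 code): for single precision the four-bit field supplies the high bits and the extra bit is the least significant bit, while for double precision the extra bit becomes the most significant bit.

int SingleRegCode(int four_bit_field, int one_bit) {  // e.g. S15: (7, 1) -> 15
  return (four_bit_field << 1) | one_bit;
}

int DoubleRegCode(int four_bit_field, int one_bit) {  // e.g. D17: (1, 1) -> 17
  return (one_bit << 4) | four_bit_field;
}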
// We need to prevent the creation of instances of class Instr.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
};
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 3a948451..82f93b62 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -130,21 +130,30 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList pointer_regs) {
- // Save the content of all general purpose registers in memory. This copy in
- // memory is later pushed onto the JS expression stack for the fake JS frame
- // generated and also to the C frame generated on top of that. In the JS
- // frame ONLY the registers containing pointers will be pushed on the
- // expression stack. This causes the GC to update these pointers so that
- // they will have the correct value when returning from the debugger.
- __ SaveRegistersToMemory(kJSCallerSaved);
-
+ RegList object_regs,
+ RegList non_object_regs) {
__ EnterInternalFrame();
- // Store the registers containing object pointers on the expression stack to
- // make sure that these are correctly updated during GC.
- // Use sp as base to push.
- __ CopyRegistersFromMemoryToStack(sp, pointer_regs);
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object values
+ // are stored as smis, causing them to be left untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ tst(reg, Operand(0xc0000000));
+ __ Assert(eq, "Unable to encode value as smi");
+ }
+ __ mov(reg, Operand(reg, LSL, kSmiTagSize));
+ }
+ }
+ __ stm(db_w, sp, object_regs | non_object_regs);
+ }
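The smi trick used above can be summarised as follows (illustration only, not V8 code): with a one-bit smi tag of zero, shifting a raw value left by kSmiTagSize makes the GC treat the word as an integer rather than a pointer, and shifting right restores it, as long as the top two bits were clear (which the tst against 0xc0000000 checks).

#include <cstdint>

uint32_t StoreAsSmi(uint32_t value) { return value << 1; }   // Tag bit 0 = smi.
uint32_t RestoreFromSmi(uint32_t smi) { return smi >> 1; }   // Lossless if the
                                                             // top two bits were 0.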
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
@@ -152,19 +161,27 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ mov(r0, Operand(0)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break()));
- CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
+ CEntryStub ceb(1);
__ CallStub(&ceb);
- // Restore the register values containing object pointers from the expression
- // stack in the reverse order as they where pushed.
- // Use sp as base to pop.
- __ CopyRegistersFromStackToMemory(sp, r3, pointer_regs);
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ ldm(ia_w, sp, object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+ }
+ if (FLAG_debug_code &&
+ (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+ __ mov(reg, Operand(kDebugZapValue));
+ }
+ }
+ }
__ LeaveInternalFrame();
- // Finally restore all registers.
- __ RestoreRegistersFromMemory(kJSCallerSaved);
-
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
@@ -184,7 +201,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// -----------------------------------
// Registers r0 and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0);
}
@@ -198,7 +215,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// -----------------------------------
// Registers r0, r1, and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
}
@@ -206,9 +223,8 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
- // -- sp[0] : key
- // -- sp[4] : receiver
- Generate_DebugBreakCallHelper(masm, r0.bit());
+ // -- r1 : receiver
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0);
}
@@ -218,31 +234,24 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------
- // -- r0: number of arguments
- // -- r1: receiver
- // -- lr: return address
+ // -- r2 : name
// -----------------------------------
- // Register r1 contains an object that needs to be pushed on the expression
- // stack of the fake JS frame. r0 is the actual number of arguments not
- // encoded as a smi, therefore it cannot be on the expression stack of the
- // fake JS frame as it can easily be an invalid pointer (e.g. 1). r0 will be
- // pushed on the stack of the C frame and restored from there.
- Generate_DebugBreakCallHelper(masm, r1.bit());
+ Generate_DebugBreakCallHelper(masm, r2.bit(), 0);
}
void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- // In places other than IC call sites it is expected that r0 is TOS which
- // is an object - this is not generally the case so this should be used with
- // care.
- Generate_DebugBreakCallHelper(masm, r0.bit());
+ // Calling convention for construct call (from builtins-arm.cc)
+ // -- r0 : number of arguments (not smi)
+ // -- r1 : constructor function
+ Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
}
@@ -250,7 +259,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
- Generate_DebugBreakCallHelper(masm, r0.bit());
+ Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
}
@@ -258,7 +267,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// No registers used on entry.
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0);
+ Generate_DebugBreakCallHelper(masm, 0, 0);
}
@@ -280,7 +289,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
- Generate_DebugBreakCallHelper(masm, 0);
+ Generate_DebugBreakCallHelper(masm, 0, 0);
}
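// A minimal sketch of the smi round-trip used by Generate_DebugBreakCallHelper
// above, assuming kSmiTag == 0 and kSmiTagSize == 1 as on ARM: registers in
// non_object_regs hold raw integers, so they are smi-tagged before being
// pushed (the GC then only ever sees valid smis on the expression stack) and
// untagged again after the debug break returns. Illustration only; the value
// must fit in 30 bits, which is what the "tst reg, 0xc0000000" assert checks.
static inline uint32_t TagAsSmi(uint32_t value) {
  return value << kSmiTagSize;  // matches "mov reg, Operand(reg, LSL, kSmiTagSize)"
}
static inline uint32_t UntagSmi(uint32_t smi) {
  return smi >> kSmiTagSize;    // matches "mov reg, Operand(reg, LSR, kSmiTagSize)"
}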
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 0029ed16..5122f437 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -463,7 +463,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT((width + lsb) <= 32);
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d",
+ "%d",
instr->Bits(width + lsb - 1, lsb));
return 8;
}
@@ -931,7 +931,7 @@ void Decoder::DecodeType3(Instr* instr) {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
- Format(instr, "usat 'rd, 'imm05@16, 'rm'shift_sat");
+ Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
} else {
UNREACHABLE(); // SSAT.
}
@@ -1269,17 +1269,19 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
if (instr->CoprocessorField() == 0xA) {
switch (instr->OpcodeField()) {
case 0x8:
+ case 0xA:
if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn - 4*'off8]");
+ Format(instr, "vldr'cond 'Sd, ['rn - 4*'imm08@00]");
} else {
- Format(instr, "vstr'cond 'Sd, ['rn - 4*'off8]");
+ Format(instr, "vstr'cond 'Sd, ['rn - 4*'imm08@00]");
}
break;
case 0xC:
+ case 0xE:
if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn + 4*'off8]");
+ Format(instr, "vldr'cond 'Sd, ['rn + 4*'imm08@00]");
} else {
- Format(instr, "vstr'cond 'Sd, ['rn + 4*'off8]");
+ Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
}
break;
default:
@@ -1300,16 +1302,16 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
break;
case 0x8:
if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]");
+ Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
} else {
- Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]");
+ Format(instr, "vstr'cond 'Dd, ['rn - 4*'imm08@00]");
}
break;
case 0xC:
if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]");
+ Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
} else {
- Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]");
+ Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
}
break;
default:
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 271e4a6f..47434392 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -37,87 +37,20 @@ namespace v8 {
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
- Address sp = fp + ExitFrameConstants::kSPDisplacement;
- const int offset = ExitFrameConstants::kCodeOffset;
- Object* code = Memory::Object_at(fp + offset);
- bool is_debug_exit = code->IsSmi();
- if (is_debug_exit) {
- sp -= kNumJSCallerSaved * kPointerSize;
- }
+ Address sp = fp + ExitFrameConstants::kSPOffset;
+
// Fill in the state.
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ ASSERT(*state->pc_address != NULL);
return EXIT;
}
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 4924c1ae..5847a6a2 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -96,11 +96,8 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- // Exit frames have a debug marker on the stack.
- static const int kSPDisplacement = -1 * kPointerSize;
-
- // The debug marker is just above the frame pointer.
static const int kCodeOffset = -1 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
static const int kSavedRegistersOffset = 0 * kPointerSize;
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 21813241..912fefc7 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_ARM)
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -245,6 +246,13 @@ void FullCodeGenerator::EmitReturnSequence() {
}
+FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
+ Token::Value op, Expression* left, Expression* right) {
+ ASSERT(ShouldInlineSmiCase(op));
+ return kNoConstants;
+}
+
+
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
@@ -266,16 +274,10 @@ void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
}
break;
- case Expression::kValueTest:
- case Expression::kTestValue:
- // Push an extra copy of the value in case it's needed.
- __ push(reg);
- // Fall through.
-
case Expression::kTest:
- // We always call the runtime on ARM, so push the value as argument.
- __ push(reg);
- DoTest(context);
+ // For simplicity we always test the accumulator register.
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -290,8 +292,6 @@ void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
break;
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// On ARM we have to move the value into a register to do anything
// with it.
Move(result_register(), slot);
@@ -310,8 +310,6 @@ void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
// Nothing to do.
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// On ARM we have to move the value into a register to do anything
// with it.
__ mov(result_register(), Operand(lit->handle()));
@@ -340,15 +338,9 @@ void FullCodeGenerator::ApplyTOS(Expression::Context context) {
}
break;
- case Expression::kValueTest:
- case Expression::kTestValue:
- // Duplicate the value on the stack in case it's needed.
- __ ldr(ip, MemOperand(sp));
- __ push(ip);
- // Fall through.
-
case Expression::kTest:
- DoTest(context);
+ __ pop(result_register());
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -381,54 +373,9 @@ void FullCodeGenerator::DropAndApply(int count,
break;
case Expression::kTest:
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp));
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- if (count == 1) {
- __ str(reg, MemOperand(sp));
- __ push(reg);
- } else { // count > 1
- __ Drop(count - 2);
- __ str(reg, MemOperand(sp, kPointerSize));
- __ str(reg, MemOperand(sp));
- }
- DoTest(context);
- break;
- }
-}
-
-void FullCodeGenerator::PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = materialize_true;
- break;
- case Expression::kValue:
- *if_true = materialize_true;
- *if_false = materialize_false;
- break;
- case Expression::kTest:
- *if_true = true_label_;
- *if_false = false_label_;
- break;
- case Expression::kValueTest:
- *if_true = materialize_true;
- *if_false = false_label_;
- break;
- case Expression::kTestValue:
- *if_true = true_label_;
- *if_false = materialize_false;
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -471,34 +418,6 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kTest:
break;
-
- case Expression::kValueTest:
- __ bind(materialize_true);
- switch (location_) {
- case kAccumulator:
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- break;
- case kStack:
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- break;
- }
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(materialize_false);
- switch (location_) {
- case kAccumulator:
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- break;
- case kStack:
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- break;
- }
- __ jmp(false_label_);
- break;
}
}
@@ -527,103 +446,40 @@ void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
break;
}
case Expression::kTest:
- __ b(flag ? true_label_ : false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- // If value is false it's needed.
- if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- break;
- case kStack:
- // If value is false it's needed.
- if (!flag) {
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- }
- break;
- }
- __ b(flag ? true_label_ : false_label_);
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- // If value is true it's needed.
- if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- break;
- case kStack:
- // If value is true it's needed.
- if (flag) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- }
- break;
+ if (flag) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ b(false_label_);
}
- __ b(flag ? true_label_ : false_label_);
break;
}
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
- // The value to test is pushed on the stack, and duplicated on the stack
- // if necessary (for value/test and test/value contexts).
- ASSERT_NE(NULL, true_label_);
- ASSERT_NE(NULL, false_label_);
-
+void FullCodeGenerator::DoTest(Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
// Call the runtime to find the boolean value of the source and then
// translate it into control flow to the pair of labels.
+ __ push(result_register());
__ CallRuntime(Runtime::kToBool, 1);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
+ Split(eq, if_true, if_false, fall_through);
+}
- // Complete based on the context.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
-
- case Expression::kTest:
- __ b(eq, true_label_);
- __ jmp(false_label_);
- break;
-
- case Expression::kValueTest: {
- Label discard;
- switch (location_) {
- case kAccumulator:
- __ b(ne, &discard);
- __ pop(result_register());
- __ jmp(true_label_);
- break;
- case kStack:
- __ b(eq, true_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- switch (location_) {
- case kAccumulator:
- __ b(eq, &discard);
- __ pop(result_register());
- __ jmp(false_label_);
- break;
- case kStack:
- __ b(ne, false_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ b(cc, if_true);
+ } else if (if_true == fall_through) {
+ __ b(NegateCondition(cc), if_false);
+ } else {
+ __ b(cc, if_true);
+ __ b(if_false);
}
}
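// A minimal caller sketch for the new Split helper above, assuming the usual
// PrepareTest/Apply pattern from this file: when the false label doubles as
// the fall-through label, only a single conditional branch is emitted and the
// old unconditional "__ b(if_false)" disappears.
//
//   Label materialize_true, materialize_false;
//   Label* if_true = NULL;
//   Label* if_false = NULL;
//   Label* fall_through = NULL;
//   PrepareTest(&materialize_true, &materialize_false,
//               &if_true, &if_false, &fall_through);
//   __ tst(r0, Operand(kSmiTagMask));
//   Split(eq, if_true, if_false, fall_through);  // at most one branch emitted
//   Apply(context_, if_true, if_false);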
@@ -816,19 +672,20 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile the label expression.
VisitForValue(clause->label(), kAccumulator);
- // Perform the comparison as if via '==='. The comparison stub expects
- // the smi vs. smi case to be handled before it is called.
- Label slow_case;
+ // Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &slow_case);
- __ cmp(r1, r0);
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target()->entry_label());
-
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ __ orr(r2, r1, r0);
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow_case);
+ __ cmp(r1, r0);
+ __ b(ne, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ b(clause->body_target()->entry_label());
__ bind(&slow_case);
+ }
+
CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0);
__ CallStub(&stub);
__ cmp(r0, Operand(0));
@@ -1107,28 +964,33 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// r2 = RegExp pattern
// r1 = RegExp flags
// r0 = temp + materialized value (RegExp literal)
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ ldr(r0, FieldMemOperand(r4, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
__ b(ne, &materialized);
+
+ // Create regexp literal using runtime function.
+ // Result will be in r0.
__ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r2, Operand(expr->pattern()));
__ mov(r1, Operand(expr->flags()));
__ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
__ push(r0);
__ mov(r0, Operand(Smi::FromInt(size)));
__ push(r0);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+
// After this, registers are used as follows:
// r0: Newly allocated regexp.
- // r1: Materialized regexp
+ // r1: Materialized regexp.
// r2: temp.
__ pop(r1);
__ CopyFields(r0, r1, r2.bit(), size / kPointerSize);
@@ -1289,10 +1151,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1303,58 +1166,70 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForValue(property->obj(), kAccumulator);
__ push(result_register());
} else {
- VisitForValue(prop->obj(), kStack);
+ VisitForValue(property->obj(), kStack);
}
break;
case KEYED_PROPERTY:
- // We need the key and receiver on both the stack and in r0 and r1.
if (expr->is_compound()) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kAccumulator);
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kStack);
}
break;
}
- // If we have a compound assignment: Get value of LHS expression and
- // store in on top of the stack.
if (expr->is_compound()) {
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
Expression::kValue);
break;
case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop);
- __ push(result_register());
+ EmitNamedPropertyLoad(property);
break;
case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop);
- __ push(result_register());
+ EmitKeyedPropertyLoad(property);
break;
}
- location_ = saved_location;
- }
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- VisitForValue(rhs, kAccumulator);
+ Token::Value op = expr->binary_op();
+ ConstantOperand constant = ShouldInlineSmiCase(op)
+ ? GetConstantOperand(op, expr->target(), expr->value())
+ : kNoConstants;
+ ASSERT(constant == kRightConstant || constant == kNoConstants);
+ if (constant == kNoConstants) {
+ __ push(r0); // Left operand goes on the stack.
+ VisitForValue(expr->value(), kAccumulator);
+ }
- // If we have a compound assignment: Apply operator.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr,
+ op,
+ Expression::kValue,
+ mode,
+ expr->target(),
+ expr->value(),
+ constant);
+ } else {
+ EmitBinaryOp(op, Expression::kValue, mode);
+ }
location_ = saved_location;
+
+ } else {
+ VisitForValue(expr->value(), kAccumulator);
}
// Record source position before possible IC call.
@@ -1395,10 +1270,23 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right,
+ ConstantOperand constant) {
+ ASSERT(constant == kNoConstants); // Only handled case.
+ EmitBinaryOp(op, context, mode);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
+ Expression::Context context,
+ OverwriteMode mode) {
__ pop(r1);
- GenericBinaryOpStub stub(op, NO_OVERWRITE, r1, r0);
+ GenericBinaryOpStub stub(op, mode, r1, r0);
__ CallStub(&stub);
Apply(context, r0);
}
@@ -1827,12 +1715,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// arguments.
- // Push function on the stack.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
VisitForValue(expr->expression(), kStack);
- // Push global object (receiver).
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ push(r0);
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1844,16 +1732,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// constructor invocation.
SetSourcePosition(expr->position());
- // Load function, arg_count into r1 and r0.
+ // Load function and argument count into r1 and r0.
__ mov(r0, Operand(arg_count));
- // Function is in sp[arg_count + 1].
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in r0, or pop it.
- DropAndApply(1, context_, r0);
+ Apply(context_, r0);
}
@@ -1865,7 +1750,9 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_true);
__ b(if_false);
@@ -1882,11 +1769,12 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ tst(r0, Operand(kSmiTagMask | 0x80000000));
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1900,7 +1788,10 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
__ BranchOnSmi(r0, if_false);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r0, ip);
@@ -1914,8 +1805,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, if_false);
__ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
- __ b(le, if_true);
- __ b(if_false);
+ Split(le, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1929,12 +1819,13 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- __ b(ge, if_true);
- __ b(if_false);
+ Split(ge, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1948,14 +1839,15 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_true);
- __ b(if_false);
+ Split(ne, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1971,7 +1863,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
@@ -1989,12 +1883,13 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2008,12 +1903,13 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2027,12 +1923,13 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2045,7 +1942,9 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2061,8 +1960,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2078,12 +1976,13 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(r1);
__ cmp(r0, r1);
- __ b(eq, if_true);
- __ b(if_false);
+ Split(eq, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2536,14 +2435,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
- __ CallRuntime(Runtime::kRegExpConstructResult, 1);
- Apply(context_, r0);
-}
-
-
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
VisitForValue(args->at(0), kStack);
@@ -2648,6 +2539,35 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
+
+ __ b(eq, if_true);
+ __ b(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kAccumulator);
+ __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ IndexFromHash(r0, r0);
+ Apply(context_, r0);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -2752,19 +2672,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- // Fall through.
case Expression::kTest:
- case Expression::kValueTest:
__ jmp(false_label_);
break;
}
@@ -2776,42 +2684,19 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
+ Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
-
- VisitForControl(expr->expression(), if_true, if_false);
-
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ mov(r2, Operand(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ push(r0);
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ mov(r0, Operand(proxy->name()));
- __ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(r0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitForValue(expr->expression(), kStack);
- }
-
+ VisitForTypeofValue(expr->expression(), kStack);
__ CallRuntime(Runtime::kTypeof, 1);
Apply(context_, r0);
break;
@@ -2832,9 +2717,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::SUB: {
Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
@@ -2848,28 +2731,26 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::BIT_NOT: {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register r0.
+ // The generic unary operation stub expects the argument to be
+ // in the accumulator register r0.
VisitForValue(expr->expression(), kAccumulator);
- // Avoid calling the stub for Smis.
- Label smi, done;
- __ BranchOnSmi(result_register(), &smi);
- // Non-smi: call stub leaving result in accumulator register.
+ Label done;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label call_stub;
+ __ BranchOnNotSmi(r0, &call_stub);
+ __ mvn(r0, Operand(r0));
+ // Bit-clear inverted smi-tag.
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ __ b(&done);
+ __ bind(&call_stub);
+ }
+ bool overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode mode =
+ overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode);
__ CallStub(&stub);
- __ b(&done);
- // Perform operation directly on Smis.
- __ bind(&smi);
- __ mvn(result_register(), Operand(result_register()));
- // Bit-clear inverted smi-tag.
- __ bic(result_register(), result_register(), Operand(kSmiTagMask));
__ bind(&done);
- Apply(context_, result_register());
+ Apply(context_, r0);
break;
}
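// A minimal sketch of the inlined smi fast path for BIT_NOT above, assuming
// kSmiTag == 0 and kSmiTagMask == 1: "mvn" flips every bit including the tag
// bit, and the following "bic" clears the tag again, which yields exactly the
// smi encoding of the complemented integer.
static inline int32_t SmiBitNot(int32_t smi) {  // smi == n << kSmiTagSize
  return ~smi & ~kSmiTagMask;                   // == (~n) << kSmiTagSize
}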
@@ -2881,6 +2762,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// as the left-hand side.
if (!expr->expression()->IsValidLeftHandSide()) {
@@ -2945,8 +2828,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// Save the result on the stack. If we have a named or keyed property
// we store the result under the receiver that is currently on top
// of the stack.
@@ -2969,7 +2850,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Inline smi case if we are in a loop.
Label stub_call, done;
int count_value = expr->op() == Token::INC ? 1 : -1;
- if (loop_depth() > 0) {
+ if (ShouldInlineSmiCase(expr->op())) {
__ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
@@ -3034,68 +2915,126 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- VisitForEffect(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- VisitForValue(expr->left(), kStack);
- VisitForValue(expr->right(), kAccumulator);
- EmitBinaryOp(expr->op(), context_);
- break;
-
- default:
- UNREACHABLE();
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ mov(r2, Operand(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (where == kStack) __ push(r0);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ mov(r0, Operand(proxy->name()));
+ __ Push(cp, r0);
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ if (where == kStack) __ push(r0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr, where);
}
}
-void FullCodeGenerator::EmitNullCompare(bool strict,
- Register obj,
- Register null_const,
- Label* if_true,
- Label* if_false,
- Register scratch) {
- __ cmp(obj, null_const);
- if (strict) {
+bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (op != Token::EQ && op != Token::EQ_STRICT) return false;
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ Literal* right_literal = right->AsLiteral();
+ if (right_literal == NULL) return false;
+ Handle<Object> right_literal_value = right_literal->handle();
+ if (!right_literal_value->IsString()) return false;
+ UnaryOperation* left_unary = left->AsUnaryOperation();
+ if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
+ Handle<String> check = Handle<String>::cast(right_literal_value);
+
+ VisitForTypeofValue(left_unary->expression(), kAccumulator);
+ if (check->Equals(Heap::number_symbol())) {
+ __ tst(r0, Operand(kSmiTagMask));
__ b(eq, if_true);
- } else {
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r0, ip);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::string_symbol())) {
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ // Check for undetectable objects => false.
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ __ b(eq, if_false);
+ __ ldrb(r1, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
+ Split(lt, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
__ b(eq, if_true);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::undefined_symbol())) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(obj, ip);
+ __ cmp(r0, ip);
__ b(eq, if_true);
- __ BranchOnSmi(obj, if_false);
- // It can be an undetectable object.
- __ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_true);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ // Check for undetectable objects => true.
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::function_symbol())) {
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ __ CompareObjectType(r0, r1, r0, JS_FUNCTION_TYPE);
+ __ b(eq, if_true);
+ // Regular expressions => 'function' (they are callable).
+ __ CompareInstanceType(r1, r0, JS_REGEXP_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::object_symbol())) {
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, if_true);
+ // Regular expressions => 'function', not 'object'.
+ __ CompareObjectType(r0, r1, r0, JS_REGEXP_TYPE);
+ __ b(eq, if_false);
+ // Check for undetectable objects => false.
+ __ ldrb(r0, FieldMemOperand(r1, Map::kBitFieldOffset));
+ __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r0, Operand(1 << Map::kIsUndetectable));
+ __ b(eq, if_false);
+ // Check for JS objects => true.
+ __ ldrb(r0, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, if_false);
+ __ cmp(r0, Operand(LAST_JS_OBJECT_TYPE));
+ Split(le, if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
}
- __ jmp(if_false);
+
+ return true;
}
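// Note that the fast path above is asymmetric: only comparisons written as
// typeof <expression> == "<string literal>" (or ===) are matched; the mirrored
// form "<string literal>" == typeof <expression> falls back to the generic
// compare code below. A minimal sketch of the guard, using the AST types
// already used in this file:
//
//   static bool IsTypeofCompareWithLiteral(Token::Value op,
//                                          Expression* left,
//                                          Expression* right) {
//     if (op != Token::EQ && op != Token::EQ_STRICT) return false;
//     Literal* literal = right->AsLiteral();
//     if (literal == NULL || !literal->handle()->IsString()) return false;
//     UnaryOperation* unary = left->AsUnaryOperation();
//     return unary != NULL && unary->op() == Token::TYPEOF;
//   }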
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
@@ -3103,26 +3042,37 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ Apply(context_, if_true, if_false);
+ return;
+ }
VisitForValue(expr->left(), kStack);
- switch (expr->op()) {
+ switch (op) {
case Token::IN:
VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_JS);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
- __ b(eq, if_true);
- __ jmp(if_false);
+ Split(eq, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
VisitForValue(expr->right(), kStack);
InstanceofStub stub;
__ CallStub(&stub);
+ // The stub returns 0 for true.
__ tst(r0, r0);
- __ b(eq, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
+ Split(eq, if_true, if_false, fall_through);
break;
}
@@ -3130,28 +3080,14 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForValue(expr->right(), kAccumulator);
Condition cc = eq;
bool strict = false;
- switch (expr->op()) {
+ switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
- case Token::EQ: {
+ case Token::EQ:
cc = eq;
__ pop(r1);
- // If either operand is constant null we do a fast compare
- // against null.
- Literal* right_literal = expr->right()->AsLiteral();
- Literal* left_literal = expr->left()->AsLiteral();
- if (right_literal != NULL && right_literal->handle()->IsNull()) {
- EmitNullCompare(strict, r1, r0, if_true, if_false, r2);
- Apply(context_, if_true, if_false);
- return;
- } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
- EmitNullCompare(strict, r0, r1, if_true, if_false, r2);
- Apply(context_, if_true, if_false);
- return;
- }
break;
- }
case Token::LT:
cc = lt;
__ pop(r1);
@@ -3178,21 +3114,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ orr(r2, r0, Operand(r1));
- __ BranchOnNotSmi(r2, &slow_case);
- __ cmp(r1, r0);
- __ b(cc, if_true);
- __ jmp(if_false);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ __ orr(r2, r0, Operand(r1));
+ __ BranchOnNotSmi(r2, &slow_case);
+ __ cmp(r1, r0);
+ Split(cc, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0);
__ CallStub(&stub);
__ cmp(r0, Operand(0));
- __ b(cc, if_true);
- __ jmp(if_false);
+ Split(cc, if_true, if_false, fall_through);
}
}
@@ -3202,6 +3136,38 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Comment cmnt(masm_, "[ CompareToNull");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForValue(expr->expression(), kAccumulator);
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ __ cmp(r0, r1);
+ if (expr->is_strict()) {
+ Split(eq, if_true, if_false, fall_through);
+ } else {
+ __ b(eq, if_true);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, if_true);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, if_false);
+ // It can be an undetectable object.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through);
+ }
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, r0);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index abc09222..49d7b2d0 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "assembler-arm.h"
-#include "codegen.h"
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "disasm.h"
#include "ic-inl.h"
@@ -527,32 +527,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
}
-// Picks out an array index from the hash field.
-static void GenerateIndexFromHash(MacroAssembler* masm,
- Register key,
- Register hash) {
- // Register use:
- // key - holds the overwritten key on exit.
- // hash - holds the key's hash. Clobbered.
-
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- ASSERT(String::kHashShift >= kSmiTagSize);
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- __ mov(key, Operand(hash, LSL, kSmiTagSize));
-}
-
-
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
@@ -852,7 +826,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
__ bind(&index_string);
- GenerateIndexFromHash(masm, r2, r3);
+ __ IndexFromHash(r3, r2);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1249,7 +1223,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ Ret();
__ bind(&index_string);
- GenerateIndexFromHash(masm, key, r3);
+ __ IndexFromHash(r3, key);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 0d04156d..0b6e7b33 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -513,7 +513,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
+void MacroAssembler::EnterExitFrame() {
// Compute the argv pointer and keep it in a callee-saved register.
// r0 is argc.
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -556,16 +556,6 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
// Setup argc and the builtin function in callee-saved registers.
mov(r4, Operand(r0));
mov(r5, Operand(r1));
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Save the state of all registers to the stack from the memory
- // location. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // Use sp as base to push.
- CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
- }
-#endif
}
@@ -600,19 +590,7 @@ int MacroAssembler::ActivationFrameAlignment() {
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Restore the memory copy of the registers by digging them out from
- // the stack. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // This code intentionally clobbers r2 and r3.
- const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
- add(r3, fp, Operand(kOffset));
- CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
- }
-#endif
-
+void MacroAssembler::LeaveExitFrame() {
// Clear top frame.
mov(r3, Operand(0));
mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -779,66 +757,8 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::SaveRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of registers to memory location.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
- str(reg, MemOperand(ip));
- }
- }
-}
-
-
-void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of memory location to registers.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
- ldr(reg, MemOperand(ip));
- }
- }
-}
-
-
-void MacroAssembler::CopyRegistersFromMemoryToStack(Register base,
- RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of the memory location to the stack and adjust base.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
- ldr(ip, MemOperand(ip));
- str(ip, MemOperand(base, 4, NegPreIndex));
- }
- }
-}
-
-
-void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of the stack to the memory location and adjust base.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
- ldr(scratch, MemOperand(base, 4, PostIndex));
- str(scratch, MemOperand(ip));
- }
- }
-}
-
+#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
mov(r0, Operand(0));
@@ -1337,6 +1257,21 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it does not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in the index register. kArrayIndexValueMask
+ // has zeros in the low kHashShift bits.
+ STATIC_ASSERT(kSmiTag == 0);
+ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ mov(index, Operand(hash, LSL, kSmiTagSize));
+}
+
+
void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
Register outHighReg,
Register outLowReg) {
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index a1c5dbb5..207ee5cb 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -250,14 +250,14 @@ class MacroAssembler: public Assembler {
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either normal or debug mode.
+ // Enter exit frame.
// Expects the number of arguments in register r0 and
// the builtin function to call in register r1. Exits with argc in
// r4, argv in r6, and the builtin function to call in r5.
- void EnterExitFrame(ExitFrame::Mode mode);
+ void EnterExitFrame();
// Leave the current exit frame. Expects the return value in r0.
- void LeaveExitFrame(ExitFrame::Mode mode);
+ void LeaveExitFrame();
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -294,12 +294,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugger Support
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void CopyRegistersFromMemoryToStack(Register base, RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
void DebugBreak();
#endif
@@ -475,6 +469,12 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
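// A minimal sketch of what the new MacroAssembler::IndexFromHash helper
// declared above computes, written over the raw hash field value. The
// constants are the same String::kHashShift and String::kArrayIndexValueBits
// that the Ubfx instruction uses, and the result is smi-tagged exactly as the
// removed GenerateIndexFromHash helper in ic-arm.cc did.
static inline uint32_t ArrayIndexFromHashField(uint32_t hash_field) {
  uint32_t index = (hash_field >> String::kHashShift) &
                   ((1u << String::kArrayIndexValueBits) - 1);  // what Ubfx extracts
  return index << kSmiTagSize;                                  // smi-tag the index
}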
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index c67c7aac..72b635fe 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -31,12 +31,10 @@
#include "unicode.h"
#include "log.h"
-#include "ast.h"
#include "code-stubs.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
-#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
namespace v8 {
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 2c0a8d84..93a74d7c 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -242,22 +242,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
Label stack_overflow_label_;
};
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
- const char* GetName() { return "RegExpCEntryStub"; }
-};
-
#endif // V8_INTERPRETED_REGEXP
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index c7fc13f8..64262b2b 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2281,13 +2281,6 @@ void Simulator::DecodeUnconditional(Instr* instr) {
}
-// Depending on value of last_bit flag glue register code from vm and m values
-// (where m is expected to be a single bit).
-static int GlueRegCode(bool last_bit, int vm, int m) {
- return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
-}
-
-
// void Simulator::DecodeTypeVFP(Instr* instr)
// The following ARMv7 VFP instructions are currently supported.
// vmov :Sn = Rt
@@ -2305,9 +2298,10 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
- int vm = instr->VmField();
- int vd = instr->VdField();
- int vn = instr->VnField();
+ // Obtain double precision register codes.
+ int vm = instr->VFPMRegCode(kDoublePrecision);
+ int vd = instr->VFPDRegCode(kDoublePrecision);
+ int vn = instr->VFPNRegCode(kDoublePrecision);
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
@@ -2315,9 +2309,13 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
// vmov register to register.
if (instr->SzField() == 0x1) {
- set_d_register_from_double(vd, get_double_from_d_register(vm));
+ int m = instr->VFPMRegCode(kDoublePrecision);
+ int d = instr->VFPDRegCode(kDoublePrecision);
+ set_d_register_from_double(d, get_double_from_d_register(m));
} else {
- set_s_register_from_float(vd, get_float_from_s_register(vm));
+ int m = instr->VFPMRegCode(kSinglePrecision);
+ int d = instr->VFPDRegCode(kSinglePrecision);
+ set_s_register_from_float(d, get_float_from_s_register(m));
}
} else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@@ -2410,7 +2408,7 @@ void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
(instr->VAField() == 0x0));
int t = instr->RtField();
- int n = GlueRegCode(true, instr->VnField(), instr->NField());
+ int n = instr->VFPNRegCode(kSinglePrecision);
bool to_arm_register = (instr->VLField() == 0x1);
if (to_arm_register) {
@@ -2427,22 +2425,25 @@ void Simulator::DecodeVCMP(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1));
-
// Comparison.
- bool dp_operation = (instr->SzField() == 1);
+
+ VFPRegPrecision precision = kSinglePrecision;
+ if (instr->SzField() == 1) {
+ precision = kDoublePrecision;
+ }
if (instr->Bit(7) != 0) {
// Raising exceptions for quiet NaNs is not supported.
UNIMPLEMENTED(); // Not used by V8.
}
- int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
+ int d = instr->VFPDRegCode(precision);
int m = 0;
if (instr->Opc2Field() == 0x4) {
- m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+ m = instr->VFPMRegCode(precision);
}
- if (dp_operation) {
+ if (precision == kDoublePrecision) {
double dd_value = get_double_from_d_register(d);
double dm_value = 0.0;
if (instr->Opc2Field() == 0x4) {
@@ -2460,11 +2461,17 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
- bool double_to_single = (instr->SzField() == 1);
- int dst = GlueRegCode(double_to_single, instr->VdField(), instr->DField());
- int src = GlueRegCode(!double_to_single, instr->VmField(), instr->MField());
+ VFPRegPrecision dst_precision = kDoublePrecision;
+ VFPRegPrecision src_precision = kSinglePrecision;
+ if (instr->SzField() == 1) {
+ dst_precision = kSinglePrecision;
+ src_precision = kDoublePrecision;
+ }
- if (double_to_single) {
+ int dst = instr->VFPDRegCode(dst_precision);
+ int src = instr->VFPMRegCode(src_precision);
+
+ if (dst_precision == kSinglePrecision) {
double val = get_double_from_d_register(src);
set_s_register_from_float(dst, static_cast<float>(val));
} else {
@@ -2480,13 +2487,13 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
(((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
// Conversion between floating-point and integer.
- int vd = instr->VdField();
- int d = instr->DField();
- int vm = instr->VmField();
- int m = instr->MField();
-
bool to_integer = (instr->Bit(18) == 1);
- bool dp_operation = (instr->SzField() == 1);
+
+ VFPRegPrecision src_precision = kSinglePrecision;
+ if (instr->SzField() == 1) {
+ src_precision = kDoublePrecision;
+ }
+
if (to_integer) {
bool unsigned_integer = (instr->Bit(16) == 0);
if (instr->Bit(7) != 1) {
@@ -2494,10 +2501,10 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
UNIMPLEMENTED(); // Not used by V8.
}
- int dst = GlueRegCode(true, vd, d);
- int src = GlueRegCode(!dp_operation, vm, m);
+ int dst = instr->VFPDRegCode(kSinglePrecision);
+ int src = instr->VFPMRegCode(src_precision);
- if (dp_operation) {
+ if (src_precision == kDoublePrecision) {
double val = get_double_from_d_register(src);
int sint = unsigned_integer ? static_cast<uint32_t>(val) :
@@ -2515,12 +2522,12 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
} else {
bool unsigned_integer = (instr->Bit(7) == 0);
- int dst = GlueRegCode(!dp_operation, vd, d);
- int src = GlueRegCode(true, vm, m);
+ int dst = instr->VFPDRegCode(src_precision);
+ int src = instr->VFPMRegCode(kSinglePrecision);
int val = get_sinteger_from_s_register(src);
- if (dp_operation) {
+ if (src_precision == kDoublePrecision) {
if (unsigned_integer) {
set_d_register_from_double(dst,
static_cast<double>((uint32_t)val));
@@ -2551,9 +2558,11 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
if (instr->CoprocessorField() == 0xA) {
switch (instr->OpcodeField()) {
case 0x8:
- case 0xC: { // Load and store float to memory.
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store single precision float to memory.
int rn = instr->RnField();
- int vd = instr->VdField();
+ int vd = instr->VFPDRegCode(kSinglePrecision);
int offset = instr->Immed8Field();
if (!instr->HasU()) {
offset = -offset;
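
The GlueRegCode helper removed above built a five-bit VFP register code from the four-bit Vd/Vn/Vm field plus the extra D/N/M bit, placing the extra bit differently for single and double precision; the new VFPDRegCode/VFPNRegCode/VFPMRegCode accessors encapsulate that choice. The underlying rule is simple (an illustrative sketch, not the simulator's actual Instr API):

    // Illustrative: combining the 4-bit register field (vx) with the extra
    // bit (x) to form a VFP register code, as GlueRegCode used to do.
    enum VFPRegPrecision { kSinglePrecision, kDoublePrecision };

    int VFPRegCode(VFPRegPrecision precision, int vx, int x) {
      if (precision == kSinglePrecision) {
        return (vx << 1) | x;  // S registers: the extra bit is the low bit
      }
      return (x << 4) | vx;    // D registers: the extra bit is the high bit
    }
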
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 2a7c22d7..344cb6fb 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1363,8 +1363,68 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
JSFunction* function,
String* name,
CheckType check) {
- // TODO(722): implement this.
- return Heap::undefined_value();
+ // ----------- S t a t e -------------
+ // -- r2 : function name
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ r0);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
+ r1, r3, r4, name, &miss);
+
+ Register receiver = r1;
+ Register index = r4;
+ Register scratch = r3;
+ Register result = r0;
+ __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharCodeAtGenerator char_code_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_code_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ ICRuntimeCallHelper call_helper;
+ char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ LoadRoot(r0, Heap::kNanValueRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&miss);
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return GetCode(function);
}
@@ -1373,8 +1433,71 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
JSFunction* function,
String* name,
CheckType check) {
- // TODO(722): implement this.
- return Heap::undefined_value();
+ // ----------- S t a t e -------------
+ // -- r2 : function name
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ r0);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
+ r1, r3, r4, name, &miss);
+
+ Register receiver = r0;
+ Register index = r4;
+ Register scratch1 = r1;
+ Register scratch2 = r3;
+ Register result = r0;
+ __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&miss);
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return GetCode(function);
}
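
Both stubs above read their operands from the stack layout spelled out in the state comments: the receiver sits at sp[argc * 4] and argument n at sp[(argc - n - 1) * 4]. A small worked example of those offsets (illustrative C++ only, assuming ARM's 4-byte kPointerSize):

    #include <cstdio>

    const int kPointerSize = 4;  // assumption: 32-bit pointers, as on ARM

    int ReceiverOffset(int argc) { return argc * kPointerSize; }
    int ArgOffset(int argc, int n) { return (argc - n - 1) * kPointerSize; }

    int main() {
      // For a call like str.charCodeAt(i), argc is 1: the index argument is at
      // sp[0] and the receiver at sp[4], matching the ldr instructions above.
      std::printf("receiver at sp[%d], index at sp[%d]\n",
                  ReceiverOffset(1), ArgOffset(1, 0));
      return 0;
    }
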
diff --git a/src/array.js b/src/array.js
index f3c0697b..e12df641 100644
--- a/src/array.js
+++ b/src/array.js
@@ -566,13 +566,6 @@ function ArraySlice(start, end) {
function ArraySplice(start, delete_count) {
var num_arguments = %_ArgumentsLength();
- // SpiderMonkey and JSC return undefined in the case where no
- // arguments are given instead of using the implicit undefined
- // arguments. This does not follow ECMA-262, but we do the same for
- // compatibility.
- // TraceMonkey follows ECMA-262 though.
- if (num_arguments == 0) return;
-
var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
@@ -953,7 +946,8 @@ function ArrayMap(f, receiver) {
function ArrayIndexOf(element, index) {
- var length = this.length;
+ var length = TO_UINT32(this.length);
+ if (length == 0) return -1;
if (IS_UNDEFINED(index)) {
index = 0;
} else {
@@ -963,13 +957,13 @@ function ArrayIndexOf(element, index) {
// If index is still negative, search the entire array.
if (index < 0) index = 0;
}
+ // Look up the element in the array.
if (!IS_UNDEFINED(element)) {
for (var i = index; i < length; i++) {
if (this[i] === element) return i;
}
return -1;
}
- // Lookup through the array.
for (var i = index; i < length; i++) {
if (IS_UNDEFINED(this[i]) && i in this) {
return i;
@@ -980,7 +974,8 @@ function ArrayIndexOf(element, index) {
function ArrayLastIndexOf(element, index) {
- var length = this.length;
+ var length = TO_UINT32(this.length);
+ if (length == 0) return -1;
if (%_ArgumentsLength() < 2) {
index = length - 1;
} else {
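
The ArrayIndexOf change above coerces the length with TO_UINT32 and returns early for an empty array before clamping the start index; ArrayLastIndexOf gains the same guard. The forward-search logic, restated as a C++ sketch (an approximation of the JavaScript above, not the runtime code):

    #include <cstdint>
    #include <vector>

    // Approximates ArrayIndexOf after the patch, for a dense array of ints.
    int IndexOf(const std::vector<int>& elements, int element, int64_t start) {
      uint32_t length = static_cast<uint32_t>(elements.size());  // TO_UINT32
      if (length == 0) return -1;        // new early exit for empty arrays
      if (start < 0) start = 0;          // a still-negative start searches all
      for (uint32_t i = static_cast<uint32_t>(start); i < length; i++) {
        if (elements[i] == element) return static_cast<int>(i);
      }
      return -1;
    }
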
diff --git a/src/ast-inl.h b/src/ast-inl.h
index 717f68d0..f0a25c17 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -64,8 +64,7 @@ ForStatement::ForStatement(ZoneStringList* labels)
cond_(NULL),
next_(NULL),
may_have_function_literal_(true),
- loop_variable_(NULL),
- peel_this_loop_(false) {
+ loop_variable_(NULL) {
}
diff --git a/src/ast.cc b/src/ast.cc
index 92df9900..0d07a58a 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -28,7 +28,6 @@
#include "v8.h"
#include "ast.h"
-#include "data-flow.h"
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
@@ -78,18 +77,14 @@ VariableProxy::VariableProxy(Handle<String> name,
var_(NULL),
is_this_(is_this),
inside_with_(inside_with),
- is_trivial_(false),
- reaching_definitions_(NULL),
- is_primitive_(false) {
+ is_trivial_(false) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
}
VariableProxy::VariableProxy(bool is_this)
- : is_this_(is_this),
- reaching_definitions_(NULL),
- is_primitive_(false) {
+ : is_this_(is_this) {
}
@@ -237,6 +232,59 @@ bool Expression::GuaranteedSmiResult() {
return false;
}
+
+void Expression::CopyAnalysisResultsFrom(Expression* other) {
+ bitfields_ = other->bitfields_;
+ type_ = other->type_;
+}
+
+
+bool UnaryOperation::ResultOverwriteAllowed() {
+ switch (op_) {
+ case Token::BIT_NOT:
+ case Token::SUB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+bool BinaryOperation::ResultOverwriteAllowed() {
+ switch (op_) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+ return false;
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ return true;
+ default:
+ UNREACHABLE();
+ }
+ return false;
+}
+
+
+BinaryOperation::BinaryOperation(Assignment* assignment) {
+ ASSERT(assignment->is_compound());
+ op_ = assignment->binary_op();
+ left_ = assignment->target();
+ right_ = assignment->value();
+ pos_ = assignment->position();
+ CopyAnalysisResultsFrom(assignment);
+}
+
+
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
@@ -575,218 +623,6 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
}
}
-// IsPrimitive implementation. IsPrimitive is true if the value of an
-// expression is known at compile-time to be any JS type other than Object
-// (e.g, it is Undefined, Null, Boolean, String, or Number).
-
-// The following expression types are never primitive because they express
-// Object values.
-bool FunctionLiteral::IsPrimitive() { return false; }
-bool SharedFunctionInfoLiteral::IsPrimitive() { return false; }
-bool RegExpLiteral::IsPrimitive() { return false; }
-bool ObjectLiteral::IsPrimitive() { return false; }
-bool ArrayLiteral::IsPrimitive() { return false; }
-bool CatchExtensionObject::IsPrimitive() { return false; }
-bool CallNew::IsPrimitive() { return false; }
-bool ThisFunction::IsPrimitive() { return false; }
-
-
-// The following expression types are not always primitive because we do not
-// have enough information to conclude that they are.
-bool Property::IsPrimitive() { return false; }
-bool Call::IsPrimitive() { return false; }
-bool CallRuntime::IsPrimitive() { return false; }
-
-
-// A variable use is not primitive unless the primitive-type analysis
-// determines otherwise.
-bool VariableProxy::IsPrimitive() {
- ASSERT(!is_primitive_ || (var() != NULL && var()->IsStackAllocated()));
- return is_primitive_;
-}
-
-// The value of a conditional is the value of one of the alternatives. It's
-// always primitive if both alternatives are always primitive.
-bool Conditional::IsPrimitive() {
- return then_expression()->IsPrimitive() && else_expression()->IsPrimitive();
-}
-
-
-// A literal is primitive when it is not a JSObject.
-bool Literal::IsPrimitive() { return !handle()->IsJSObject(); }
-
-
-// The value of an assignment is the value of its right-hand side.
-bool Assignment::IsPrimitive() {
- switch (op()) {
- case Token::INIT_VAR:
- case Token::INIT_CONST:
- case Token::ASSIGN:
- return value()->IsPrimitive();
-
- default:
- // {|=, ^=, &=, <<=, >>=, >>>=, +=, -=, *=, /=, %=}
- // Arithmetic operations are always primitive. They express Numbers
- // with the exception of +, which expresses a Number or a String.
- return true;
- }
-}
-
-
-// Throw does not express a value, so it's trivially always primitive.
-bool Throw::IsPrimitive() { return true; }
-
-
-// Unary operations always express primitive values. delete and ! express
-// Booleans, void Undefined, typeof String, +, -, and ~ Numbers.
-bool UnaryOperation::IsPrimitive() { return true; }
-
-
-// Count operations (pre- and post-fix increment and decrement) always
-// express primitive values (Numbers). See ECMA-262-3, 11.3.1, 11.3.2,
-// 11.4.4, ane 11.4.5.
-bool CountOperation::IsPrimitive() { return true; }
-
-
-// Binary operations depend on the operator.
-bool BinaryOperation::IsPrimitive() {
- switch (op()) {
- case Token::COMMA:
- // Value is the value of the right subexpression.
- return right()->IsPrimitive();
-
- case Token::OR:
- case Token::AND:
- // Value is the value one of the subexpressions.
- return left()->IsPrimitive() && right()->IsPrimitive();
-
- default:
- // {|, ^, &, <<, >>, >>>, +, -, *, /, %}
- // Arithmetic operations are always primitive. They express Numbers
- // with the exception of +, which expresses a Number or a String.
- return true;
- }
-}
-
-
-// Compare operations always express Boolean values.
-bool CompareOperation::IsPrimitive() { return true; }
-
-
-// Overridden IsCritical member functions. IsCritical is true for AST nodes
-// whose evaluation is absolutely required (they are never dead) because
-// they are externally visible.
-
-// References to global variables or lookup slots are critical because they
-// may have getters. All others, including parameters rewritten to explicit
-// property references, are not critical.
-bool VariableProxy::IsCritical() {
- Variable* var = AsVariable();
- return var != NULL &&
- (var->slot() == NULL || var->slot()->type() == Slot::LOOKUP);
-}
-
-
-// Literals are never critical.
-bool Literal::IsCritical() { return false; }
-
-
-// Property assignments and throwing of reference errors are always
-// critical. Assignments to escaping variables are also critical. In
-// addition the operation of compound assignments is critical if either of
-// its operands is non-primitive (the arithmetic operations all use one of
-// ToPrimitive, ToNumber, ToInt32, or ToUint32 on each of their operands).
-// In this case, we mark the entire AST node as critical because there is
-// no binary operation node to mark.
-bool Assignment::IsCritical() {
- Variable* var = AssignedVariable();
- return var == NULL ||
- !var->IsStackAllocated() ||
- (is_compound() && (!target()->IsPrimitive() || !value()->IsPrimitive()));
-}
-
-
-// Property references are always critical, because they may have getters.
-bool Property::IsCritical() { return true; }
-
-
-// Calls are always critical.
-bool Call::IsCritical() { return true; }
-
-
-// +,- use ToNumber on the value of their operand.
-bool UnaryOperation::IsCritical() {
- ASSERT(op() == Token::ADD || op() == Token::SUB);
- return !expression()->IsPrimitive();
-}
-
-
-// Count operations targeting properties and reference errors are always
-// critical. Count operations on escaping variables are critical. Count
-// operations targeting non-primitives are also critical because they use
-// ToNumber.
-bool CountOperation::IsCritical() {
- Variable* var = AssignedVariable();
- return var == NULL ||
- !var->IsStackAllocated() ||
- !expression()->IsPrimitive();
-}
-
-
-// Arithmetic operations all use one of ToPrimitive, ToNumber, ToInt32, or
-// ToUint32 on each of their operands.
-bool BinaryOperation::IsCritical() {
- ASSERT(op() != Token::COMMA);
- ASSERT(op() != Token::OR);
- ASSERT(op() != Token::AND);
- return !left()->IsPrimitive() || !right()->IsPrimitive();
-}
-
-
-// <, >, <=, and >= all use ToPrimitive on both their operands.
-bool CompareOperation::IsCritical() {
- ASSERT(op() != Token::EQ);
- ASSERT(op() != Token::NE);
- ASSERT(op() != Token::EQ_STRICT);
- ASSERT(op() != Token::NE_STRICT);
- ASSERT(op() != Token::INSTANCEOF);
- ASSERT(op() != Token::IN);
- return !left()->IsPrimitive() || !right()->IsPrimitive();
-}
-
-
-// Implementation of a copy visitor. The visitor create a deep copy
-// of ast nodes. Nodes that do not require a deep copy are copied
-// with the default copy constructor.
-
-AstNode::AstNode(AstNode* other) : num_(kNoNumber) {
- // AST node number should be unique. Assert that we only copy AstNodes
- // before node numbers are assigned.
- ASSERT(other->num_ == kNoNumber);
-}
-
-
-Statement::Statement(Statement* other)
- : AstNode(other), statement_pos_(other->statement_pos_) {}
-
-
-Expression::Expression(Expression* other)
- : AstNode(other),
- bitfields_(other->bitfields_),
- type_(other->type_) {}
-
-
-BreakableStatement::BreakableStatement(BreakableStatement* other)
- : Statement(other), labels_(other->labels_), type_(other->type_) {}
-
-
-Block::Block(Block* other, ZoneList<Statement*>* statements)
- : BreakableStatement(other),
- statements_(statements->length()),
- is_initializer_block_(other->is_initializer_block_) {
- statements_.AddAll(*statements);
-}
-
WhileStatement::WhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
@@ -795,358 +631,8 @@ WhileStatement::WhileStatement(ZoneStringList* labels)
}
-ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
- Expression* expression)
- : Statement(other), expression_(expression) {}
-
-
-IfStatement::IfStatement(IfStatement* other,
- Expression* condition,
- Statement* then_statement,
- Statement* else_statement)
- : Statement(other),
- condition_(condition),
- then_statement_(then_statement),
- else_statement_(else_statement) {}
-
-
-EmptyStatement::EmptyStatement(EmptyStatement* other) : Statement(other) {}
-
-
-IterationStatement::IterationStatement(IterationStatement* other,
- Statement* body)
- : BreakableStatement(other), body_(body) {}
-
-
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
: label_(label), statements_(statements) {
}
-
-ForStatement::ForStatement(ForStatement* other,
- Statement* init,
- Expression* cond,
- Statement* next,
- Statement* body)
- : IterationStatement(other, body),
- init_(init),
- cond_(cond),
- next_(next),
- may_have_function_literal_(other->may_have_function_literal_),
- loop_variable_(other->loop_variable_),
- peel_this_loop_(other->peel_this_loop_) {}
-
-
-Assignment::Assignment(Assignment* other,
- Expression* target,
- Expression* value)
- : Expression(other),
- op_(other->op_),
- target_(target),
- value_(value),
- pos_(other->pos_),
- block_start_(other->block_start_),
- block_end_(other->block_end_) {}
-
-
-Property::Property(Property* other, Expression* obj, Expression* key)
- : Expression(other),
- obj_(obj),
- key_(key),
- pos_(other->pos_),
- type_(other->type_) {}
-
-
-Call::Call(Call* other,
- Expression* expression,
- ZoneList<Expression*>* arguments)
- : Expression(other),
- expression_(expression),
- arguments_(arguments),
- pos_(other->pos_) {}
-
-
-UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression)
- : Expression(other), op_(other->op_), expression_(expression) {}
-
-
-BinaryOperation::BinaryOperation(Expression* other,
- Token::Value op,
- Expression* left,
- Expression* right)
- : Expression(other), op_(op), left_(left), right_(right) {}
-
-
-CountOperation::CountOperation(CountOperation* other, Expression* expression)
- : Expression(other),
- is_prefix_(other->is_prefix_),
- op_(other->op_),
- expression_(expression) {}
-
-
-CompareOperation::CompareOperation(CompareOperation* other,
- Expression* left,
- Expression* right)
- : Expression(other),
- op_(other->op_),
- left_(left),
- right_(right) {}
-
-
-Expression* CopyAstVisitor::DeepCopyExpr(Expression* expr) {
- expr_ = NULL;
- if (expr != NULL) Visit(expr);
- return expr_;
-}
-
-
-Statement* CopyAstVisitor::DeepCopyStmt(Statement* stmt) {
- stmt_ = NULL;
- if (stmt != NULL) Visit(stmt);
- return stmt_;
-}
-
-
-ZoneList<Expression*>* CopyAstVisitor::DeepCopyExprList(
- ZoneList<Expression*>* expressions) {
- ZoneList<Expression*>* copy =
- new ZoneList<Expression*>(expressions->length());
- for (int i = 0; i < expressions->length(); i++) {
- copy->Add(DeepCopyExpr(expressions->at(i)));
- }
- return copy;
-}
-
-
-ZoneList<Statement*>* CopyAstVisitor::DeepCopyStmtList(
- ZoneList<Statement*>* statements) {
- ZoneList<Statement*>* copy = new ZoneList<Statement*>(statements->length());
- for (int i = 0; i < statements->length(); i++) {
- copy->Add(DeepCopyStmt(statements->at(i)));
- }
- return copy;
-}
-
-
-void CopyAstVisitor::VisitBlock(Block* stmt) {
- stmt_ = new Block(stmt,
- DeepCopyStmtList(stmt->statements()));
-}
-
-
-void CopyAstVisitor::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- stmt_ = new ExpressionStatement(stmt, DeepCopyExpr(stmt->expression()));
-}
-
-
-void CopyAstVisitor::VisitEmptyStatement(EmptyStatement* stmt) {
- stmt_ = new EmptyStatement(stmt);
-}
-
-
-void CopyAstVisitor::VisitIfStatement(IfStatement* stmt) {
- stmt_ = new IfStatement(stmt,
- DeepCopyExpr(stmt->condition()),
- DeepCopyStmt(stmt->then_statement()),
- DeepCopyStmt(stmt->else_statement()));
-}
-
-
-void CopyAstVisitor::VisitContinueStatement(ContinueStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitBreakStatement(BreakStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitReturnStatement(ReturnStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitWithExitStatement(WithExitStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitWhileStatement(WhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitForStatement(ForStatement* stmt) {
- stmt_ = new ForStatement(stmt,
- DeepCopyStmt(stmt->init()),
- DeepCopyExpr(stmt->cond()),
- DeepCopyStmt(stmt->next()),
- DeepCopyStmt(stmt->body()));
-}
-
-
-void CopyAstVisitor::VisitForInStatement(ForInStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitConditional(Conditional* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void CopyAstVisitor::VisitVariableProxy(VariableProxy* expr) {
- expr_ = new VariableProxy(*expr);
-}
-
-
-void CopyAstVisitor::VisitLiteral(Literal* expr) {
- expr_ = new Literal(*expr);
-}
-
-
-void CopyAstVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitAssignment(Assignment* expr) {
- expr_ = new Assignment(expr,
- DeepCopyExpr(expr->target()),
- DeepCopyExpr(expr->value()));
-}
-
-
-void CopyAstVisitor::VisitThrow(Throw* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitProperty(Property* expr) {
- expr_ = new Property(expr,
- DeepCopyExpr(expr->obj()),
- DeepCopyExpr(expr->key()));
-}
-
-
-void CopyAstVisitor::VisitCall(Call* expr) {
- expr_ = new Call(expr,
- DeepCopyExpr(expr->expression()),
- DeepCopyExprList(expr->arguments()));
-}
-
-
-void CopyAstVisitor::VisitCallNew(CallNew* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitCallRuntime(CallRuntime* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitUnaryOperation(UnaryOperation* expr) {
- expr_ = new UnaryOperation(expr, DeepCopyExpr(expr->expression()));
-}
-
-
-void CopyAstVisitor::VisitCountOperation(CountOperation* expr) {
- expr_ = new CountOperation(expr,
- DeepCopyExpr(expr->expression()));
-}
-
-
-void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) {
- expr_ = new BinaryOperation(expr,
- expr->op(),
- DeepCopyExpr(expr->left()),
- DeepCopyExpr(expr->right()));
-}
-
-
-void CopyAstVisitor::VisitCompareOperation(CompareOperation* expr) {
- expr_ = new CompareOperation(expr,
- DeepCopyExpr(expr->left()),
- DeepCopyExpr(expr->right()));
-}
-
-
-void CopyAstVisitor::VisitThisFunction(ThisFunction* expr) {
- SetStackOverflow();
-}
-
-
-void CopyAstVisitor::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
} } // namespace v8::internal
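
The new BinaryOperation(Assignment*) constructor defined above lets the compiler treat a compound assignment such as x += y as the plain binary operation x + y while carrying over the assignment's analysis results. A self-contained sketch of that decomposition, using simplified stand-in types rather than the real AST classes:

    #include <cassert>

    struct Expr {};  // stand-in for Expression

    enum Token { ADD, SUB, ASSIGN_ADD, ASSIGN_SUB };

    struct Assignment {
      Token op;      // ASSIGN_ADD models "x += y"
      Expr* target;  // x
      Expr* value;   // y
      Token binary_op() const { return op == ASSIGN_ADD ? ADD : SUB; }
    };

    // Mirrors BinaryOperation(Assignment*): reuse the assignment's operands.
    struct BinaryOperation {
      Token op;
      Expr* left;
      Expr* right;
      explicit BinaryOperation(const Assignment& a)
          : op(a.binary_op()), left(a.target), right(a.value) {}
    };

    int main() {
      Expr x, y;
      Assignment assign = {ASSIGN_ADD, &x, &y};  // x += y
      BinaryOperation bin(assign);               // compiled like x + y
      assert(bin.op == ADD && bin.left == &x && bin.right == &y);
      return 0;
    }
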
diff --git a/src/ast.h b/src/ast.h
index b9a7a3dd..5071b2cd 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -89,9 +89,11 @@ namespace internal {
V(CallNew) \
V(CallRuntime) \
V(UnaryOperation) \
+ V(IncrementOperation) \
V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
+ V(CompareToNull) \
V(ThisFunction)
#define AST_NODE_LIST(V) \
@@ -118,12 +120,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
class AstNode: public ZoneObject {
public:
- static const int kNoNumber = -1;
-
- AstNode() : num_(kNoNumber) {}
-
- explicit AstNode(AstNode* other);
-
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
@@ -151,20 +147,6 @@ class AstNode: public ZoneObject {
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
virtual CompareOperation* AsCompareOperation() { return NULL; }
-
- // True if the AST node is critical (its execution is needed or externally
- // visible in some way).
- virtual bool IsCritical() {
- UNREACHABLE();
- return true;
- }
-
- int num() { return num_; }
- void set_num(int n) { num_ = n; }
-
- private:
- // Support for ast node numbering.
- int num_;
};
@@ -172,8 +154,6 @@ class Statement: public AstNode {
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
- explicit Statement(Statement* other);
-
virtual Statement* AsStatement() { return this; }
virtual ReturnStatement* AsReturnStatement() { return NULL; }
@@ -201,48 +181,33 @@ class Expression: public AstNode {
// Evaluated for its value (and side effects).
kValue,
// Evaluated for control flow (and side effects).
- kTest,
- // Evaluated for control flow and side effects. Value is also
- // needed if true.
- kValueTest,
- // Evaluated for control flow and side effects. Value is also
- // needed if false.
- kTestValue
+ kTest
};
Expression() : bitfields_(0) {}
- explicit Expression(Expression* other);
-
virtual Expression* AsExpression() { return this; }
+ virtual bool IsTrivial() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
- virtual Variable* AssignedVariable() { return NULL; }
-
// Symbols that cannot be parsed as array indices are considered property
// names. We do not treat symbols that can be array indices as property
// names because [] for string objects is handled only by keyed ICs.
virtual bool IsPropertyName() { return false; }
- // True if the expression does not have (evaluated) subexpressions.
- // Function literals are leaves because their subexpressions are not
- // evaluated.
- virtual bool IsLeaf() { return false; }
-
- // True if the expression has no side effects and is safe to
- // evaluate out of order.
- virtual bool IsTrivial() { return false; }
-
- // True if the expression always has one of the non-Object JS types
- // (Undefined, Null, Boolean, String, or Number).
- virtual bool IsPrimitive() = 0;
-
// Mark the expression as being compiled as an expression
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
virtual void MarkAsStatement() { /* do nothing */ }
+ // True iff the result can be safely overwritten (to avoid allocation).
+ // False for operations that can return one of their operands.
+ virtual bool ResultOverwriteAllowed() { return false; }
+
+ // True iff the expression is a literal represented as a smi.
+ virtual bool IsSmiLiteral() { return false; }
+
// Static type information for this expression.
StaticType* type() { return &type_; }
@@ -259,7 +224,8 @@ class Expression: public AstNode {
// top operation is a bit operation with a mask, or a shift.
bool GuaranteedSmiResult();
- // AST analysis results
+ // AST analysis results.
+ void CopyAnalysisResultsFrom(Expression* other);
// True if the expression rooted at this node can be compiled by the
// side-effect free compiler.
@@ -320,11 +286,6 @@ class ValidLeftHandSideSentinel: public Expression {
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
static ValidLeftHandSideSentinel* instance() { return &instance_; }
- virtual bool IsPrimitive() {
- UNREACHABLE();
- return false;
- }
-
private:
static ValidLeftHandSideSentinel instance_;
};
@@ -353,8 +314,6 @@ class BreakableStatement: public Statement {
protected:
inline BreakableStatement(ZoneStringList* labels, Type type);
- explicit BreakableStatement(BreakableStatement* other);
-
private:
ZoneStringList* labels_;
Type type_;
@@ -366,10 +325,6 @@ class Block: public BreakableStatement {
public:
inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
- // Construct a clone initialized from the original block and
- // a deep copy of all statements of the original block.
- Block(Block* other, ZoneList<Statement*>* statements);
-
virtual void Accept(AstVisitor* v);
virtual Block* AsBlock() { return this; }
@@ -433,10 +388,6 @@ class IterationStatement: public BreakableStatement {
protected:
explicit inline IterationStatement(ZoneStringList* labels);
- // Construct a clone initialized from original and
- // a deep copy of the original body.
- IterationStatement(IterationStatement* other, Statement* body);
-
void Initialize(Statement* body) {
body_ = body;
}
@@ -486,13 +437,14 @@ class WhileStatement: public IterationStatement {
bool may_have_function_literal() const {
return may_have_function_literal_;
}
+ void set_may_have_function_literal(bool value) {
+ may_have_function_literal_ = value;
+ }
private:
Expression* cond_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
-
- friend class AstOptimizer;
};
@@ -500,14 +452,6 @@ class ForStatement: public IterationStatement {
public:
explicit inline ForStatement(ZoneStringList* labels);
- // Construct a for-statement initialized from another for-statement
- // and deep copies of all parts of the original statement.
- ForStatement(ForStatement* other,
- Statement* init,
- Expression* cond,
- Statement* next,
- Statement* body);
-
virtual ForStatement* AsForStatement() { return this; }
void Initialize(Statement* init,
@@ -528,17 +472,18 @@ class ForStatement: public IterationStatement {
void set_cond(Expression* expr) { cond_ = expr; }
Statement* next() const { return next_; }
void set_next(Statement* stmt) { next_ = stmt; }
+
bool may_have_function_literal() const {
return may_have_function_literal_;
}
+ void set_may_have_function_literal(bool value) {
+ may_have_function_literal_ = value;
+ }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
Variable* loop_variable() { return loop_variable_; }
void set_loop_variable(Variable* var) { loop_variable_ = var; }
- bool peel_this_loop() { return peel_this_loop_; }
- void set_peel_this_loop(bool b) { peel_this_loop_ = b; }
-
private:
Statement* init_;
Expression* cond_;
@@ -546,9 +491,6 @@ class ForStatement: public IterationStatement {
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
- bool peel_this_loop_;
-
- friend class AstOptimizer;
};
@@ -578,10 +520,6 @@ class ExpressionStatement: public Statement {
explicit ExpressionStatement(Expression* expression)
: expression_(expression) { }
- // Construct an expression statement initialized from another
- // expression statement and a deep copy of the original expression.
- ExpressionStatement(ExpressionStatement* other, Expression* expression);
-
virtual void Accept(AstVisitor* v);
// Type testing & conversion.
@@ -721,13 +659,6 @@ class IfStatement: public Statement {
then_statement_(then_statement),
else_statement_(else_statement) { }
- // Construct an if-statement initialized from another if-statement
- // and deep copies of all parts of the original.
- IfStatement(IfStatement* other,
- Expression* condition,
- Statement* then_statement,
- Statement* else_statement);
-
virtual void Accept(AstVisitor* v);
bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
@@ -834,8 +765,6 @@ class EmptyStatement: public Statement {
public:
EmptyStatement() {}
- explicit EmptyStatement(EmptyStatement* other);
-
virtual void Accept(AstVisitor* v);
// Type testing & conversion.
@@ -848,6 +777,8 @@ class Literal: public Expression {
explicit Literal(Handle<Object> handle) : handle_(handle) { }
virtual void Accept(AstVisitor* v);
+ virtual bool IsTrivial() { return true; }
+ virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
// Type testing & conversion.
virtual Literal* AsLiteral() { return this; }
@@ -865,11 +796,6 @@ class Literal: public Expression {
return false;
}
- virtual bool IsLeaf() { return true; }
- virtual bool IsTrivial() { return true; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@@ -916,7 +842,6 @@ class ObjectLiteral: public MaterializedLiteral {
// to the code generator.
class Property: public ZoneObject {
public:
-
enum Kind {
CONSTANT, // Property with constant value (compile time).
COMPUTED, // Property with computed value (execution time).
@@ -954,10 +879,6 @@ class ObjectLiteral: public MaterializedLiteral {
virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v);
- virtual bool IsLeaf() { return properties()->is_empty(); }
-
- virtual bool IsPrimitive();
-
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
@@ -984,10 +905,6 @@ class RegExpLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
- virtual bool IsLeaf() { return true; }
-
- virtual bool IsPrimitive();
-
Handle<String> pattern() const { return pattern_; }
Handle<String> flags() const { return flags_; }
@@ -1012,10 +929,6 @@ class ArrayLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
- virtual bool IsLeaf() { return values()->is_empty(); }
-
- virtual bool IsPrimitive();
-
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
@@ -1036,8 +949,6 @@ class CatchExtensionObject: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Literal* key() const { return key_; }
VariableProxy* value() const { return value_; }
@@ -1055,7 +966,10 @@ class VariableProxy: public Expression {
virtual Property* AsProperty() {
return var_ == NULL ? NULL : var_->AsProperty();
}
- virtual VariableProxy* AsVariableProxy() { return this; }
+
+ virtual VariableProxy* AsVariableProxy() {
+ return this;
+ }
Variable* AsVariable() {
return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
@@ -1065,20 +979,12 @@ class VariableProxy: public Expression {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
- virtual bool IsLeaf() {
- ASSERT(var_ != NULL); // Variable must be resolved.
- return var()->is_global() || var()->rewrite()->IsLeaf();
+ virtual bool IsTrivial() {
+ // Reading from a mutable variable is a side effect, but the
+ // variable for 'this' is immutable.
+ return is_this_ || is_trivial_;
}
- // Reading from a mutable variable is a side effect, but 'this' is
- // immutable.
- virtual bool IsTrivial() { return is_trivial_; }
-
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
- void SetIsPrimitive(bool value) { is_primitive_ = value; }
-
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
}
@@ -1092,11 +998,8 @@ class VariableProxy: public Expression {
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
bool inside_with() const { return inside_with_; }
- bool is_trivial() { return is_trivial_; }
- void set_is_trivial(bool b) { is_trivial_ = b; }
- BitVector* reaching_definitions() { return reaching_definitions_; }
- void set_reaching_definitions(BitVector* rd) { reaching_definitions_ = rd; }
+ void MarkAsTrivial() { is_trivial_ = true; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -1107,8 +1010,6 @@ class VariableProxy: public Expression {
bool is_this_;
bool inside_with_;
bool is_trivial_;
- BitVector* reaching_definitions_;
- bool is_primitive_;
VariableProxy(Handle<String> name, bool is_this, bool inside_with);
explicit VariableProxy(bool is_this);
@@ -1125,11 +1026,6 @@ class VariableProxySentinel: public VariableProxy {
return &identifier_proxy_;
}
- virtual bool IsPrimitive() {
- UNREACHABLE();
- return false;
- }
-
private:
explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
static VariableProxySentinel this_proxy_;
@@ -1171,13 +1067,6 @@ class Slot: public Expression {
// Type testing & conversion
virtual Slot* AsSlot() { return this; }
- virtual bool IsLeaf() { return true; }
-
- virtual bool IsPrimitive() {
- UNREACHABLE();
- return false;
- }
-
bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
// Accessors
@@ -1203,8 +1092,6 @@ class Property: public Expression {
Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
: obj_(obj), key_(key), pos_(pos), type_(type) { }
- Property(Property* other, Expression* obj, Expression* key);
-
virtual void Accept(AstVisitor* v);
// Type testing & conversion
@@ -1212,9 +1099,6 @@ class Property: public Expression {
virtual bool IsValidLeftHandSide() { return true; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
int position() const { return pos_; }
@@ -1240,16 +1124,11 @@ class Call: public Expression {
Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
: expression_(expression), arguments_(arguments), pos_(pos) { }
- Call(Call* other, Expression* expression, ZoneList<Expression*>* arguments);
-
virtual void Accept(AstVisitor* v);
// Type testing and conversion.
virtual Call* AsCall() { return this; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
int position() { return pos_; }
@@ -1272,8 +1151,6 @@ class CallNew: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
int position() { return pos_; }
@@ -1298,8 +1175,6 @@ class CallRuntime: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Handle<String> name() const { return name_; }
Runtime::Function* function() const { return function_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1319,16 +1194,12 @@ class UnaryOperation: public Expression {
ASSERT(Token::IsUnaryOp(op));
}
- UnaryOperation(UnaryOperation* other, Expression* expression);
-
virtual void Accept(AstVisitor* v);
+ virtual bool ResultOverwriteAllowed();
// Type testing & conversion
virtual UnaryOperation* AsUnaryOperation() { return this; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
@@ -1340,120 +1211,102 @@ class UnaryOperation: public Expression {
class BinaryOperation: public Expression {
public:
- BinaryOperation(Token::Value op, Expression* left, Expression* right)
- : op_(op), left_(left), right_(right) {
+ BinaryOperation(Token::Value op,
+ Expression* left,
+ Expression* right,
+ int pos)
+ : op_(op), left_(left), right_(right), pos_(pos) {
ASSERT(Token::IsBinaryOp(op));
}
- // Construct a binary operation with a given operator and left and right
- // subexpressions. The rest of the expression state is copied from
- // another expression.
- BinaryOperation(Expression* other,
- Token::Value op,
- Expression* left,
- Expression* right);
+ // Create the binary operation corresponding to a compound assignment.
+ explicit BinaryOperation(Assignment* assignment);
virtual void Accept(AstVisitor* v);
+ virtual bool ResultOverwriteAllowed();
// Type testing & conversion
virtual BinaryOperation* AsBinaryOperation() { return this; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
- // True iff the result can be safely overwritten (to avoid allocation).
- // False for operations that can return one of their operands.
- bool ResultOverwriteAllowed() {
- switch (op_) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- return false;
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- return true;
- default:
- UNREACHABLE();
- }
- return false;
- }
-
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
+ int position() const { return pos_; }
private:
Token::Value op_;
Expression* left_;
Expression* right_;
+ int pos_;
};
-class CountOperation: public Expression {
+class IncrementOperation: public Expression {
public:
- CountOperation(bool is_prefix, Token::Value op, Expression* expression)
- : is_prefix_(is_prefix), op_(op), expression_(expression) {
+ IncrementOperation(Token::Value op, Expression* expr)
+ : op_(op), expression_(expr) {
ASSERT(Token::IsCountOp(op));
}
- CountOperation(CountOperation* other, Expression* expression);
+ Token::Value op() const { return op_; }
+ bool is_increment() { return op_ == Token::INC; }
+ Expression* expression() const { return expression_; }
virtual void Accept(AstVisitor* v);
- virtual CountOperation* AsCountOperation() { return this; }
+ private:
+ Token::Value op_;
+ Expression* expression_;
+ int pos_;
+};
- virtual Variable* AssignedVariable() {
- return expression()->AsVariableProxy()->AsVariable();
- }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
+class CountOperation: public Expression {
+ public:
+ CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
+ : is_prefix_(is_prefix), increment_(increment), pos_(pos) { }
+
+ virtual void Accept(AstVisitor* v);
+
+ virtual CountOperation* AsCountOperation() { return this; }
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
- Token::Value op() const { return op_; }
+
+ Token::Value op() const { return increment_->op(); }
Token::Value binary_op() {
- return op_ == Token::INC ? Token::ADD : Token::SUB;
+ return (op() == Token::INC) ? Token::ADD : Token::SUB;
}
- Expression* expression() const { return expression_; }
+
+ Expression* expression() const { return increment_->expression(); }
+ IncrementOperation* increment() const { return increment_; }
+ int position() const { return pos_; }
virtual void MarkAsStatement() { is_prefix_ = true; }
private:
bool is_prefix_;
- Token::Value op_;
- Expression* expression_;
+ IncrementOperation* increment_;
+ int pos_;
};
class CompareOperation: public Expression {
public:
- CompareOperation(Token::Value op, Expression* left, Expression* right)
- : op_(op), left_(left), right_(right) {
+ CompareOperation(Token::Value op,
+ Expression* left,
+ Expression* right,
+ int pos)
+ : op_(op), left_(left), right_(right), pos_(pos) {
ASSERT(Token::IsCompareOp(op));
}
- CompareOperation(CompareOperation* other,
- Expression* left,
- Expression* right);
-
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
+ int position() const { return pos_; }
// Type testing & conversion
virtual CompareOperation* AsCompareOperation() { return this; }
@@ -1462,6 +1315,24 @@ class CompareOperation: public Expression {
Token::Value op_;
Expression* left_;
Expression* right_;
+ int pos_;
+};
+
+
+class CompareToNull: public Expression {
+ public:
+ CompareToNull(bool is_strict, Expression* expression)
+ : is_strict_(is_strict), expression_(expression) { }
+
+ virtual void Accept(AstVisitor* v);
+
+ bool is_strict() const { return is_strict_; }
+ Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
+ Expression* expression() const { return expression_; }
+
+ private:
+ bool is_strict_;
+ Expression* expression_;
};
@@ -1480,8 +1351,6 @@ class Conditional: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Expression* condition() const { return condition_; }
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
@@ -1506,20 +1375,11 @@ class Assignment: public Expression {
ASSERT(Token::IsAssignmentOp(op));
}
- Assignment(Assignment* other, Expression* target, Expression* value);
-
virtual void Accept(AstVisitor* v);
virtual Assignment* AsAssignment() { return this; }
- virtual bool IsPrimitive();
- virtual bool IsCritical();
-
Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
- virtual Variable* AssignedVariable() {
- return target()->AsVariableProxy()->AsVariable();
- }
-
Token::Value binary_op() const;
Token::Value op() const { return op_; }
@@ -1555,8 +1415,6 @@ class Throw: public Expression {
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
Expression* exception() const { return exception_; }
int position() const { return pos_; }
@@ -1578,7 +1436,8 @@ class FunctionLiteral: public Expression {
int num_parameters,
int start_position,
int end_position,
- bool is_expression)
+ bool is_expression,
+ bool contains_loops)
: name_(name),
scope_(scope),
body_(body),
@@ -1591,6 +1450,7 @@ class FunctionLiteral: public Expression {
start_position_(start_position),
end_position_(end_position),
is_expression_(is_expression),
+ contains_loops_(contains_loops),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(Heap::empty_string()),
try_full_codegen_(false) {
@@ -1604,10 +1464,6 @@ class FunctionLiteral: public Expression {
// Type testing & conversion
virtual FunctionLiteral* AsFunctionLiteral() { return this; }
- virtual bool IsLeaf() { return true; }
-
- virtual bool IsPrimitive();
-
Handle<String> name() const { return name_; }
Scope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
@@ -1616,6 +1472,7 @@ class FunctionLiteral: public Expression {
int start_position() const { return start_position_; }
int end_position() const { return end_position_; }
bool is_expression() const { return is_expression_; }
+ bool contains_loops() const { return contains_loops_; }
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
@@ -1656,6 +1513,7 @@ class FunctionLiteral: public Expression {
int start_position_;
int end_position_;
bool is_expression_;
+ bool contains_loops_;
int function_token_position_;
Handle<String> inferred_name_;
bool try_full_codegen_;
@@ -1675,12 +1533,8 @@ class SharedFunctionInfoLiteral: public Expression {
return shared_function_info_;
}
- virtual bool IsLeaf() { return true; }
-
virtual void Accept(AstVisitor* v);
- virtual bool IsPrimitive();
-
private:
Handle<SharedFunctionInfo> shared_function_info_;
};
@@ -1689,8 +1543,6 @@ class SharedFunctionInfoLiteral: public Expression {
class ThisFunction: public Expression {
public:
virtual void Accept(AstVisitor* v);
- virtual bool IsLeaf() { return true; }
- virtual bool IsPrimitive();
};
@@ -2078,29 +1930,6 @@ class AstVisitor BASE_EMBEDDED {
bool stack_overflow_;
};
-
-class CopyAstVisitor : public AstVisitor {
- public:
- Expression* DeepCopyExpr(Expression* expr);
-
- Statement* DeepCopyStmt(Statement* stmt);
-
- private:
- ZoneList<Expression*>* DeepCopyExprList(ZoneList<Expression*>* expressions);
-
- ZoneList<Statement*>* DeepCopyStmtList(ZoneList<Statement*>* statements);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- // Holds the result of copying an expression.
- Expression* expr_;
- // Holds the result of copying a statement.
- Statement* stmt_;
-};
-
} } // namespace v8::internal
#endif // V8_AST_H_
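
Among the header changes above, the new CompareToNull node captures the common patterns expr == null and expr === null, and its op() accessor reports which comparison it stands for. A trivial illustration of that mapping (a sketch with a stand-in type, not the V8 class):

    #include <cassert>

    enum Token { EQ, EQ_STRICT };

    // Stand-in mirroring CompareToNull::op() from the header above.
    struct CompareToNull {
      bool is_strict;
      Token op() const { return is_strict ? EQ_STRICT : EQ; }
    };

    int main() {
      CompareToNull loose = {false};   // expr == null
      CompareToNull strict = {true};   // expr === null
      assert(loose.op() == EQ);
      assert(strict.op() == EQ_STRICT);
      return 0;
    }
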
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index ce8e98d6..a82d1d69 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -232,6 +232,7 @@ class Genesis BASE_EMBEDDED {
bool InstallNatives();
void InstallCustomCallGenerators();
void InstallJSFunctionResultCaches();
+ void InitializeNormalizedMapCaches();
// Used both for deserialized and from-scratch contexts to add the extensions
// provided.
static bool InstallExtensions(Handle<Context> global_context,
@@ -719,6 +720,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
Top::initial_object_prototype(), Builtins::Illegal,
true);
+ string_fun->shared()->set_construct_stub(
+ Builtins::builtin(Builtins::StringConstructCode));
global_context()->set_string_function(*string_fun);
// Add 'length' property to strings.
Handle<DescriptorArray> string_descriptors =
@@ -1400,6 +1403,13 @@ void Genesis::InstallJSFunctionResultCaches() {
}
+void Genesis::InitializeNormalizedMapCaches() {
+ Handle<FixedArray> array(
+ Factory::NewFixedArray(NormalizedMapCache::kEntries, TENURED));
+ global_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
+}
+
+
int BootstrapperActive::nesting_ = 0;
@@ -1768,6 +1778,7 @@ Genesis::Genesis(Handle<Object> global_object,
HookUpGlobalProxy(inner_global, global_proxy);
InitializeGlobal(inner_global, empty_function);
InstallJSFunctionResultCaches();
+ InitializeNormalizedMapCaches();
if (!InstallNatives()) return;
MakeFunctionInstancePrototypeWritable();
diff --git a/src/builtins.cc b/src/builtins.cc
index a64bf4ec..b4f4a061 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -243,7 +243,7 @@ BUILTIN(ArrayCodeGeneric) {
}
-static Object* AllocateJSArray() {
+MUST_USE_RESULT static Object* AllocateJSArray() {
JSFunction* array_function =
Top::context()->global_context()->array_function();
Object* result = Heap::AllocateJSObject(array_function);
@@ -252,7 +252,7 @@ static Object* AllocateJSArray() {
}
-static Object* AllocateEmptyJSArray() {
+MUST_USE_RESULT static Object* AllocateEmptyJSArray() {
Object* result = AllocateJSArray();
if (result->IsFailure()) return result;
JSArray* result_array = JSArray::cast(result);
@@ -663,13 +663,9 @@ BUILTIN(ArraySplice) {
int n_arguments = args.length() - 1;
- // SpiderMonkey and JSC return undefined in the case where no
- // arguments are given instead of using the implicit undefined
- // arguments. This does not follow ECMA-262, but we do the same for
- // compatibility.
- // TraceMonkey follows ECMA-262 though.
+ // Return an empty array when no arguments are supplied.
if (n_arguments == 0) {
- return Heap::undefined_value();
+ return AllocateEmptyJSArray();
}
int relative_start = 0;
diff --git a/src/builtins.h b/src/builtins.h
index 375e8f3f..7e49f313 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -117,7 +117,10 @@ enum BuiltinExtraArguments {
V(FunctionApply, BUILTIN, UNINITIALIZED) \
\
V(ArrayCode, BUILTIN, UNINITIALIZED) \
- V(ArrayConstructCode, BUILTIN, UNINITIALIZED)
+ V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \
+ \
+ V(StringConstructCode, BUILTIN, UNINITIALIZED)
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@@ -258,6 +261,8 @@ class Builtins : public AllStatic {
static void Generate_ArrayCode(MacroAssembler* masm);
static void Generate_ArrayConstructCode(MacroAssembler* masm);
+
+ static void Generate_StringConstructCode(MacroAssembler* masm);
};
} } // namespace v8::internal
diff --git a/src/circular-queue.cc b/src/circular-queue.cc
index af650de5..928c3f0c 100644
--- a/src/circular-queue.cc
+++ b/src/circular-queue.cc
@@ -47,8 +47,9 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
producer_consumer_distance_(2 * chunk_size_),
buffer_(NewArray<Cell>(buffer_size_ + 1)) {
ASSERT(buffer_size_in_chunks > 2);
- // Only need to keep the first cell of a chunk clean.
- for (int i = 0; i < buffer_size_; i += chunk_size_) {
+ // Clean up the whole buffer to avoid encountering a random kEnd
+ // while enqueuing.
+ for (int i = 0; i < buffer_size_; ++i) {
buffer_[i] = kClear;
}
buffer_[buffer_size_] = kEnd;
diff --git a/src/code-stubs.h b/src/code-stubs.h
index e5a222fc..c2dd0a7a 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -29,6 +29,7 @@
#define V8_CODE_STUBS_H_
#include "globals.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -80,6 +81,14 @@ namespace internal {
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
+// Types of uncatchable exceptions.
+enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
+
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
+
+
 // Stub is the base class of all stubs.
class CodeStub BASE_EMBEDDED {
public:
@@ -105,6 +114,12 @@ class CodeStub BASE_EMBEDDED {
static int MinorKeyFromKey(uint32_t key) {
return MinorKeyBits::decode(key);
};
+
+ // Gets the major key from a code object that is a code stub or binary op IC.
+ static Major GetMajorKey(Code* code_stub) {
+ return static_cast<Major>(code_stub->major_key());
+ }
+
static const char* MajorName(Major major_key, bool allow_unknown_keys);
virtual ~CodeStub() {}
@@ -172,6 +187,609 @@ class CodeStub BASE_EMBEDDED {
friend class BreakPointIterator;
};
+
+// Helper interface to prepare for and restore after making runtime calls.
+class RuntimeCallHelper {
+ public:
+ virtual ~RuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const = 0;
+
+ virtual void AfterCall(MacroAssembler* masm) const = 0;
+
+ protected:
+ RuntimeCallHelper() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
+};
+
+} } // namespace v8::internal
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/code-stubs-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/code-stubs-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/code-stubs-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/code-stubs-mips.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
+// newly created internal frame before/after the runtime call.
+class ICRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ ICRuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const;
+
+ virtual void AfterCall(MacroAssembler* masm) const;
+};
+
+
+// Trivial RuntimeCallHelper implementation.
+class NopRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ NopRuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const {}
+
+ virtual void AfterCall(MacroAssembler* masm) const {}
+};
+
+
+class StackCheckStub : public CodeStub {
+ public:
+ StackCheckStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+
+ const char* GetName() { return "StackCheckStub"; }
+
+ Major MajorKey() { return StackCheck; }
+ int MinorKey() { return 0; }
+};
+
+
+class FastNewClosureStub : public CodeStub {
+ public:
+ void Generate(MacroAssembler* masm);
+
+ private:
+ const char* GetName() { return "FastNewClosureStub"; }
+ Major MajorKey() { return FastNewClosure; }
+ int MinorKey() { return 0; }
+};
+
+
+class FastNewContextStub : public CodeStub {
+ public:
+ static const int kMaximumSlots = 64;
+
+ explicit FastNewContextStub(int slots) : slots_(slots) {
+ ASSERT(slots_ > 0 && slots <= kMaximumSlots);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int slots_;
+
+ const char* GetName() { return "FastNewContextStub"; }
+ Major MajorKey() { return FastNewContext; }
+ int MinorKey() { return slots_; }
+};
+
+
+class FastCloneShallowArrayStub : public CodeStub {
+ public:
+ // Maximum length of copied elements array.
+ static const int kMaximumClonedLength = 8;
+
+ enum Mode {
+ CLONE_ELEMENTS,
+ COPY_ON_WRITE_ELEMENTS
+ };
+
+ FastCloneShallowArrayStub(Mode mode, int length)
+ : mode_(mode),
+ length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
+ ASSERT(length_ >= 0);
+ ASSERT(length_ <= kMaximumClonedLength);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Mode mode_;
+ int length_;
+
+ const char* GetName() { return "FastCloneShallowArrayStub"; }
+ Major MajorKey() { return FastCloneShallowArray; }
+ int MinorKey() {
+ ASSERT(mode_ == 0 || mode_ == 1);
+ return (length_ << 1) | mode_;
+ }
+};
+
+
+class InstanceofStub: public CodeStub {
+ public:
+ InstanceofStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return Instanceof; }
+ int MinorKey() { return 0; }
+};
+
+
+enum NegativeZeroHandling {
+ kStrictNegativeZero,
+ kIgnoreNegativeZero
+};
+
+
+class GenericUnaryOpStub : public CodeStub {
+ public:
+ GenericUnaryOpStub(Token::Value op,
+ UnaryOverwriteMode overwrite,
+ NegativeZeroHandling negative_zero = kStrictNegativeZero)
+ : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
+
+ private:
+ Token::Value op_;
+ UnaryOverwriteMode overwrite_;
+ NegativeZeroHandling negative_zero_;
+
+ class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
+ class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
+
+ Major MajorKey() { return GenericUnaryOp; }
+ int MinorKey() {
+ return OpField::encode(op_) |
+ OverwriteField::encode(overwrite_) |
+ NegativeZeroField::encode(negative_zero_);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName();
+};
+
+
+enum NaNInformation {
+ kBothCouldBeNaN,
+ kCantBothBeNaN
+};
+
+
+class CompareStub: public CodeStub {
+ public:
+ CompareStub(Condition cc,
+ bool strict,
+ NaNInformation nan_info = kBothCouldBeNaN,
+ bool include_number_compare = true,
+ Register lhs = no_reg,
+ Register rhs = no_reg) :
+ cc_(cc),
+ strict_(strict),
+ never_nan_nan_(nan_info == kCantBothBeNaN),
+ include_number_compare_(include_number_compare),
+ lhs_(lhs),
+ rhs_(rhs),
+ name_(NULL) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Condition cc_;
+ bool strict_;
+ // Only used for 'equal' comparisons. Tells the stub that we already know
+ // that at least one side of the comparison is not NaN. This allows the
+ // stub to use object identity in the positive case. We ignore it when
+ // generating the minor key for other comparisons to avoid creating more
+ // stubs.
+ bool never_nan_nan_;
+  // Whether to generate the number comparison code in the stub. Stubs without
+  // number comparison code are used when the number comparison has been
+  // inlined, and the stub will be called if one of the operands is not a number.
+ bool include_number_compare_;
+ // Register holding the left hand side of the comparison if the stub gives
+ // a choice, no_reg otherwise.
+ Register lhs_;
+ // Register holding the right hand side of the comparison if the stub gives
+ // a choice, no_reg otherwise.
+ Register rhs_;
+
+ // Encoding of the minor key CCCCCCCCCCCCRCNS.
+ class StrictField: public BitField<bool, 0, 1> {};
+ class NeverNanNanField: public BitField<bool, 1, 1> {};
+ class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
+ class RegisterField: public BitField<bool, 3, 1> {};
+ class ConditionField: public BitField<int, 4, 12> {};
+
+ Major MajorKey() { return Compare; }
+
+ int MinorKey();
+
+ // Branch to the label if the given object isn't a symbol.
+ void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch);
+
+ // Unfortunately you have to run without snapshots to see most of these
+ // names in the profile since most compare stubs end up in the snapshot.
+ char* name_;
+ const char* GetName();
+#ifdef DEBUG
+ void Print() {
+ PrintF("CompareStub (cc %d), (strict %s), "
+ "(never_nan_nan %s), (number_compare %s) ",
+ static_cast<int>(cc_),
+ strict_ ? "true" : "false",
+ never_nan_nan_ ? "true" : "false",
+ include_number_compare_ ? "included" : "not included");
+
+ if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
+ PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
+ } else {
+ PrintF("\n");
+ }
+ }
+#endif
+};
+
+
+class CEntryStub : public CodeStub {
+ public:
+ explicit CEntryStub(int result_size) : result_size_(result_size) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ void GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate_scope,
+ int alignment_skew = 0);
+ void GenerateThrowTOS(MacroAssembler* masm);
+ void GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type);
+
+ // Number of pointers/values returned.
+ const int result_size_;
+
+ Major MajorKey() { return CEntry; }
+  // The minor key must differ if different result_size_ values mean that
+  // different code is generated.
+ int MinorKey();
+
+ const char* GetName() { return "CEntryStub"; }
+};
+
+
+class ApiGetterEntryStub : public CodeStub {
+ public:
+ ApiGetterEntryStub(Handle<AccessorInfo> info,
+ ApiFunction* fun)
+ : info_(info),
+ fun_(fun) { }
+ void Generate(MacroAssembler* masm);
+ virtual bool has_custom_cache() { return true; }
+ virtual bool GetCustomCache(Code** code_out);
+ virtual void SetCustomCache(Code* value);
+
+ static const int kStackSpace = 5;
+ static const int kArgc = 4;
+ private:
+ Handle<AccessorInfo> info() { return info_; }
+ ApiFunction* fun() { return fun_; }
+ Major MajorKey() { return NoCache; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "ApiEntryStub"; }
+ // The accessor info associated with the function.
+ Handle<AccessorInfo> info_;
+ // The function to be called.
+ ApiFunction* fun_;
+};
+
+
+class JSEntryStub : public CodeStub {
+ public:
+ JSEntryStub() { }
+
+ void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
+
+ protected:
+ void GenerateBody(MacroAssembler* masm, bool is_construct);
+
+ private:
+ Major MajorKey() { return JSEntry; }
+ int MinorKey() { return 0; }
+
+ const char* GetName() { return "JSEntryStub"; }
+};
+
+
+class JSConstructEntryStub : public JSEntryStub {
+ public:
+ JSConstructEntryStub() { }
+
+ void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
+
+ private:
+ int MinorKey() { return 1; }
+
+ const char* GetName() { return "JSConstructEntryStub"; }
+};
+
+
+class ArgumentsAccessStub: public CodeStub {
+ public:
+ enum Type {
+ READ_ELEMENT,
+ NEW_OBJECT
+ };
+
+ explicit ArgumentsAccessStub(Type type) : type_(type) { }
+
+ private:
+ Type type_;
+
+ Major MajorKey() { return ArgumentsAccess; }
+ int MinorKey() { return type_; }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateReadElement(MacroAssembler* masm);
+ void GenerateNewObject(MacroAssembler* masm);
+
+ const char* GetName() { return "ArgumentsAccessStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("ArgumentsAccessStub (type %d)\n", type_);
+ }
+#endif
+};
+
+
+class RegExpExecStub: public CodeStub {
+ public:
+ RegExpExecStub() { }
+
+ private:
+ Major MajorKey() { return RegExpExec; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "RegExpExecStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RegExpExecStub\n");
+ }
+#endif
+};
+
+
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
+ : argc_(argc), in_loop_(in_loop), flags_(flags) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+ CallFunctionFlags flags_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
+ argc_,
+ static_cast<int>(in_loop_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+ // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
+ class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
+ class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
+ class ArgcBits: public BitField<int, 2, 32 - 2> {};
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() {
+ // Encode the parameters in a unique 32 bit value.
+ return InLoopBits::encode(in_loop_)
+ | FlagBits::encode(flags_)
+ | ArgcBits::encode(argc_);
+ }
+
+ InLoopFlag InLoop() { return in_loop_; }
+ bool ReceiverMightBeValue() {
+ return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
+ }
+
+ public:
+ static int ExtractArgcFromMinorKey(int minor_key) {
+ return ArgcBits::decode(minor_key);
+ }
+};
+
+
+enum StringIndexFlags {
+ // Accepts smis or heap numbers.
+ STRING_INDEX_IS_NUMBER,
+
+ // Accepts smis or heap numbers that are valid array indices
+ // (ECMA-262 15.4). Invalid indices are reported as being out of
+ // range.
+ STRING_INDEX_IS_ARRAY_INDEX
+};
+
+
+// Generates code implementing String.prototype.charCodeAt.
+//
+// Only supports the case when the receiver is a string and the index
+// is a number (smi or heap number) that is a valid index into the
+// string. Additional index constraints are specified by the
+// flags. Otherwise, bails out to the provided labels.
+//
+// Register usage: |object| may be changed to another string in a way
+// that doesn't affect charCodeAt/charAt semantics, |index| is
+// preserved, |scratch| and |result| are clobbered.
+class StringCharCodeAtGenerator {
+ public:
+ StringCharCodeAtGenerator(Register object,
+ Register index,
+ Register scratch,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_number,
+ Label* index_out_of_range,
+ StringIndexFlags index_flags)
+ : object_(object),
+ index_(index),
+ scratch_(scratch),
+ result_(result),
+ receiver_not_string_(receiver_not_string),
+ index_not_number_(index_not_number),
+ index_out_of_range_(index_out_of_range),
+ index_flags_(index_flags) {
+ ASSERT(!scratch_.is(object_));
+ ASSERT(!scratch_.is(index_));
+ ASSERT(!scratch_.is(result_));
+ ASSERT(!result_.is(object_));
+ ASSERT(!result_.is(index_));
+ }
+
+ // Generates the fast case code. On the fallthrough path |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ Register object_;
+ Register index_;
+ Register scratch_;
+ Register result_;
+
+ Label* receiver_not_string_;
+ Label* index_not_number_;
+ Label* index_out_of_range_;
+
+ StringIndexFlags index_flags_;
+
+ Label call_runtime_;
+ Label index_not_smi_;
+ Label got_smi_index_;
+ Label exit_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
+};
+
+
+// Generates code for creating a one-char string from a char code.
+class StringCharFromCodeGenerator {
+ public:
+ StringCharFromCodeGenerator(Register code,
+ Register result)
+ : code_(code),
+ result_(result) {
+ ASSERT(!code_.is(result_));
+ }
+
+ // Generates the fast case code. On the fallthrough path |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ Register code_;
+ Register result_;
+
+ Label slow_case_;
+ Label exit_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
+};
+
+
+// Generates code implementing String.prototype.charAt.
+//
+// Only supports the case when the receiver is a string and the index
+// is a number (smi or heap number) that is a valid index into the
+// string. Additional index constraints are specified by the
+// flags. Otherwise, bails out to the provided labels.
+//
+// Register usage: |object| may be changed to another string in a way
+// that doesn't affect charCodeAt/charAt semantics, |index| is
+// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
+class StringCharAtGenerator {
+ public:
+ StringCharAtGenerator(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_number,
+ Label* index_out_of_range,
+ StringIndexFlags index_flags)
+ : char_code_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ receiver_not_string,
+ index_not_number,
+ index_out_of_range,
+ index_flags),
+ char_from_code_generator_(scratch2, result) {}
+
+ // Generates the fast case code. On the fallthrough path |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ StringCharCodeAtGenerator char_code_at_generator_;
+ StringCharFromCodeGenerator char_from_code_generator_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/src/codegen.cc b/src/codegen.cc
index a9fab43f..20fb310b 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -339,6 +339,11 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
}
+void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
// List of special runtime calls which are generated inline. For some of these
// functions the code will be generated inline, and for others a call to a code
// stub will be inlined.
@@ -496,12 +501,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
+ ASSERT(result_size_ == 1 || result_size_ == 2);
#ifdef _WIN64
- return ExitFrameModeBits::encode(mode_)
- | IndirectResultBits::encode(result_size_ > 1);
+ return result_size_ == 1 ? 0 : 1;
#else
- return ExitFrameModeBits::encode(mode_);
+ return 0;
#endif
}
diff --git a/src/codegen.h b/src/codegen.h
index 56c175e4..3373d1c0 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -73,13 +73,6 @@
// CodeForSourcePosition
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
-
-// Types of uncatchable exceptions.
-enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
-
#define INLINE_RUNTIME_FUNCTION_LIST(F) \
F(IsSmi, 1, 1) \
F(IsNonNegativeSmi, 1, 1) \
@@ -116,7 +109,9 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
F(MathSin, 1, 1) \
F(MathCos, 1, 1) \
F(MathSqrt, 1, 1) \
- F(IsRegExpEquivalent, 2, 1)
+ F(IsRegExpEquivalent, 2, 1) \
+ F(HasCachedArrayIndex, 1, 1) \
+ F(GetCachedArrayIndex, 1, 1)
#if V8_TARGET_ARCH_IA32
@@ -136,29 +131,6 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
namespace v8 {
namespace internal {
-// Support for "structured" code comments.
-#ifdef DEBUG
-
-class Comment BASE_EMBEDDED {
- public:
- Comment(MacroAssembler* masm, const char* msg);
- ~Comment();
-
- private:
- MacroAssembler* masm_;
- const char* msg_;
-};
-
-#else
-
-class Comment BASE_EMBEDDED {
- public:
- Comment(MacroAssembler*, const char*) {}
-};
-
-#endif // DEBUG
-
-
// Code generation can be nested. Code generation scopes form a stack
// of active code generators.
class CodeGeneratorScope BASE_EMBEDDED {
@@ -231,23 +203,6 @@ class FrameRegisterState {
#endif
-// Helper interface to prepare to/restore after making runtime calls.
-class RuntimeCallHelper {
- public:
- virtual ~RuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const = 0;
-
- virtual void AfterCall(MacroAssembler* masm) const = 0;
-
- protected:
- RuntimeCallHelper() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
-};
-
-
// RuntimeCallHelper implementation that saves/restores state of a
// virtual frame.
class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
@@ -265,29 +220,6 @@ class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
};
-// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
-// newly created internal frame before/after the runtime call.
-class ICRuntimeCallHelper : public RuntimeCallHelper {
- public:
- ICRuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const;
-
- virtual void AfterCall(MacroAssembler* masm) const;
-};
-
-
-// Trivial RuntimeCallHelper implementation.
-class NopRuntimeCallHelper : public RuntimeCallHelper {
- public:
- NopRuntimeCallHelper() {}
-
- virtual void BeforeCall(MacroAssembler* masm) const {}
-
- virtual void AfterCall(MacroAssembler* masm) const {}
-};
-
-
// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
@@ -350,560 +282,7 @@ class DeferredCode: public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(DeferredCode);
};
-class StackCheckStub : public CodeStub {
- public:
- StackCheckStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
-
- const char* GetName() { return "StackCheckStub"; }
-
- Major MajorKey() { return StackCheck; }
- int MinorKey() { return 0; }
-};
-
-
-class FastNewClosureStub : public CodeStub {
- public:
- void Generate(MacroAssembler* masm);
-
- private:
- const char* GetName() { return "FastNewClosureStub"; }
- Major MajorKey() { return FastNewClosure; }
- int MinorKey() { return 0; }
-};
-
-
-class FastNewContextStub : public CodeStub {
- public:
- static const int kMaximumSlots = 64;
-
- explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots <= kMaximumSlots);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int slots_;
-
- const char* GetName() { return "FastNewContextStub"; }
- Major MajorKey() { return FastNewContext; }
- int MinorKey() { return slots_; }
-};
-
-
-class FastCloneShallowArrayStub : public CodeStub {
- public:
- // Maximum length of copied elements array.
- static const int kMaximumClonedLength = 8;
-
- enum Mode {
- CLONE_ELEMENTS,
- COPY_ON_WRITE_ELEMENTS
- };
-
- FastCloneShallowArrayStub(Mode mode, int length)
- : mode_(mode),
- length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
- ASSERT(length_ >= 0);
- ASSERT(length_ <= kMaximumClonedLength);
- }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Mode mode_;
- int length_;
-
- const char* GetName() { return "FastCloneShallowArrayStub"; }
- Major MajorKey() { return FastCloneShallowArray; }
- int MinorKey() {
- ASSERT(mode_ == 0 || mode_ == 1);
- return (length_ << 1) | mode_;
- }
-};
-
-
-class InstanceofStub: public CodeStub {
- public:
- InstanceofStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return Instanceof; }
- int MinorKey() { return 0; }
-};
-
-
-enum NegativeZeroHandling {
- kStrictNegativeZero,
- kIgnoreNegativeZero
-};
-
-
-class GenericUnaryOpStub : public CodeStub {
- public:
- GenericUnaryOpStub(Token::Value op,
- UnaryOverwriteMode overwrite,
- NegativeZeroHandling negative_zero = kStrictNegativeZero)
- : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode overwrite_;
- NegativeZeroHandling negative_zero_;
-
- class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
- class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
- class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
-
- Major MajorKey() { return GenericUnaryOp; }
- int MinorKey() {
- return OpField::encode(op_) |
- OverwriteField::encode(overwrite_) |
- NegativeZeroField::encode(negative_zero_);
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName();
-};
-
-
-enum NaNInformation {
- kBothCouldBeNaN,
- kCantBothBeNaN
-};
-
-
-class CompareStub: public CodeStub {
- public:
- CompareStub(Condition cc,
- bool strict,
- NaNInformation nan_info = kBothCouldBeNaN,
- bool include_number_compare = true,
- Register lhs = no_reg,
- Register rhs = no_reg) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_(nan_info == kCantBothBeNaN),
- include_number_compare_(include_number_compare),
- lhs_(lhs),
- rhs_(rhs),
- name_(NULL) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Condition cc_;
- bool strict_;
- // Only used for 'equal' comparisons. Tells the stub that we already know
- // that at least one side of the comparison is not NaN. This allows the
- // stub to use object identity in the positive case. We ignore it when
- // generating the minor key for other comparisons to avoid creating more
- // stubs.
- bool never_nan_nan_;
- // Do generate the number comparison code in the stub. Stubs without number
- // comparison code is used when the number comparison has been inlined, and
- // the stub will be called if one of the operands is not a number.
- bool include_number_compare_;
- // Register holding the left hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
- Register lhs_;
- // Register holding the right hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
- Register rhs_;
-
- // Encoding of the minor key CCCCCCCCCCCCRCNS.
- class StrictField: public BitField<bool, 0, 1> {};
- class NeverNanNanField: public BitField<bool, 1, 1> {};
- class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class RegisterField: public BitField<bool, 3, 1> {};
- class ConditionField: public BitField<int, 4, 12> {};
-
- Major MajorKey() { return Compare; }
-
- int MinorKey();
-
- // Branch to the label if the given object isn't a symbol.
- void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch);
-
- // Unfortunately you have to run without snapshots to see most of these
- // names in the profile since most compare stubs end up in the snapshot.
- char* name_;
- const char* GetName();
-#ifdef DEBUG
- void Print() {
- PrintF("CompareStub (cc %d), (strict %s), "
- "(never_nan_nan %s), (number_compare %s) ",
- static_cast<int>(cc_),
- strict_ ? "true" : "false",
- never_nan_nan_ ? "true" : "false",
- include_number_compare_ ? "included" : "not included");
-
- if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
- PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
- } else {
- PrintF("\n");
- }
- }
-#endif
-};
-
-
-class CEntryStub : public CodeStub {
- public:
- explicit CEntryStub(int result_size,
- ExitFrame::Mode mode = ExitFrame::MODE_NORMAL)
- : result_size_(result_size), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- void GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope,
- int alignment_skew = 0);
- void GenerateThrowTOS(MacroAssembler* masm);
- void GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type);
-
- // Number of pointers/values returned.
- const int result_size_;
- const ExitFrame::Mode mode_;
-
- // Minor key encoding
- class ExitFrameModeBits: public BitField<ExitFrame::Mode, 0, 1> {};
- class IndirectResultBits: public BitField<bool, 1, 1> {};
-
- Major MajorKey() { return CEntry; }
- // Minor key must differ if different result_size_ values means different
- // code is generated.
- int MinorKey();
-
- const char* GetName() { return "CEntryStub"; }
-};
-
-
-class ApiGetterEntryStub : public CodeStub {
- public:
- ApiGetterEntryStub(Handle<AccessorInfo> info,
- ApiFunction* fun)
- : info_(info),
- fun_(fun) { }
- void Generate(MacroAssembler* masm);
- virtual bool has_custom_cache() { return true; }
- virtual bool GetCustomCache(Code** code_out);
- virtual void SetCustomCache(Code* value);
-
- static const int kStackSpace = 5;
- static const int kArgc = 4;
- private:
- Handle<AccessorInfo> info() { return info_; }
- ApiFunction* fun() { return fun_; }
- Major MajorKey() { return NoCache; }
- int MinorKey() { return 0; }
- const char* GetName() { return "ApiEntryStub"; }
- // The accessor info associated with the function.
- Handle<AccessorInfo> info_;
- // The function to be called.
- ApiFunction* fun_;
-};
-
-
-class JSEntryStub : public CodeStub {
- public:
- JSEntryStub() { }
-
- void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
-
- protected:
- void GenerateBody(MacroAssembler* masm, bool is_construct);
-
- private:
- Major MajorKey() { return JSEntry; }
- int MinorKey() { return 0; }
-
- const char* GetName() { return "JSEntryStub"; }
-};
-
-
-class JSConstructEntryStub : public JSEntryStub {
- public:
- JSConstructEntryStub() { }
-
- void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
-
- private:
- int MinorKey() { return 1; }
-
- const char* GetName() { return "JSConstructEntryStub"; }
-};
-
-
-class ArgumentsAccessStub: public CodeStub {
- public:
- enum Type {
- READ_ELEMENT,
- NEW_OBJECT
- };
-
- explicit ArgumentsAccessStub(Type type) : type_(type) { }
-
- private:
- Type type_;
-
- Major MajorKey() { return ArgumentsAccess; }
- int MinorKey() { return type_; }
-
- void Generate(MacroAssembler* masm);
- void GenerateReadElement(MacroAssembler* masm);
- void GenerateNewObject(MacroAssembler* masm);
-
- const char* GetName() { return "ArgumentsAccessStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("ArgumentsAccessStub (type %d)\n", type_);
- }
-#endif
-};
-
-
-class RegExpExecStub: public CodeStub {
- public:
- RegExpExecStub() { }
-
- private:
- Major MajorKey() { return RegExpExec; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "RegExpExecStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("RegExpExecStub\n");
- }
-#endif
-};
-
-
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
- : argc_(argc), in_loop_(in_loop), flags_(flags) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
- CallFunctionFlags flags_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
- argc_,
- static_cast<int>(in_loop_),
- static_cast<int>(flags_));
- }
-#endif
-
- // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
- class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
- class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
- class ArgcBits: public BitField<int, 2, 32 - 2> {};
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() {
- // Encode the parameters in a unique 32 bit value.
- return InLoopBits::encode(in_loop_)
- | FlagBits::encode(flags_)
- | ArgcBits::encode(argc_);
- }
-
- InLoopFlag InLoop() { return in_loop_; }
- bool ReceiverMightBeValue() {
- return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
- }
-
- public:
- static int ExtractArgcFromMinorKey(int minor_key) {
- return ArgcBits::decode(minor_key);
- }
-};
-
-
-enum StringIndexFlags {
- // Accepts smis or heap numbers.
- STRING_INDEX_IS_NUMBER,
-
- // Accepts smis or heap numbers that are valid array indices
- // (ECMA-262 15.4). Invalid indices are reported as being out of
- // range.
- STRING_INDEX_IS_ARRAY_INDEX
-};
-
-
-// Generates code implementing String.prototype.charCodeAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch| and |result| are clobbered.
-class StringCharCodeAtGenerator {
- public:
- StringCharCodeAtGenerator(Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_number,
- Label* index_out_of_range,
- StringIndexFlags index_flags)
- : object_(object),
- index_(index),
- scratch_(scratch),
- result_(result),
- receiver_not_string_(receiver_not_string),
- index_not_number_(index_not_number),
- index_out_of_range_(index_out_of_range),
- index_flags_(index_flags) {
- ASSERT(!scratch_.is(object_));
- ASSERT(!scratch_.is(index_));
- ASSERT(!scratch_.is(result_));
- ASSERT(!result_.is(object_));
- ASSERT(!result_.is(index_));
- }
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- private:
- Register object_;
- Register index_;
- Register scratch_;
- Register result_;
-
- Label* receiver_not_string_;
- Label* index_not_number_;
- Label* index_out_of_range_;
-
- StringIndexFlags index_flags_;
-
- Label call_runtime_;
- Label index_not_smi_;
- Label got_smi_index_;
- Label exit_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
-};
-
-
-// Generates code for creating a one-char string from a char code.
-class StringCharFromCodeGenerator {
- public:
- StringCharFromCodeGenerator(Register code,
- Register result)
- : code_(code),
- result_(result) {
- ASSERT(!code_.is(result_));
- }
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- private:
- Register code_;
- Register result_;
-
- Label slow_case_;
- Label exit_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
-};
-
-
-// Generates code implementing String.prototype.charAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
-class StringCharAtGenerator {
- public:
- StringCharAtGenerator(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* receiver_not_string,
- Label* index_not_number,
- Label* index_out_of_range,
- StringIndexFlags index_flags)
- : char_code_at_generator_(object,
- index,
- scratch1,
- scratch2,
- receiver_not_string,
- index_not_number,
- index_out_of_range,
- index_flags),
- char_from_code_generator_(scratch2, result) {}
-
- // Generates the fast case code. On the fallthrough path |result|
- // register contains the result.
- void GenerateFast(MacroAssembler* masm);
-
- // Generates the slow case code. Must not be naturally
- // reachable. Expected to be put after a ret instruction (e.g., in
- // deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
-
- private:
- StringCharCodeAtGenerator char_code_at_generator_;
- StringCharFromCodeGenerator char_from_code_generator_;
-
- DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
-};
-
-} // namespace internal
-} // namespace v8
+} } // namespace v8::internal
#endif // V8_CODEGEN_H_
diff --git a/src/compiler.cc b/src/compiler.cc
index ff3cb7a8..bf6d41d8 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -33,7 +33,6 @@
#include "compiler.h"
#include "data-flow.h"
#include "debug.h"
-#include "flow-graph.h"
#include "full-codegen.h"
#include "liveedit.h"
#include "oprofile-agent.h"
@@ -92,27 +91,6 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
return Handle<Code>::null();
}
- if (function->scope()->num_parameters() > 0 ||
- function->scope()->num_stack_slots()) {
- AssignedVariablesAnalyzer ava(function);
- ava.Analyze();
- if (ava.HasStackOverflow()) {
- return Handle<Code>::null();
- }
- }
-
- if (FLAG_use_flow_graph) {
- FlowGraphBuilder builder;
- FlowGraph* graph = builder.Build(function);
- USE(graph);
-
-#ifdef DEBUG
- if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
- graph->PrintAsText(function->name());
- }
-#endif
- }
-
// Generate code and return it. Code generator selection is governed by
// which backends are enabled and whether the function is considered
// run-once code or not:
@@ -126,17 +104,13 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
bool is_run_once = (shared.is_null())
? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
-
- if (AlwaysFullCompiler()) {
+ bool use_full = FLAG_full_compiler && !function->contains_loops();
+ if (AlwaysFullCompiler() || (use_full && is_run_once)) {
return FullCodeGenerator::MakeCode(info);
- } else if (FLAG_full_compiler && is_run_once) {
- FullCodeGenSyntaxChecker checker;
- checker.Check(function);
- if (checker.has_supported_syntax()) {
- return FullCodeGenerator::MakeCode(info);
- }
}
+ AssignedVariablesAnalyzer ava(function);
+ if (!ava.Analyze()) return Handle<Code>::null();
return CodeGenerator::MakeCode(info);
}
@@ -442,6 +416,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// object last we avoid this.
shared->set_scope_info(*SerializedScopeInfo::Create(info->scope()));
shared->set_code(*code);
+ if (!info->closure().is_null()) {
+ info->closure()->set_code(*code);
+ }
// Set the expected number of properties for instances.
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
@@ -490,49 +467,19 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
return Handle<SharedFunctionInfo>::null();
}
- if (literal->scope()->num_parameters() > 0 ||
- literal->scope()->num_stack_slots()) {
- AssignedVariablesAnalyzer ava(literal);
- ava.Analyze();
- if (ava.HasStackOverflow()) {
- return Handle<SharedFunctionInfo>::null();
- }
- }
-
- if (FLAG_use_flow_graph) {
- FlowGraphBuilder builder;
- FlowGraph* graph = builder.Build(literal);
- USE(graph);
-
-#ifdef DEBUG
- if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
- graph->PrintAsText(literal->name());
- }
-#endif
- }
-
// Generate code and return it. The way that the compilation mode
// is controlled by the command-line flags is described in
// the static helper function MakeCode.
CompilationInfo info(literal, script, false);
bool is_run_once = literal->try_full_codegen();
- bool is_compiled = false;
-
- if (AlwaysFullCompiler()) {
+ bool use_full = FLAG_full_compiler && !literal->contains_loops();
+ if (AlwaysFullCompiler() || (use_full && is_run_once)) {
code = FullCodeGenerator::MakeCode(&info);
- is_compiled = true;
- } else if (FLAG_full_compiler && is_run_once) {
- FullCodeGenSyntaxChecker checker;
- checker.Check(literal);
- if (checker.has_supported_syntax()) {
- code = FullCodeGenerator::MakeCode(&info);
- is_compiled = true;
- }
- }
-
- if (!is_compiled) {
+ } else {
// We fall back to the classic V8 code generator.
+ AssignedVariablesAnalyzer ava(literal);
+ if (!ava.Analyze()) return Handle<SharedFunctionInfo>::null();
code = CodeGenerator::MakeCode(&info);
}
diff --git a/src/contexts.h b/src/contexts.h
index 181efc9b..78dda6a6 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -28,6 +28,9 @@
#ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_
+#include "heap.h"
+#include "objects.h"
+
namespace v8 {
namespace internal {
@@ -86,6 +89,7 @@ enum ContextLookupFlags {
V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
+ V(NORMALIZED_MAP_CACHE_INDEX, NormalizedMapCache, normalized_map_cache) \
V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
@@ -211,6 +215,7 @@ class Context: public FixedArray {
CONFIGURE_GLOBAL_INDEX,
FUNCTION_CACHE_INDEX,
JSFUNCTION_RESULT_CACHES_INDEX,
+ NORMALIZED_MAP_CACHE_INDEX,
RUNTIME_CONTEXT_INDEX,
CALL_AS_FUNCTION_DELEGATE_INDEX,
CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
diff --git a/src/conversions.cc b/src/conversions.cc
index 1e2bb20c..90cdc773 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -733,11 +733,18 @@ double StringToInt(String* str, int radix) {
double StringToDouble(const char* str, int flags, double empty_string_val) {
const char* end = str + StrLength(str);
-
return InternalStringToDouble(str, end, flags, empty_string_val);
}
+double StringToDouble(Vector<const char> str,
+ int flags,
+ double empty_string_val) {
+ const char* end = str.start() + str.length();
+ return InternalStringToDouble(str.start(), end, flags, empty_string_val);
+}
+
+
extern "C" char* dtoa(double d, int mode, int ndigits,
int* decpt, int* sign, char** rve);
diff --git a/src/conversions.h b/src/conversions.h
index c4ceea6b..9e32a0cd 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -96,8 +96,12 @@ static inline uint32_t NumberToUint32(Object* number);
// Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(const char* str, int flags, double empty_string_val = 0);
double StringToDouble(String* str, int flags, double empty_string_val = 0);
+double StringToDouble(Vector<const char> str,
+ int flags,
+ double empty_string_val = 0);
+// This version expects a zero-terminated character array.
+double StringToDouble(const char* str, int flags, double empty_string_val = 0);
// Converts a string into an integer.
double StringToInt(String* str, int radix);
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 55d85825..d480c1bc 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -50,258 +50,13 @@ void BitVector::Print() {
#endif
-void AstLabeler::Label(CompilationInfo* info) {
- info_ = info;
- VisitStatements(info_->function()->body());
-}
-
-
-void AstLabeler::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- }
-}
-
-
-void AstLabeler::VisitDeclarations(ZoneList<Declaration*>* decls) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void AstLabeler::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void AstLabeler::VisitEmptyStatement(EmptyStatement* stmt) {
- // Do nothing.
-}
-
-
-void AstLabeler::VisitIfStatement(IfStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitContinueStatement(ContinueStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitBreakStatement(BreakStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitReturnStatement(ReturnStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitWithExitStatement(WithExitStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitSwitchStatement(SwitchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitWhileStatement(WhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitForInStatement(ForInStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitConditional(Conditional* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitVariableProxy(VariableProxy* expr) {
- expr->set_num(next_number_++);
- Variable* var = expr->var();
- if (var->is_global() && !var->is_this()) {
- info_->set_has_globals(true);
- }
-}
-
-
-void AstLabeler::VisitLiteral(Literal* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitObjectLiteral(ObjectLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitArrayLiteral(ArrayLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitAssignment(Assignment* expr) {
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->IsPropertyName());
- VariableProxy* proxy = prop->obj()->AsVariableProxy();
- USE(proxy);
- ASSERT(proxy != NULL && proxy->var()->is_this());
- info()->set_has_this_properties(true);
-
- prop->obj()->set_num(AstNode::kNoNumber);
- prop->key()->set_num(AstNode::kNoNumber);
- Visit(expr->value());
- expr->set_num(next_number_++);
-}
-
-
-void AstLabeler::VisitThrow(Throw* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitProperty(Property* expr) {
- ASSERT(expr->key()->IsPropertyName());
- VariableProxy* proxy = expr->obj()->AsVariableProxy();
- USE(proxy);
- ASSERT(proxy != NULL && proxy->var()->is_this());
- info()->set_has_this_properties(true);
-
- expr->obj()->set_num(AstNode::kNoNumber);
- expr->key()->set_num(AstNode::kNoNumber);
- expr->set_num(next_number_++);
-}
-
-
-void AstLabeler::VisitCall(Call* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitCallNew(CallNew* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitCallRuntime(CallRuntime* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitUnaryOperation(UnaryOperation* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitCountOperation(CountOperation* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
- Visit(expr->right());
- expr->set_num(next_number_++);
-}
-
-
-void AstLabeler::VisitCompareOperation(CompareOperation* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitThisFunction(ThisFunction* expr) {
- UNREACHABLE();
-}
-
-
-void AstLabeler::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(FunctionLiteral* fun)
- : fun_(fun),
- av_(fun->scope()->num_parameters() + fun->scope()->num_stack_slots()) {}
-
-
-void AssignedVariablesAnalyzer::Analyze() {
- ASSERT(av_.length() > 0);
+bool AssignedVariablesAnalyzer::Analyze() {
+ Scope* scope = fun_->scope();
+ int variables = scope->num_parameters() + scope->num_stack_slots();
+ if (variables == 0) return true;
+ av_.ExpandTo(variables);
VisitStatements(fun_->body());
+ return !HasStackOverflow();
}
@@ -394,7 +149,7 @@ void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
!var->is_arguments() &&
var->mode() != Variable::CONST &&
(var->is_this() || !av_.Contains(BitIndex(var)))) {
- expr->AsVariableProxy()->set_is_trivial(true);
+ expr->AsVariableProxy()->MarkAsTrivial();
}
}
@@ -489,9 +244,7 @@ void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
if (stmt->init() != NULL) Visit(stmt->init());
-
if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
-
if (stmt->next() != NULL) Visit(stmt->next());
// Process loop body. After visiting the loop body av_ contains
@@ -504,7 +257,6 @@ void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
if (var != NULL && !av_.Contains(BitIndex(var))) {
stmt->set_loop_variable(var);
}
-
av_.Union(saved_av);
}
@@ -712,13 +464,20 @@ void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(av_.IsEmpty());
+ MarkIfTrivial(expr->expression());
Visit(expr->expression());
}
+void AssignedVariablesAnalyzer::VisitIncrementOperation(
+ IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
ASSERT(av_.IsEmpty());
-
+ if (expr->is_prefix()) MarkIfTrivial(expr->expression());
Visit(expr->expression());
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
@@ -744,6 +503,13 @@ void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
}
+void AssignedVariablesAnalyzer::VisitCompareToNull(CompareToNull* expr) {
+ ASSERT(av_.IsEmpty());
+ MarkIfTrivial(expr->expression());
+ Visit(expr->expression());
+}
+
+
void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
// Nothing to do.
ASSERT(av_.IsEmpty());
diff --git a/src/data-flow.h b/src/data-flow.h
index 079da65b..540db162 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -42,12 +42,10 @@ class Node;
class BitVector: public ZoneObject {
public:
- explicit BitVector(int length)
- : length_(length),
- data_length_(SizeFor(length)),
- data_(Zone::NewArray<uint32_t>(data_length_)) {
- ASSERT(length > 0);
- Clear();
+ BitVector() : length_(0), data_length_(0), data_(NULL) { }
+
+ explicit BitVector(int length) {
+ ExpandTo(length);
}
BitVector(const BitVector& other)
@@ -57,8 +55,12 @@ class BitVector: public ZoneObject {
CopyFrom(other);
}
- static int SizeFor(int length) {
- return 1 + ((length - 1) / 32);
+ void ExpandTo(int length) {
+ ASSERT(length > 0);
+ length_ = length;
+ data_length_ = SizeFor(length);
+ data_ = Zone::NewArray<uint32_t>(data_length_);
+ Clear();
}
BitVector& operator=(const BitVector& rhs) {
@@ -137,6 +139,10 @@ class BitVector: public ZoneObject {
#endif
private:
+ static int SizeFor(int length) {
+ return 1 + ((length - 1) / 32);
+ }
+
int length_;
int data_length_;
uint32_t* data_;
@@ -187,63 +193,13 @@ class WorkList BASE_EMBEDDED {
};
-struct ReachingDefinitionsData BASE_EMBEDDED {
- public:
- ReachingDefinitionsData() : rd_in_(NULL), kill_(NULL), gen_(NULL) {}
-
- void Initialize(int definition_count) {
- rd_in_ = new BitVector(definition_count);
- kill_ = new BitVector(definition_count);
- gen_ = new BitVector(definition_count);
- }
-
- BitVector* rd_in() { return rd_in_; }
- BitVector* kill() { return kill_; }
- BitVector* gen() { return gen_; }
-
- private:
- BitVector* rd_in_;
- BitVector* kill_;
- BitVector* gen_;
-};
-
-
-// This class is used to number all expressions in the AST according to
-// their evaluation order (post-order left-to-right traversal).
-class AstLabeler: public AstVisitor {
- public:
- AstLabeler() : next_number_(0) {}
-
- void Label(CompilationInfo* info);
-
- private:
- CompilationInfo* info() { return info_; }
-
- void VisitDeclarations(ZoneList<Declaration*>* decls);
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- // Traversal number for labelling AST nodes.
- int next_number_;
-
- CompilationInfo* info_;
-
- DISALLOW_COPY_AND_ASSIGN(AstLabeler);
-};
-
-
 // Computes the set of assigned variables and annotates variable proxies
// that are trivial sub-expressions and for-loops where the loop variable
// is guaranteed to be a smi.
class AssignedVariablesAnalyzer : public AstVisitor {
public:
- explicit AssignedVariablesAnalyzer(FunctionLiteral* fun);
-
- void Analyze();
+ explicit AssignedVariablesAnalyzer(FunctionLiteral* fun) : fun_(fun) { }
+ bool Analyze();
private:
Variable* FindSmiLoopVariable(ForStatement* stmt);
diff --git a/src/debug.cc b/src/debug.cc
index 1234196a..87780d35 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1007,17 +1007,18 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
for (int i = 0; i < array->length(); i++) {
Handle<Object> o(array->get(i));
if (CheckBreakPoint(o)) {
- break_points_hit->SetElement(break_points_hit_count++, *o);
+ SetElement(break_points_hit, break_points_hit_count++, o);
}
}
} else {
if (CheckBreakPoint(break_point_objects)) {
- break_points_hit->SetElement(break_points_hit_count++,
- *break_point_objects);
+ SetElement(break_points_hit,
+ break_points_hit_count++,
+ break_point_objects);
}
}
- // Return undefined if no break points where triggered.
+ // Return undefined if no break points were triggered.
if (break_points_hit_count == 0) {
return Factory::undefined_value();
}
@@ -1443,7 +1444,7 @@ bool Debug::IsDebugBreak(Address addr) {
// Check whether a code stub with the specified major key is a possible break
// point location when looking for source break locations.
bool Debug::IsSourceBreakStub(Code* code) {
- CodeStub::Major major_key = code->major_key();
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
return major_key == CodeStub::CallFunction;
}
@@ -1451,7 +1452,7 @@ bool Debug::IsSourceBreakStub(Code* code) {
// Check whether a code stub with the specified major key is a possible break
// location.
bool Debug::IsBreakStub(Code* code) {
- CodeStub::Major major_key = code->major_key();
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
return major_key == CodeStub::CallFunction ||
major_key == CodeStub::StackCheck;
}
diff --git a/src/debug.h b/src/debug.h
index 98d19194..8b3b29e6 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -29,7 +29,6 @@
#define V8_DEBUG_H_
#include "assembler.h"
-#include "code-stubs.h"
#include "debug-agent.h"
#include "execution.h"
#include "factory.h"
@@ -332,8 +331,7 @@ class Debug {
k_after_break_target_address,
k_debug_break_return_address,
k_debug_break_slot_address,
- k_restarter_frame_function_pointer,
- k_register_address
+ k_restarter_frame_function_pointer
};
// Support for setting the address to jump to when returning from break point.
@@ -953,10 +951,7 @@ class DisableBreak BASE_EMBEDDED {
// code.
class Debug_Address {
public:
- Debug_Address(Debug::AddressId id, int reg = 0)
- : id_(id), reg_(reg) {
- ASSERT(reg == 0 || id == Debug::k_register_address);
- }
+ explicit Debug_Address(Debug::AddressId id) : id_(id) { }
static Debug_Address AfterBreakTarget() {
return Debug_Address(Debug::k_after_break_target_address);
@@ -970,10 +965,6 @@ class Debug_Address {
return Debug_Address(Debug::k_restarter_frame_function_pointer);
}
- static Debug_Address Register(int reg) {
- return Debug_Address(Debug::k_register_address, reg);
- }
-
Address address() const {
switch (id_) {
case Debug::k_after_break_target_address:
@@ -985,8 +976,6 @@ class Debug_Address {
case Debug::k_restarter_frame_function_pointer:
return reinterpret_cast<Address>(
Debug::restarter_frame_function_pointer_address());
- case Debug::k_register_address:
- return reinterpret_cast<Address>(Debug::register_address(reg_));
default:
UNREACHABLE();
return NULL;
@@ -994,7 +983,6 @@ class Debug_Address {
}
private:
Debug::AddressId id_;
- int reg_;
};
// The optional thread that Debug Agent may use to temporary call V8 to process
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 19cb6af7..e79421fe 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -258,11 +258,12 @@ static int DecodeIt(FILE* f,
// Get the STUB key and extract major and minor key.
uint32_t key = Smi::cast(obj)->value();
uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
- ASSERT(code->major_key() == CodeStub::MajorKeyFromKey(key));
+ CodeStub::Major major_key = CodeStub::GetMajorKey(code);
+ ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
out.AddFormatted(" %s, %s, ",
Code::Kind2String(kind),
- CodeStub::MajorName(code->major_key(), false));
- switch (code->major_key()) {
+ CodeStub::MajorName(major_key, false));
+ switch (major_key) {
case CodeStub::CallFunction:
out.AddFormatted("argc = %d", minor_key);
break;
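
The disassembler change asks CodeStub::GetMajorKey(code) for the major key and then splits the packed Smi key into major and minor parts with MajorKeyFromKey/MinorKeyFromKey. Below is a standalone sketch of that kind of bit-packed key; the 6-bit major field is an assumed width for illustration only, not taken from the diff:

#include <cstdint>
#include <cstdio>

// Hypothetical packing: low bits hold the major key, the rest the minor key.
constexpr uint32_t kMajorBits = 6;                      // assumed width
constexpr uint32_t kMajorMask = (1u << kMajorBits) - 1;

uint32_t MajorKeyFromKey(uint32_t key) { return key & kMajorMask; }
uint32_t MinorKeyFromKey(uint32_t key) { return key >> kMajorBits; }
uint32_t KeyFromMajorMinor(uint32_t major, uint32_t minor) {
  return (minor << kMajorBits) | (major & kMajorMask);
}

int main() {
  // e.g. a CallFunction-style stub whose minor key encodes argc = 2.
  uint32_t key = KeyFromMajorMinor(3, 2);
  std::printf("major=%u minor=%u\n", MajorKeyFromKey(key), MinorKeyFromKey(key));
  return 0;
}
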
diff --git a/src/flags.h b/src/flags.h
index a8eca95c..f9cbde0b 100644
--- a/src/flags.h
+++ b/src/flags.h
@@ -27,8 +27,6 @@
#ifndef V8_FLAGS_H_
#define V8_FLAGS_H_
-#include "checks.h"
-
namespace v8 {
namespace internal {
diff --git a/src/flow-graph.cc b/src/flow-graph.cc
deleted file mode 100644
index 02a2cd9c..00000000
--- a/src/flow-graph.cc
+++ /dev/null
@@ -1,763 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "flow-graph.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-void BasicBlock::BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
- ZoneList<BasicBlock*>* postorder,
- bool mark) {
- if (mark_ == mark) return;
- mark_ = mark;
- preorder->Add(this);
- if (right_successor_ != NULL) {
- right_successor_->BuildTraversalOrder(preorder, postorder, mark);
- }
- if (left_successor_ != NULL) {
- left_successor_->BuildTraversalOrder(preorder, postorder, mark);
- }
- postorder->Add(this);
-}
-
-
-FlowGraph* FlowGraphBuilder::Build(FunctionLiteral* lit) {
- // Create new entry and exit nodes. These will not change during
- // construction.
- entry_ = new BasicBlock(NULL);
- exit_ = new BasicBlock(NULL);
- // Begin accumulating instructions in the entry block.
- current_ = entry_;
-
- VisitDeclarations(lit->scope()->declarations());
- VisitStatements(lit->body());
- // In the event of stack overflow or failure to handle a syntactic
- // construct, return an invalid flow graph.
- if (HasStackOverflow()) return new FlowGraph(NULL, NULL);
-
- // If current is not the exit, add a link to the exit.
- if (current_ != exit_) {
- // If current already has a successor (i.e., will be a branch node) and
- // if the exit already has a predecessor, insert an empty block to
- // maintain edge split form.
- if (current_->HasSuccessor() && exit_->HasPredecessor()) {
- current_ = new BasicBlock(current_);
- }
- Literal* undefined = new Literal(Factory::undefined_value());
- current_->AddInstruction(new ReturnStatement(undefined));
- exit_->AddPredecessor(current_);
- }
-
- FlowGraph* graph = new FlowGraph(entry_, exit_);
- bool mark = !entry_->GetMark();
- entry_->BuildTraversalOrder(graph->preorder(), graph->postorder(), mark);
-
-#ifdef DEBUG
- // Number the nodes in reverse postorder.
- int n = 0;
- for (int i = graph->postorder()->length() - 1; i >= 0; --i) {
- graph->postorder()->at(i)->set_number(n++);
- }
-#endif
-
- return graph;
-}
-
-
-void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
- Variable* var = decl->proxy()->AsVariable();
- Slot* slot = var->slot();
- // We allow only declarations that do not require code generation.
- // The following all require code generation: global variables and
- // functions, variables with slot type LOOKUP, declarations with
- // mode CONST, and functions.
-
- if (var->is_global() ||
- (slot != NULL && slot->type() == Slot::LOOKUP) ||
- decl->mode() == Variable::CONST ||
- decl->fun() != NULL) {
- // Here and in the rest of the flow graph builder we indicate an
- // unsupported syntactic construct by setting the stack overflow
- // flag on the visitor. This causes bailout of the visitor.
- SetStackOverflow();
- }
-}
-
-
-void FlowGraphBuilder::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
- // Nothing to do.
-}
-
-
-void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
- // Build a diamond in the flow graph. First accumulate the instructions
- // of the test in the current basic block.
- Visit(stmt->condition());
-
- // Remember the branch node and accumulate the true branch as its left
- // successor. This relies on the successors being added left to right.
- BasicBlock* branch = current_;
- current_ = new BasicBlock(branch);
- Visit(stmt->then_statement());
-
- // Construct a join node and then accumulate the false branch in a fresh
- // successor of the branch node.
- BasicBlock* join = new BasicBlock(current_);
- current_ = new BasicBlock(branch);
- Visit(stmt->else_statement());
- join->AddPredecessor(current_);
-
- current_ = join;
-}
-
-
-void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
- // Build a loop in the flow graph. First accumulate the instructions of
- // the initializer in the current basic block.
- if (stmt->init() != NULL) Visit(stmt->init());
-
- // Create a new basic block for the test. This will be the join node.
- BasicBlock* join = new BasicBlock(current_);
- current_ = join;
- if (stmt->cond() != NULL) Visit(stmt->cond());
-
- // The current node is the branch node. Create a new basic block to begin
- // the body.
- BasicBlock* branch = current_;
- current_ = new BasicBlock(branch);
- Visit(stmt->body());
- if (stmt->next() != NULL) Visit(stmt->next());
-
- // Add the backward edge from the end of the body and continue with the
- // false arm of the branch.
- join->AddPredecessor(current_);
- current_ = new BasicBlock(branch);
-}
-
-
-void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitConditional(Conditional* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitSlot(Slot* expr) {
- // Slots do not appear in the AST.
- UNREACHABLE();
-}
-
-
-void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitLiteral(Literal* expr) {
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
- // There are three basic kinds of assignment: variable assignments,
- // property assignments, and invalid left-hand sides (which are translated
- // to "throw ReferenceError" by the parser).
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- if (expr->is_compound() && !expr->target()->IsTrivial()) {
- Visit(expr->target());
- }
- if (!expr->value()->IsTrivial()) Visit(expr->value());
- current_->AddInstruction(expr);
-
- } else if (prop != NULL) {
- if (!prop->obj()->IsTrivial()) Visit(prop->obj());
- if (!prop->key()->IsPropertyName() && !prop->key()->IsTrivial()) {
- Visit(prop->key());
- }
- if (!expr->value()->IsTrivial()) Visit(expr->value());
- current_->AddInstruction(expr);
-
- } else {
- Visit(expr->target());
- }
-}
-
-
-void FlowGraphBuilder::VisitThrow(Throw* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitProperty(Property* expr) {
- if (!expr->obj()->IsTrivial()) Visit(expr->obj());
- if (!expr->key()->IsPropertyName() && !expr->key()->IsTrivial()) {
- Visit(expr->key());
- }
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitCall(Call* expr) {
- Visit(expr->expression());
- VisitExpressions(expr->arguments());
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::NOT:
- case Token::BIT_NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- case Token::VOID:
- SetStackOverflow();
- break;
-
- case Token::ADD:
- case Token::SUB:
- Visit(expr->expression());
- current_->AddInstruction(expr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
- Visit(expr->expression());
- current_->AddInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- switch (expr->op()) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- SetStackOverflow();
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- if (!expr->left()->IsTrivial()) Visit(expr->left());
- if (!expr->right()->IsTrivial()) Visit(expr->right());
- current_->AddInstruction(expr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
- switch (expr->op()) {
- case Token::EQ:
- case Token::NE:
- case Token::EQ_STRICT:
- case Token::NE_STRICT:
- case Token::INSTANCEOF:
- case Token::IN:
- SetStackOverflow();
- break;
-
- case Token::LT:
- case Token::GT:
- case Token::LTE:
- case Token::GTE:
- if (!expr->left()->IsTrivial()) Visit(expr->left());
- if (!expr->right()->IsTrivial()) Visit(expr->right());
- current_->AddInstruction(expr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
- SetStackOverflow();
-}
-
-
-#ifdef DEBUG
-
-// Print a textual representation of an instruction in a flow graph.
-class InstructionPrinter: public AstVisitor {
- public:
- InstructionPrinter() {}
-
- private:
- // Overridden from the base class.
- virtual void VisitExpressions(ZoneList<Expression*>* exprs);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- DISALLOW_COPY_AND_ASSIGN(InstructionPrinter);
-};
-
-
-static void PrintSubexpression(Expression* expr) {
- if (!expr->IsTrivial()) {
- PrintF("@%d", expr->num());
- } else if (expr->AsLiteral() != NULL) {
- expr->AsLiteral()->handle()->Print();
- } else if (expr->AsVariableProxy() != NULL) {
- PrintF("%s", *expr->AsVariableProxy()->name()->ToCString());
- } else {
- UNREACHABLE();
- }
-}
-
-
-void InstructionPrinter::VisitExpressions(ZoneList<Expression*>* exprs) {
- for (int i = 0; i < exprs->length(); ++i) {
- if (i != 0) PrintF(", ");
- PrintF("@%d", exprs->at(i)->num());
- }
-}
-
-
-// We only define printing functions for the node types that can occur as
-// instructions in a flow graph. The rest are unreachable.
-void InstructionPrinter::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitBlock(Block* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitExpressionStatement(ExpressionStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitIfStatement(IfStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
- PrintF("return ");
- PrintSubexpression(stmt->expression());
-}
-
-
-void InstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitConditional(Conditional* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
- Variable* var = expr->AsVariable();
- if (var != NULL) {
- PrintF("%s", *var->name()->ToCString());
- } else {
- ASSERT(expr->AsProperty() != NULL);
- Visit(expr->AsProperty());
- }
-}
-
-
-void InstructionPrinter::VisitLiteral(Literal* expr) {
- expr->handle()->Print();
-}
-
-
-void InstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
-
- // Print the left-hand side.
- Visit(expr->target());
- if (var == NULL && prop == NULL) return; // Throw reference error.
- PrintF(" = ");
- // For compound assignments, print the left-hand side again and the
- // corresponding binary operator.
- if (expr->is_compound()) {
- PrintSubexpression(expr->target());
- PrintF(" %s ", Token::String(expr->binary_op()));
- }
-
- // Print the right-hand side.
- PrintSubexpression(expr->value());
-}
-
-
-void InstructionPrinter::VisitThrow(Throw* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitProperty(Property* expr) {
- PrintSubexpression(expr->obj());
- if (expr->key()->IsPropertyName()) {
- PrintF(".");
- ASSERT(expr->key()->AsLiteral() != NULL);
- expr->key()->AsLiteral()->handle()->Print();
- } else {
- PrintF("[");
- PrintSubexpression(expr->key());
- PrintF("]");
- }
-}
-
-
-void InstructionPrinter::VisitCall(Call* expr) {
- PrintF("@%d(", expr->expression()->num());
- VisitExpressions(expr->arguments());
- PrintF(")");
-}
-
-
-void InstructionPrinter::VisitCallNew(CallNew* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
- UNREACHABLE();
-}
-
-
-void InstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
- PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
-}
-
-
-void InstructionPrinter::VisitCountOperation(CountOperation* expr) {
- if (expr->is_prefix()) {
- PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
- } else {
- PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
- }
-}
-
-
-void InstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
- PrintSubexpression(expr->left());
- PrintF(" %s ", Token::String(expr->op()));
- PrintSubexpression(expr->right());
-}
-
-
-void InstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
- PrintSubexpression(expr->left());
- PrintF(" %s ", Token::String(expr->op()));
- PrintSubexpression(expr->right());
-}
-
-
-void InstructionPrinter::VisitThisFunction(ThisFunction* expr) {
- UNREACHABLE();
-}
-
-
-int BasicBlock::PrintAsText(int instruction_number) {
- // Print a label for all blocks except the entry.
- if (HasPredecessor()) {
- PrintF("L%d:", number());
- }
-
- // Number and print the instructions. Since AST child nodes are visited
- // before their parents, the parent nodes can refer to them by number.
- InstructionPrinter printer;
- for (int i = 0; i < instructions_.length(); ++i) {
- PrintF("\n%d ", instruction_number);
- instructions_[i]->set_num(instruction_number++);
- instructions_[i]->Accept(&printer);
- }
-
- // If this is the exit, print "exit". If there is a single successor,
- // print "goto" successor on a separate line. If there are two
- // successors, print "goto" successor on the same line as the last
- // instruction in the block. There is a blank line between blocks (and
- // after the last one).
- if (left_successor_ == NULL) {
- PrintF("\nexit\n\n");
- } else if (right_successor_ == NULL) {
- PrintF("\ngoto L%d\n\n", left_successor_->number());
- } else {
- PrintF(", goto (L%d, L%d)\n\n",
- left_successor_->number(),
- right_successor_->number());
- }
-
- return instruction_number;
-}
-
-
-void FlowGraph::PrintAsText(Handle<String> name) {
- PrintF("\n==== name = \"%s\" ====\n", *name->ToCString());
- // Print nodes in reverse postorder. Note that AST node numbers are used
- // during printing of instructions and thus their current values are
- // destroyed.
- int number = 0;
- for (int i = postorder_.length() - 1; i >= 0; --i) {
- number = postorder_[i]->PrintAsText(number);
- }
-}
-
-#endif // DEBUG
-
-
-} } // namespace v8::internal
diff --git a/src/flow-graph.h b/src/flow-graph.h
deleted file mode 100644
index f6af8410..00000000
--- a/src/flow-graph.h
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FLOW_GRAPH_H_
-#define V8_FLOW_GRAPH_H_
-
-#include "v8.h"
-
-#include "data-flow.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// The nodes of a flow graph are basic blocks. Basic blocks consist of
-// instructions represented as pointers to AST nodes in the order that they
-// would be visited by the code generator. A block can have arbitrarily many
-// (even zero) predecessors and up to two successors. Blocks with multiple
-// predecessors are "join nodes" and blocks with multiple successors are
-// "branch nodes". A block can be both a branch and a join node.
-//
-// Flow graphs are in edge split form: a branch node is never the
-// predecessor of a merge node. Empty basic blocks are inserted to maintain
-// edge split form.
-class BasicBlock: public ZoneObject {
- public:
- // Construct a basic block with a given predecessor. NULL indicates no
- // predecessor or that the predecessor will be set later.
- explicit BasicBlock(BasicBlock* predecessor)
- : predecessors_(2),
- instructions_(8),
- left_successor_(NULL),
- right_successor_(NULL),
- mark_(false) {
- if (predecessor != NULL) AddPredecessor(predecessor);
- }
-
- bool HasPredecessor() { return !predecessors_.is_empty(); }
- bool HasSuccessor() { return left_successor_ != NULL; }
-
- // Add a given basic block as a predecessor of this block. This function
- // also adds this block as a successor of the given block.
- void AddPredecessor(BasicBlock* predecessor) {
- ASSERT(predecessor != NULL);
- predecessors_.Add(predecessor);
- predecessor->AddSuccessor(this);
- }
-
- // Add an instruction to the end of this block. The block must be "open"
- // by not having a successor yet.
- void AddInstruction(AstNode* instruction) {
- ASSERT(!HasSuccessor() && instruction != NULL);
- instructions_.Add(instruction);
- }
-
- // Perform a depth-first traversal of graph rooted at this node,
- // accumulating pre- and postorder traversal orders. Visited nodes are
- // marked with mark.
- void BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
- ZoneList<BasicBlock*>* postorder,
- bool mark);
- bool GetMark() { return mark_; }
-
-#ifdef DEBUG
- // In debug mode, blocks are numbered in reverse postorder to help with
- // printing.
- int number() { return number_; }
- void set_number(int n) { number_ = n; }
-
- // Print a basic block, given the number of the first instruction.
- // Returns the next number after the number of the last instruction.
- int PrintAsText(int instruction_number);
-#endif
-
- private:
- // Add a given basic block as successor to this block. This function does
- // not add this block as a predecessor of the given block so as to avoid
- // circularity.
- void AddSuccessor(BasicBlock* successor) {
- ASSERT(right_successor_ == NULL && successor != NULL);
- if (HasSuccessor()) {
- right_successor_ = successor;
- } else {
- left_successor_ = successor;
- }
- }
-
- ZoneList<BasicBlock*> predecessors_;
- ZoneList<AstNode*> instructions_;
- BasicBlock* left_successor_;
- BasicBlock* right_successor_;
-
- // Support for graph traversal. Before traversal, all nodes in the graph
- // have the same mark (true or false). Traversal marks already-visited
- // nodes with the opposite mark. After traversal, all nodes again have
- // the same mark. Traversal of the same graph is not reentrant.
- bool mark_;
-
-#ifdef DEBUG
- int number_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(BasicBlock);
-};
-
-
-// A flow graph has distinguished entry and exit blocks. The entry block is
-// the only one with no predecessors and the exit block is the only one with
-// no successors.
-class FlowGraph: public ZoneObject {
- public:
- FlowGraph(BasicBlock* entry, BasicBlock* exit)
- : entry_(entry), exit_(exit), preorder_(8), postorder_(8) {
- }
-
- ZoneList<BasicBlock*>* preorder() { return &preorder_; }
- ZoneList<BasicBlock*>* postorder() { return &postorder_; }
-
-#ifdef DEBUG
- void PrintAsText(Handle<String> name);
-#endif
-
- private:
- BasicBlock* entry_;
- BasicBlock* exit_;
- ZoneList<BasicBlock*> preorder_;
- ZoneList<BasicBlock*> postorder_;
-};
-
-
-// The flow graph builder walks the AST adding reachable AST nodes to the
-// flow graph as instructions. It remembers the entry and exit nodes of the
-// graph, and keeps a pointer to the current block being constructed.
-class FlowGraphBuilder: public AstVisitor {
- public:
- FlowGraphBuilder() {}
-
- FlowGraph* Build(FunctionLiteral* lit);
-
- private:
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- BasicBlock* entry_;
- BasicBlock* exit_;
- BasicBlock* current_;
-
- DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_FLOW_GRAPH_H_
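
For reference, the two deleted files implemented a flow graph of basic blocks with at most two successors, kept in edge-split form (a branch block never feeds a join block directly; an empty block is inserted on such an edge). A compact standalone sketch of that block structure, using std::vector in place of V8's zone lists and plain new in place of zone allocation:

#include <vector>

// Simplified stand-in for the deleted BasicBlock: any number of predecessors,
// at most two successors, instructions kept as opaque pointers (AST nodes in V8).
struct BasicBlock {
  std::vector<BasicBlock*> predecessors;
  std::vector<const void*> instructions;
  BasicBlock* left = nullptr;      // first (fall-through) successor
  BasicBlock* right = nullptr;     // second successor, used by branch blocks

  bool HasSuccessor() const { return left != nullptr; }
  bool HasPredecessor() const { return !predecessors.empty(); }

  void AddPredecessor(BasicBlock* p) {
    predecessors.push_back(p);
    p->AddSuccessor(this);
  }

 private:
  void AddSuccessor(BasicBlock* s) { (HasSuccessor() ? right : left) = s; }
};

// Edge-split form: if a branch block would flow straight into a join block,
// route the edge through a fresh empty block (blocks are zone-allocated in
// V8; leaked here for brevity). The caller then links the returned block to succ.
BasicBlock* SplitEdgeIfNeeded(BasicBlock* pred, BasicBlock* succ) {
  if (pred->HasSuccessor() && succ->HasPredecessor()) {
    BasicBlock* empty = new BasicBlock();
    empty->AddPredecessor(pred);
    return empty;
  }
  return pred;
}
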
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 72218513..78bb646c 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -64,9 +64,8 @@ inline bool StackHandler::includes(Address address) const {
}
-inline void StackHandler::Iterate(ObjectVisitor* v) const {
- // Stack handlers do not contain any pointers that need to be
- // traversed.
+inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
+ StackFrame::IteratePc(v, pc_address(), holder);
}
@@ -81,15 +80,9 @@ inline StackHandler::State StackHandler::state() const {
}
-inline Address StackHandler::pc() const {
+inline Address* StackHandler::pc_address() const {
const int offset = StackHandlerConstants::kPCOffset;
- return Memory::Address_at(address() + offset);
-}
-
-
-inline void StackHandler::set_pc(Address value) {
- const int offset = StackHandlerConstants::kPCOffset;
- Memory::Address_at(address() + offset) = value;
+ return reinterpret_cast<Address*>(address() + offset);
}
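
With cooking gone, a stack handler now passes its pc slot and the Code object holding that pc to StackFrame::IteratePc (added further down in this diff, in frames.cc), which rewrites the pc if the collector relocated the code. A standalone sketch of that fix-up with toy types; the real version learns the new location from the ObjectVisitor rather than an explicit new_code argument:

#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

// Toy Code object: just a buffer of "instructions".
struct Code {
  uint8_t instructions[16] = {};
  Address start() { return instructions; }
};

// Fix up a stored return pc after the code object moved: keep the same
// offset, but rebase it onto the new instruction start.
void IteratePc(Address* pc_address, Code* old_code, Code* new_code) {
  std::ptrdiff_t offset = *pc_address - old_code->start();
  assert(offset >= 0 &&
         offset < static_cast<std::ptrdiff_t>(sizeof(old_code->instructions)));
  if (new_code != old_code) {
    *pc_address = new_code->start() + offset;
  }
}

int main() {
  Code before, after;                 // pretend the GC copied before -> after
  Address pc = before.start() + 4;    // return address into the old copy
  IteratePc(&pc, &before, &after);
  assert(pc == after.start() + 4);    // same offset, new location
  return 0;
}
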
diff --git a/src/frames.cc b/src/frames.cc
index 9f815c39..76a441b6 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -36,6 +36,11 @@
namespace v8 {
namespace internal {
+PcToCodeCache::PcToCodeCacheEntry
+ PcToCodeCache::cache_[PcToCodeCache::kPcToCodeCacheSize];
+
+int SafeStackFrameIterator::active_count_ = 0;
+
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator BASE_EMBEDDED {
@@ -88,7 +93,6 @@ StackFrameIterator::StackFrameIterator(bool use_top, Address fp, Address sp)
if (use_top || fp != NULL) {
Reset();
}
- JavaScriptFrame_.DisableHeapAccess();
}
#undef INITIALIZE_SINGLETON
@@ -201,7 +205,7 @@ bool StackTraceFrameIterator::IsValidFrame() {
SafeStackFrameIterator::SafeStackFrameIterator(
Address fp, Address sp, Address low_bound, Address high_bound) :
- low_bound_(low_bound), high_bound_(high_bound),
+ maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
is_valid_top_(
IsWithinBounds(low_bound, high_bound,
Top::c_entry_fp(Top::GetCurrentThread())) &&
@@ -302,69 +306,42 @@ void SafeStackTraceFrameIterator::Advance() {
#endif
-// -------------------------------------------------------------------------
-
-
-void StackHandler::Cook(Code* code) {
- ASSERT(code->contains(pc()));
- set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
-}
-
-
-void StackHandler::Uncook(Code* code) {
- set_pc(code->instruction_start() + OffsetFrom(pc()));
- ASSERT(code->contains(pc()));
-}
-
-
-// -------------------------------------------------------------------------
-
-
bool StackFrame::HasHandler() const {
StackHandlerIterator it(this, top_handler());
return !it.done();
}
-
-void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
- ASSERT(!thread->stack_is_cooked());
- for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
- it.frame()->Cook();
+void StackFrame::IteratePc(ObjectVisitor* v,
+ Address* pc_address,
+ Code* holder) {
+ Address pc = *pc_address;
+ ASSERT(holder->contains(pc));
+ unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
+ Object* code = holder;
+ v->VisitPointer(&code);
+ if (code != holder) {
+ holder = reinterpret_cast<Code*>(code);
+ pc = holder->instruction_start() + pc_offset;
+ *pc_address = pc;
}
- thread->set_stack_is_cooked(true);
}
-void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
- ASSERT(thread->stack_is_cooked());
- for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
- it.frame()->Uncook();
+StackFrame::Type StackFrame::ComputeType(State* state) {
+ ASSERT(state->fp != NULL);
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ return ARGUMENTS_ADAPTOR;
}
- thread->set_stack_is_cooked(false);
+ // The marker and function offsets overlap. If the marker isn't a
+ // smi then the frame is a JavaScript frame -- and the marker is
+ // really the function.
+ const int offset = StandardFrameConstants::kMarkerOffset;
+ Object* marker = Memory::Object_at(state->fp + offset);
+ if (!marker->IsSmi()) return JAVA_SCRIPT;
+ return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
-void StackFrame::Cook() {
- Code* code = this->code();
- ASSERT(code->IsCode());
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- it.handler()->Cook(code);
- }
- ASSERT(code->contains(pc()));
- set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
-}
-
-
-void StackFrame::Uncook() {
- Code* code = this->code();
- ASSERT(code->IsCode());
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- it.handler()->Uncook(code);
- }
- set_pc(code->instruction_start() + OffsetFrom(pc()));
- ASSERT(code->contains(pc()));
-}
-
StackFrame::Type StackFrame::GetCallerState(State* state) const {
ComputeCallerState(state);
@@ -425,6 +402,14 @@ void ExitFrame::SetCallerFp(Address caller_fp) {
}
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
+ IteratePc(v, pc_address(), code());
+ v->VisitPointer(&code_slot());
+}
+
+
Address ExitFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPDisplacement;
}
@@ -499,6 +484,49 @@ Code* JavaScriptFrame::unchecked_code() const {
}
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+ int arguments;
+ if (Heap::gc_state() != Heap::NOT_IN_GC ||
+ SafeStackFrameIterator::is_active()) {
+ // If we are currently iterating the safe stack, the
+ // arguments for frames are traversed as if they were
+ // expression stack elements of the calling frame. The reason for
+ // this rather strange decision is that we cannot access the
+ // function during mark-compact GCs when objects may have been marked.
+ // In fact accessing heap objects (like function->shared() below)
+ // at all during GC is problematic.
+ arguments = 0;
+ } else {
+ // Compute the number of arguments by getting the number of formal
+ // parameters of the function. We must remember to take the
+ // receiver into account (+1).
+ JSFunction* function = JSFunction::cast(this->function());
+ arguments = function->shared()->formal_parameter_count() + 1;
+ }
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments * kPointerSize);
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ const int arguments = Smi::cast(GetExpression(0))->value();
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+ // Internal frames have no arguments. The stack pointer of the
+ // caller is at a fixed offset from the frame pointer.
+ return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
Code* ArgumentsAdaptorFrame::unchecked_code() const {
return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline);
}
@@ -694,13 +722,14 @@ void EntryFrame::Iterate(ObjectVisitor* v) const {
ASSERT(!it.done());
StackHandler* handler = it.handler();
ASSERT(handler->is_entry());
- handler->Iterate(v);
- // Make sure that there's the entry frame does not contain more than
- // one stack handler.
+ handler->Iterate(v, code());
#ifdef DEBUG
+ // Make sure that the entry frame does not contain more than one
+ // stack handler.
it.Advance();
ASSERT(it.done());
#endif
+ IteratePc(v, pc_address(), code());
}
@@ -717,7 +746,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
v->VisitPointers(base, reinterpret_cast<Object**>(address));
base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
// Traverse the pointers in the handler itself.
- handler->Iterate(v);
+ handler->Iterate(v, code());
}
v->VisitPointers(base, limit);
}
@@ -725,6 +754,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IterateExpressions(v);
+ IteratePc(v, pc_address(), code());
// Traverse callee-saved registers, receiver, and parameters.
const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
@@ -739,6 +769,7 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
// Internal frames only have object pointers on the expression stack
// as they never have any arguments.
IterateExpressions(v);
+ IteratePc(v, pc_address(), code());
}
@@ -760,6 +791,56 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
// -------------------------------------------------------------------------
+Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
+ Code* code = reinterpret_cast<Code*>(object);
+ ASSERT(code != NULL && code->contains(pc));
+ return code;
+}
+
+
+Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+ // Check if the pc points into a large object chunk.
+ LargeObjectChunk* chunk = Heap::lo_space()->FindChunkContainingPc(pc);
+ if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
+
+ // Iterate through the 8K page until we reach the end or find an
+ // object starting after the pc.
+ Page* page = Page::FromAddress(pc);
+ HeapObjectIterator iterator(page, Heap::GcSafeSizeOfOldObjectFunction());
+ HeapObject* previous = NULL;
+ while (true) {
+ HeapObject* next = iterator.next();
+ if (next == NULL || next->address() >= pc) {
+ return GcSafeCastToCode(previous, pc);
+ }
+ previous = next;
+ }
+}
+
+PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
+ Counters::pc_to_code.Increment();
+ ASSERT(IsPowerOf2(kPcToCodeCacheSize));
+ uint32_t hash = ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
+ uint32_t index = hash & (kPcToCodeCacheSize - 1);
+ PcToCodeCacheEntry* entry = cache(index);
+ if (entry->pc == pc) {
+ Counters::pc_to_code_cached.Increment();
+ ASSERT(entry->code == GcSafeFindCodeForPc(pc));
+ } else {
+ // Because this code may be interrupted by a profiling signal that
+ // also queries the cache, we cannot update pc before the code has
+ // been set. Otherwise, we risk trying to use a cache entry before
+ // the code has been computed.
+ entry->code = GcSafeFindCodeForPc(pc);
+ entry->pc = pc;
+ }
+ return entry;
+}
+
+
+// -------------------------------------------------------------------------
+
int NumRegs(RegList reglist) {
int n = 0;
while (reglist != 0) {
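
The new PcToCodeCache is a direct-mapped, power-of-two table indexed by a hash of the pc, and it deliberately stores the code pointer before the pc so a profiling signal reading the entry concurrently sees either a matching pair or a pc mismatch (and falls back to the slow path), never a fresh pc with a stale code pointer. A simplified standalone sketch of that lookup; the hash constant and the trivial slow path are stand-ins:

#include <cstdint>

struct Code;  // opaque here

struct Entry {
  uintptr_t pc = 0;
  Code* code = nullptr;
};

constexpr int kCacheSize = 256;          // must remain a power of two
static Entry cache[kCacheSize];

// Stand-in for the slow path; the real code walks the heap page holding pc.
Code* SlowFindCodeForPc(uintptr_t /*pc*/) { return nullptr; }

Code* LookupCode(uintptr_t pc) {
  uint32_t hash = static_cast<uint32_t>(pc * 2654435761u);  // stand-in hash
  Entry* entry = &cache[hash & (kCacheSize - 1)];
  if (entry->pc != pc) {
    // Store code before pc: a signal handler reading in between sees a pc
    // mismatch and takes the slow path instead of a half-updated entry.
    entry->code = SlowFindCodeForPc(pc);
    entry->pc = pc;
  }
  return entry->code;
}
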
diff --git a/src/frames.h b/src/frames.h
index cb791d2d..20111904 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -46,6 +46,32 @@ class Top;
class ThreadLocalTop;
+class PcToCodeCache : AllStatic {
+ public:
+ struct PcToCodeCacheEntry {
+ Address pc;
+ Code* code;
+ };
+
+ static PcToCodeCacheEntry* cache(int index) {
+ return &cache_[index];
+ }
+
+ static Code* GcSafeFindCodeForPc(Address pc);
+ static Code* GcSafeCastToCode(HeapObject* object, Address pc);
+
+ static void FlushPcToCodeCache() {
+ memset(&cache_[0], 0, sizeof(cache_));
+ }
+
+ static PcToCodeCacheEntry* GetCacheEntry(Address pc);
+
+ private:
+ static const int kPcToCodeCacheSize = 256;
+ static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+};
+
+
class StackHandler BASE_EMBEDDED {
public:
enum State {
@@ -64,7 +90,7 @@ class StackHandler BASE_EMBEDDED {
inline bool includes(Address address) const;
// Garbage collection support.
- inline void Iterate(ObjectVisitor* v) const;
+ inline void Iterate(ObjectVisitor* v, Code* holder) const;
// Conversion support.
static inline StackHandler* FromAddress(Address address);
@@ -74,16 +100,11 @@ class StackHandler BASE_EMBEDDED {
bool is_try_catch() { return state() == TRY_CATCH; }
bool is_try_finally() { return state() == TRY_FINALLY; }
- // Garbage collection support.
- void Cook(Code* code);
- void Uncook(Code* code);
-
private:
// Accessors.
inline State state() const;
- inline Address pc() const;
- inline void set_pc(Address value);
+ inline Address* pc_address() const;
DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
};
@@ -162,15 +183,16 @@ class StackFrame BASE_EMBEDDED {
virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame.
- inline Code* code() const {
- return Code::cast(unchecked_code());
+ Code* code() const { return GetContainingCode(pc()); }
+
+ // Get the code object that contains the given pc.
+ Code* GetContainingCode(Address pc) const {
+ return PcToCodeCache::GetCacheEntry(pc)->code;
}
- // Garbage collection support.
- static void CookFramesForThread(ThreadLocalTop* thread);
- static void UncookFramesForThread(ThreadLocalTop* thread);
+ virtual void Iterate(ObjectVisitor* v) const = 0;
+ static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
- virtual void Iterate(ObjectVisitor* v) const { }
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };
@@ -212,10 +234,6 @@ class StackFrame BASE_EMBEDDED {
// Get the type and the state of the calling frame.
virtual Type GetCallerState(State* state) const;
- // Cooking/uncooking support.
- void Cook();
- void Uncook();
-
friend class StackFrameIterator;
friend class StackHandlerIterator;
friend class SafeStackFrameIterator;
@@ -280,7 +298,6 @@ class EntryConstructFrame: public EntryFrame {
// Exit frames are used to exit JavaScript execution and go to C.
class ExitFrame: public StackFrame {
public:
- enum Mode { MODE_NORMAL, MODE_DEBUG };
virtual Type type() const { return EXIT; }
virtual Code* unchecked_code() const;
@@ -418,19 +435,11 @@ class JavaScriptFrame: public StandardFrame {
protected:
explicit JavaScriptFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator), disable_heap_access_(false) { }
+ : StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
- // When this mode is enabled it is not allowed to access heap objects.
- // This is a special mode used when gathering stack samples in profiler.
- // A shortcoming is that caller's SP value will be calculated incorrectly
- // (see GetCallerStackPointer implementation), but it is not used for stack
- // sampling.
- void DisableHeapAccess() { disable_heap_access_ = true; }
-
private:
- bool disable_heap_access_;
inline Object* function_slot_object() const;
friend class StackFrameIterator;
@@ -637,6 +646,8 @@ class SafeStackFrameIterator BASE_EMBEDDED {
void Advance();
void Reset();
+ static bool is_active() { return active_count_ > 0; }
+
static bool IsWithinBounds(
Address low_bound, Address high_bound, Address addr) {
return low_bound <= addr && addr <= high_bound;
@@ -650,6 +661,19 @@ class SafeStackFrameIterator BASE_EMBEDDED {
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
+ // This is a nasty hack to make sure the active count is incremented
+ // before the constructor for the embedded iterator is invoked. This
+ // is needed because the constructor will start looking at frames
+ // right away and we need to make sure it doesn't start inspecting
+ // heap objects.
+ class ActiveCountMaintainer BASE_EMBEDDED {
+ public:
+ ActiveCountMaintainer() { active_count_++; }
+ ~ActiveCountMaintainer() { active_count_--; }
+ };
+
+ ActiveCountMaintainer maintainer_;
+ static int active_count_;
Address low_bound_;
Address high_bound_;
const bool is_valid_top_;
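
SafeStackFrameIterator leans on C++ member-initialization order: maintainer_ is declared before the rest of the iterator state, so the static count is already raised when frame inspection starts and is lowered again last on destruction. A minimal sketch of that RAII ordering trick, with hypothetical names:

#include <cassert>

struct Guard {
  static int active;
  Guard() { ++active; }
  ~Guard() { --active; }
};
int Guard::active = 0;

// Stand-in for the embedded frame iterator whose constructor must already
// see the flag raised (in V8 it starts inspecting stack frames immediately).
struct EmbeddedIterator {
  EmbeddedIterator() { assert(Guard::active > 0); }
};

class SafeIterator {
  // Declaration order is the whole trick: guard_ is constructed first, so
  // the count is up before embedded_'s constructor runs, and torn down last.
  Guard guard_;
  EmbeddedIterator embedded_;
};

int main() {
  {
    SafeIterator it;
    assert(Guard::active == 1);
    (void)it;
  }
  assert(Guard::active == 0);
  return 0;
}
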
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index cd5db80a..59cbad95 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -30,6 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "full-codegen.h"
+#include "macro-assembler.h"
#include "scopes.h"
#include "stub-cache.h"
#include "debug.h"
@@ -38,407 +39,6 @@
namespace v8 {
namespace internal {
-#define BAILOUT(reason) \
- do { \
- if (FLAG_trace_bailout) { \
- PrintF("%s\n", reason); \
- } \
- has_supported_syntax_ = false; \
- return; \
- } while (false)
-
-
-#define CHECK_BAILOUT \
- do { \
- if (!has_supported_syntax_) return; \
- } while (false)
-
-
-void FullCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
- Scope* scope = fun->scope();
- VisitDeclarations(scope->declarations());
- CHECK_BAILOUT;
-
- VisitStatements(fun->body());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDeclarations(
- ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); i++) {
- Visit(decls->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
- Property* prop = decl->proxy()->AsProperty();
- if (prop != NULL) {
- Visit(prop->obj());
- Visit(prop->key());
- }
-
- if (decl->fun() != NULL) {
- Visit(decl->fun());
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitExpressionStatement(
- ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
- Visit(stmt->condition());
- CHECK_BAILOUT;
- Visit(stmt->then_statement());
- CHECK_BAILOUT;
- Visit(stmt->else_statement());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
- if (!FLAG_always_full_compiler) BAILOUT("ForStatement");
- if (stmt->init() != NULL) {
- Visit(stmt->init());
- CHECK_BAILOUT;
- }
- if (stmt->cond() != NULL) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- }
- Visit(stmt->body());
- if (stmt->next() != NULL) {
- CHECK_BAILOUT;
- Visit(stmt->next());
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void FullCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->catch_block());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitTryFinallyStatement(
- TryFinallyStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->finally_block());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitDebuggerStatement(
- DebuggerStatement* stmt) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- BAILOUT("SharedFunctionInfoLiteral");
-}
-
-
-void FullCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
- Visit(expr->condition());
- CHECK_BAILOUT;
- Visit(expr->then_expression());
- CHECK_BAILOUT;
- Visit(expr->else_expression());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void FullCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
- // Supported.
-}
-
-
-void FullCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
- ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
-
- for (int i = 0, len = properties->length(); i < len; i++) {
- ObjectLiteral::Property* property = properties->at(i);
- if (property->IsCompileTimeValue()) continue;
- Visit(property->key());
- CHECK_BAILOUT;
- Visit(property->value());
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
- Expression* subexpr = subexprs->at(i);
- if (subexpr->AsLiteral() != NULL) continue;
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- Visit(subexpr);
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
- Visit(expr->key());
- CHECK_BAILOUT;
- Visit(expr->value());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
- Token::Value op = expr->op();
- if (op == Token::INIT_CONST) BAILOUT("initialize constant");
-
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- if (var->mode() == Variable::CONST) BAILOUT("Assignment to const");
- // All other variables are supported.
- } else if (prop != NULL) {
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // This is a throw reference error.
- BAILOUT("non-variable/non-property assignment");
- }
-
- Visit(expr->value());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
- Visit(expr->exception());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitProperty(Property* expr) {
- Visit(expr->obj());
- CHECK_BAILOUT;
- Visit(expr->key());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCall(Call* expr) {
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- // Check for supported calls
- if (var != NULL && var->is_possibly_eval()) {
- BAILOUT("call to the identifier 'eval'");
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Calls to global variables are supported.
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- BAILOUT("call to a lookup slot");
- } else if (fun->AsProperty() != NULL) {
- Property* prop = fun->AsProperty();
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // Otherwise the call is supported if the function expression is.
- Visit(fun);
- }
- // Check all arguments to the call.
- for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
- Visit(expr->expression());
- CHECK_BAILOUT;
- ZoneList<Expression*>* args = expr->arguments();
- // Check all arguments to the call
- for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
- // Check for inline runtime call
- if (expr->name()->Get(0) == '_' &&
- CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
- BAILOUT("inlined runtime call");
- }
- // Check all arguments to the call. (Relies on TEMP meaning STACK.)
- for (int i = 0; i < expr->arguments()->length(); i++) {
- Visit(expr->arguments()->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::ADD:
- case Token::BIT_NOT:
- case Token::NOT:
- case Token::SUB:
- case Token::TYPEOF:
- case Token::VOID:
- Visit(expr->expression());
- break;
- case Token::DELETE:
- BAILOUT("UnaryOperation: DELETE");
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- Property* prop = expr->expression()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- // All global variables are supported.
- if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type == Slot::LOOKUP) {
- BAILOUT("CountOperation with lookup slot");
- }
- }
- } else if (prop != NULL) {
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // This is a throw reference error.
- BAILOUT("CountOperation non-variable/non-property expression");
- }
-}
-
-
-void FullCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
-}
-
-
-void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
- // Supported.
-}
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-
-
void BreakableStatementChecker::Check(Statement* stmt) {
Visit(stmt);
}
@@ -616,6 +216,12 @@ void BreakableStatementChecker::VisitThrow(Throw* expr) {
}
+void BreakableStatementChecker::VisitIncrementOperation(
+ IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
void BreakableStatementChecker::VisitProperty(Property* expr) {
// Property load is breakable.
is_breakable_ = true;
@@ -654,6 +260,11 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
}
+void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
+ Visit(expr->expression());
+}
+
+
void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left());
Visit(expr->right());
@@ -707,6 +318,46 @@ int FullCodeGenerator::SlotOffset(Slot* slot) {
}
+bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
+ // TODO(kasperl): Once the compare stub allows leaving out the
+ // inlined smi case, we should get rid of this check.
+ if (Token::IsCompareOp(op)) return true;
+ // TODO(kasperl): Once the unary bit not stub allows leaving out
+ // the inlined smi case, we should get rid of this check.
+ if (op == Token::BIT_NOT) return true;
+ // Inline smi case inside loops, but not division and modulo which
+ // are too complicated and take up too much space.
+ return (op != Token::DIV) && (op != Token::MOD) && (loop_depth_ > 0);
+}
+
+
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+ Label* materialize_false,
+ Label** if_true,
+ Label** if_false,
+ Label** fall_through) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ // In an effect context, the true and the false case branch to the
+ // same label.
+ *if_true = *if_false = *fall_through = materialize_true;
+ break;
+ case Expression::kValue:
+ *if_true = *fall_through = materialize_true;
+ *if_false = materialize_false;
+ break;
+ case Expression::kTest:
+ *if_true = true_label_;
+ *if_false = false_label_;
+ *fall_through = fall_through_;
+ break;
+ }
+}
+
+
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
int length = declarations->length();
@@ -858,10 +509,75 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
Emit##name(expr->arguments()); \
return; \
}
-
INLINE_RUNTIME_FUNCTION_LIST(CHECK_EMIT_INLINE_CALL)
- UNREACHABLE();
#undef CHECK_EMIT_INLINE_CALL
+ UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+
+ OverwriteMode mode = NO_OVERWRITE;
+ if (left->ResultOverwriteAllowed()) {
+ mode = OVERWRITE_LEFT;
+ } else if (right->ResultOverwriteAllowed()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ switch (op) {
+ case Token::COMMA:
+ VisitForEffect(left);
+ Visit(right);
+ break;
+
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ // Figure out if either of the operands is a constant.
+ ConstantOperand constant = ShouldInlineSmiCase(op)
+ ? GetConstantOperand(op, left, right)
+ : kNoConstants;
+
+ // Load only the operands that we need to materialize.
+ if (constant == kNoConstants) {
+ VisitForValue(left, kStack);
+ VisitForValue(right, kAccumulator);
+ } else if (constant == kRightConstant) {
+ VisitForValue(left, kAccumulator);
+ } else {
+ ASSERT(constant == kLeftConstant);
+ VisitForValue(right, kAccumulator);
+ }
+
+ SetSourcePosition(expr->position());
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr, op, context_, mode, left, right, constant);
+ } else {
+ EmitBinaryOp(op, context_, mode);
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
}
@@ -876,25 +592,13 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- VisitForControl(expr->left(), &done, &eval_right);
+ VisitForControl(expr->left(), &done, &eval_right, &eval_right);
break;
case Expression::kValue:
- VisitForValueControl(expr->left(),
- location_,
- &done,
- &eval_right);
+ VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
break;
case Expression::kTest:
- VisitForControl(expr->left(), true_label_, &eval_right);
- break;
- case Expression::kValueTest:
- VisitForValueControl(expr->left(),
- location_,
- true_label_,
- &eval_right);
- break;
- case Expression::kTestValue:
- VisitForControl(expr->left(), true_label_, &eval_right);
+ VisitForControl(expr->left(), true_label_, &eval_right, &eval_right);
break;
}
} else {
@@ -903,25 +607,13 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- VisitForControl(expr->left(), &eval_right, &done);
+ VisitForControl(expr->left(), &eval_right, &done, &eval_right);
break;
case Expression::kValue:
- VisitForControlValue(expr->left(),
- location_,
- &eval_right,
- &done);
+ VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
break;
case Expression::kTest:
- VisitForControl(expr->left(), &eval_right, false_label_);
- break;
- case Expression::kValueTest:
- VisitForControl(expr->left(), &eval_right, false_label_);
- break;
- case Expression::kTestValue:
- VisitForControlValue(expr->left(),
- location_,
- &eval_right,
- false_label_);
+ VisitForControl(expr->left(), &eval_right, false_label_, &eval_right);
break;
}
}
@@ -933,6 +625,43 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
}
+void FullCodeGenerator::VisitLogicalForValue(Expression* expr,
+ Token::Value op,
+ Location where,
+ Label* done) {
+ ASSERT(op == Token::AND || op == Token::OR);
+ VisitForValue(expr, kAccumulator);
+ __ push(result_register());
+
+ Label discard;
+ switch (where) {
+ case kAccumulator: {
+ Label restore;
+ if (op == Token::OR) {
+ DoTest(&restore, &discard, &restore);
+ } else {
+ DoTest(&discard, &restore, &restore);
+ }
+ __ bind(&restore);
+ __ pop(result_register());
+ __ jmp(done);
+ break;
+ }
+ case kStack: {
+ if (op == Token::OR) {
+ DoTest(done, &discard, &discard);
+ } else {
+ DoTest(&discard, done, &discard);
+ }
+ break;
+ }
+ }
+
+ __ bind(&discard);
+ __ Drop(1);
+}
+
+
void FullCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
Breakable nested_statement(this, stmt);
@@ -960,16 +689,19 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
SetStatementPosition(stmt);
Label then_part, else_part, done;
- // Do not worry about optimizing for empty then or else bodies.
- VisitForControl(stmt->condition(), &then_part, &else_part);
-
- __ bind(&then_part);
- Visit(stmt->then_statement());
- __ jmp(&done);
-
- __ bind(&else_part);
- Visit(stmt->else_statement());
+ if (stmt->HasElseStatement()) {
+ VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
+ __ bind(&then_part);
+ Visit(stmt->then_statement());
+ __ jmp(&done);
+ __ bind(&else_part);
+ Visit(stmt->else_statement());
+ } else {
+ VisitForControl(stmt->condition(), &then_part, &done, &then_part);
+ __ bind(&then_part);
+ Visit(stmt->then_statement());
+ }
__ bind(&done);
}
@@ -1057,7 +789,7 @@ void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
- Label body, stack_limit_hit, stack_check_success;
+ Label body, stack_limit_hit, stack_check_success, done;
Iteration loop_statement(this, stmt);
increment_loop_depth();
@@ -1069,21 +801,24 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
+ // Record the position of the do while condition and make sure it is
+ // possible to break on the condition.
__ bind(loop_statement.continue_target());
-
- // Record the position of the do while condition and make sure it is possible
- // to break on the condition.
SetExpressionPosition(stmt->cond(), stmt->condition_position());
+ VisitForControl(stmt->cond(),
+ &body,
+ loop_statement.break_target(),
+ loop_statement.break_target());
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+ __ bind(loop_statement.break_target());
+ __ jmp(&done);
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
- __ bind(loop_statement.break_target());
-
+ __ bind(&done);
decrement_loop_depth();
}
@@ -1098,24 +833,27 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
// Emit the test at the bottom of the loop.
__ jmp(loop_statement.continue_target());
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
__ bind(&body);
Visit(stmt->body());
-
__ bind(loop_statement.continue_target());
- // Emit the statement position here as this is where the while statement code
- // starts.
+
+ // Emit the statement position here as this is where the while
+ // statement code starts.
SetStatementPosition(stmt);
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
-
- __ bind(&stack_limit_hit);
- StackCheckStub stack_stub;
- __ CallStub(&stack_stub);
- __ jmp(&stack_check_success);
+ VisitForControl(stmt->cond(),
+ &body,
+ loop_statement.break_target(),
+ loop_statement.break_target());
__ bind(loop_statement.break_target());
decrement_loop_depth();
@@ -1135,6 +873,11 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
// Emit the test at the bottom of the loop (even if empty).
__ jmp(&test);
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
__ bind(&body);
Visit(stmt->body());
@@ -1146,8 +889,8 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
}
__ bind(&test);
- // Emit the statement position here as this is where the for statement code
- // starts.
+ // Emit the statement position here as this is where the for
+ // statement code starts.
SetStatementPosition(stmt);
// Check stack before looping.
@@ -1155,16 +898,14 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
__ bind(&stack_check_success);
if (stmt->cond() != NULL) {
- VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+ VisitForControl(stmt->cond(),
+ &body,
+ loop_statement.break_target(),
+ loop_statement.break_target());
} else {
__ jmp(&body);
}
- __ bind(&stack_limit_hit);
- StackCheckStub stack_stub;
- __ CallStub(&stack_stub);
- __ jmp(&stack_check_success);
-
__ bind(loop_statement.break_target());
decrement_loop_depth();
}
@@ -1291,7 +1032,7 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
void FullCodeGenerator::VisitConditional(Conditional* expr) {
Comment cmnt(masm_, "[ Conditional");
Label true_case, false_case, done;
- VisitForControl(expr->condition(), &true_case, &false_case);
+ VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
__ bind(&true_case);
SetExpressionPosition(expr->then_expression(),
@@ -1363,6 +1104,11 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
}
+void FullCodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
// The macros used here must preserve the result register.
__ Drop(stack_depth);
@@ -1379,6 +1125,14 @@ int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
return 0;
}
+
+void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallRuntime(Runtime::kRegExpCloneResult, 1);
+ Apply(context_, result_register());
+}
+
#undef __
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 9aab3d56..840c8250 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -36,29 +36,6 @@
namespace v8 {
namespace internal {
-class FullCodeGenSyntaxChecker: public AstVisitor {
- public:
- FullCodeGenSyntaxChecker() : has_supported_syntax_(true) {}
-
- void Check(FunctionLiteral* fun);
-
- bool has_supported_syntax() { return has_supported_syntax_; }
-
- private:
- void VisitDeclarations(ZoneList<Declaration*>* decls);
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- bool has_supported_syntax_;
-
- DISALLOW_COPY_AND_ASSIGN(FullCodeGenSyntaxChecker);
-};
-
-
// AST node visitor which can tell whether a given statement will be breakable
// when the code is compiled by the full compiler in the debugger. This means
// that there will be an IC (load/store/call) in the code generated for the
@@ -96,7 +73,8 @@ class FullCodeGenerator: public AstVisitor {
loop_depth_(0),
location_(kStack),
true_label_(NULL),
- false_label_(NULL) {
+ false_label_(NULL),
+ fall_through_(NULL) {
}
static Handle<Code> MakeCode(CompilationInfo* info);
@@ -259,8 +237,25 @@ class FullCodeGenerator: public AstVisitor {
kStack
};
+ enum ConstantOperand {
+ kNoConstants,
+ kLeftConstant,
+ kRightConstant
+ };
+
+ // Compute the frame pointer relative offset for a given local or
+ // parameter slot.
int SlotOffset(Slot* slot);
+ // Determine whether or not to inline the smi case for the given
+ // operation.
+ bool ShouldInlineSmiCase(Token::Value op);
+
+ // Compute which (if any) of the operands is a compile-time constant.
+ ConstantOperand GetConstantOperand(Token::Value op,
+ Expression* left,
+ Expression* right);
+
// Emit code to convert a pure value (in a register, slot, as a literal,
// or on top of the stack) into the result expected according to an
// expression context.
@@ -281,7 +276,8 @@ class FullCodeGenerator: public AstVisitor {
void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
- Label** if_false);
+ Label** if_false,
+ Label** fall_through);
// Emit code to convert pure control flow to a pair of labels into the
// result expected according to an expression context.
@@ -296,7 +292,14 @@ class FullCodeGenerator: public AstVisitor {
// Helper function to convert a pure value into a test context. The value
// is expected on the stack or the accumulator, depending on the platform.
// See the platform-specific implementation for details.
- void DoTest(Expression::Context context);
+ void DoTest(Label* if_true, Label* if_false, Label* fall_through);
+
+ // Helper function to split control flow and avoid a branch to the
+ // fall-through label if it is set up.
+ void Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
void Move(Register dst, Slot* source);
@@ -323,60 +326,38 @@ class FullCodeGenerator: public AstVisitor {
location_ = saved_location;
}
- void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
+ void VisitForControl(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
Expression::Context saved_context = context_;
Label* saved_true = true_label_;
Label* saved_false = false_label_;
+ Label* saved_fall_through = fall_through_;
context_ = Expression::kTest;
true_label_ = if_true;
false_label_ = if_false;
+ fall_through_ = fall_through;
Visit(expr);
context_ = saved_context;
true_label_ = saved_true;
false_label_ = saved_false;
- }
-
- void VisitForValueControl(Expression* expr,
- Location where,
- Label* if_true,
- Label* if_false) {
- Expression::Context saved_context = context_;
- Location saved_location = location_;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- context_ = Expression::kValueTest;
- location_ = where;
- true_label_ = if_true;
- false_label_ = if_false;
- Visit(expr);
- context_ = saved_context;
- location_ = saved_location;
- true_label_ = saved_true;
- false_label_ = saved_false;
- }
-
- void VisitForControlValue(Expression* expr,
- Location where,
- Label* if_true,
- Label* if_false) {
- Expression::Context saved_context = context_;
- Location saved_location = location_;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- context_ = Expression::kTestValue;
- location_ = where;
- true_label_ = if_true;
- false_label_ = if_false;
- Visit(expr);
- context_ = saved_context;
- location_ = saved_location;
- true_label_ = saved_true;
- false_label_ = saved_false;
+ fall_through_ = saved_fall_through;
}
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
+ // Try to perform a comparison as a fast inlined literal compare if
+ // the operands allow it. Returns true if the compare operation
+ // has been matched and all code generated; false otherwise.
+ bool TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
// Platform-specific code for a variable, constant, or function
// declaration. Functions have an initial value.
void EmitDeclaration(Variable* variable,
@@ -391,7 +372,6 @@ class FullCodeGenerator: public AstVisitor {
void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
-
// Platform-specific code for inline runtime calls.
void EmitInlineRuntimeCall(CallRuntime* expr);
@@ -419,7 +399,50 @@ class FullCodeGenerator: public AstVisitor {
// Apply the compound assignment operator. Expects the left operand on top
// of the stack and the right one in the accumulator.
- void EmitBinaryOp(Token::Value op, Expression::Context context);
+ void EmitBinaryOp(Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode);
+
+ // Helper functions for generating inlined smi code for certain
+ // binary operations.
+ void EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right,
+ ConstantOperand constant);
+
+ void EmitConstantSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value);
+
+ void EmitConstantSmiBitOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value);
+
+ void EmitConstantSmiShiftOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value);
+
+ void EmitConstantSmiAdd(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value);
+
+ void EmitConstantSmiSub(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value);
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator.
@@ -440,14 +463,6 @@ class FullCodeGenerator: public AstVisitor {
// accumulator.
void EmitKeyedPropertyAssignment(Assignment* expr);
- // Helper for compare operations. Expects the null-value in a register.
- void EmitNullCompare(bool strict,
- Register obj,
- Register null_const,
- Label* if_true,
- Label* if_false,
- Register scratch);
-
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
@@ -492,6 +507,14 @@ class FullCodeGenerator: public AstVisitor {
// Handles the shortcutted logical binary operations in VisitBinaryOperation.
void EmitLogicalOperation(BinaryOperation* expr);
+ void VisitForTypeofValue(Expression* expr, Location where);
+
+ void VisitLogicalForValue(Expression* expr,
+ Token::Value op,
+ Location where,
+ Label* done);
+
+
MacroAssembler* masm_;
CompilationInfo* info_;
@@ -503,6 +526,7 @@ class FullCodeGenerator: public AstVisitor {
Location location_;
Label* true_label_;
Label* false_label_;
+ Label* fall_through_;
friend class NestedStatement;
diff --git a/src/func-name-inferrer.cc b/src/func-name-inferrer.cc
index 2d6a86a6..f12d026b 100644
--- a/src/func-name-inferrer.cc
+++ b/src/func-name-inferrer.cc
@@ -44,6 +44,20 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
}
+void FuncNameInferrer::PushLiteralName(Handle<String> name) {
+ if (IsOpen() && !Heap::prototype_symbol()->Equals(*name)) {
+ names_stack_.Add(name);
+ }
+}
+
+
+void FuncNameInferrer::PushVariableName(Handle<String> name) {
+ if (IsOpen() && !Heap::result_symbol()->Equals(*name)) {
+ names_stack_.Add(name);
+ }
+}
+
+
Handle<String> FuncNameInferrer::MakeNameFromStack() {
if (names_stack_.is_empty()) {
return Factory::empty_string();
diff --git a/src/func-name-inferrer.h b/src/func-name-inferrer.h
index e88586a4..a35034ec 100644
--- a/src/func-name-inferrer.h
+++ b/src/func-name-inferrer.h
@@ -36,11 +36,12 @@ namespace internal {
// Inference is performed in cases when an anonymous function is assigned
// to a variable or a property (see test-func-name-inference.cc for examples.)
//
-// The basic idea is that during AST traversal LHSs of expressions are
-// always visited before RHSs. Thus, during visiting the LHS, a name can be
-// collected, and during visiting the RHS, a function literal can be collected.
-// Inference is performed while leaving the assignment node.
-class FuncNameInferrer BASE_EMBEDDED {
+// The basic idea is that during parsing of LHSs of certain expressions
+// (assignments, declarations, object literals) we collect name strings,
+// and during parsing of the RHS, a function literal can be collected. After
+// parsing the RHS we can infer a name for function literals that do not have
+// a name.
+class FuncNameInferrer : public ZoneObject {
public:
FuncNameInferrer()
: entries_stack_(10),
@@ -61,11 +62,9 @@ class FuncNameInferrer BASE_EMBEDDED {
}
// Pushes an encountered name onto names stack when in collection state.
- void PushName(Handle<String> name) {
- if (IsOpen()) {
- names_stack_.Add(name);
- }
- }
+ void PushLiteralName(Handle<String> name);
+
+ void PushVariableName(Handle<String> name);
// Adds a function to infer name for.
void AddFunction(FunctionLiteral* func_to_infer) {
@@ -75,11 +74,16 @@ class FuncNameInferrer BASE_EMBEDDED {
}
// Infers a function name and leaves names collection state.
- void InferAndLeave() {
+ void Infer() {
ASSERT(IsOpen());
if (!funcs_to_infer_.is_empty()) {
InferFunctionsNames();
}
+ }
+
+ // Leaves names collection state.
+ void Leave() {
+ ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
}
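The split of InferAndLeave() into Infer() and Leave() lets the parser infer names at one point and unwind the collection state at another. The hypothetical driver below sketches how the new calls fit together when parsing something like `var f = function () {};`; the helper function is illustrative only and not part of this patch.

void ParseAssignmentExample(FuncNameInferrer* fni,
                            Handle<String> lhs_name,
                            FunctionLiteral* rhs_literal) {
  fni->Enter();                     // open a name-collection scope for the LHS
  fni->PushVariableName(lhs_name);  // the variable name becomes a candidate
  fni->AddFunction(rhs_literal);    // the RHS is an anonymous function literal
  fni->Infer();                     // name the literal from the collected names
  fni->Leave();                     // pop this scope's entries from the stacks
}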
@@ -102,34 +106,6 @@ class FuncNameInferrer BASE_EMBEDDED {
};
-// A wrapper class that automatically calls InferAndLeave when
-// leaving scope.
-class ScopedFuncNameInferrer BASE_EMBEDDED {
- public:
- explicit ScopedFuncNameInferrer(FuncNameInferrer* inferrer)
- : inferrer_(inferrer),
- is_entered_(false) {}
-
- ~ScopedFuncNameInferrer() {
- if (is_entered_) {
- inferrer_->InferAndLeave();
- }
- }
-
- // Triggers the wrapped inferrer into name collection state.
- void Enter() {
- inferrer_->Enter();
- is_entered_ = true;
- }
-
- private:
- FuncNameInferrer* inferrer_;
- bool is_entered_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedFuncNameInferrer);
-};
-
-
} } // namespace v8::internal
#endif // V8_FUNC_NAME_INFERRER_H_
diff --git a/src/globals.h b/src/globals.h
index 3fe9e240..f168d6eb 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -244,10 +244,12 @@ const Address kHandleZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
+const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
+const uint32_t kDebugZapValue = 0xbadbaddb;
#endif
@@ -662,7 +664,7 @@ F FUNCTION_CAST(Address addr) {
#define TRACK_MEMORY(name)
#endif
-// define used for helping GCC to make better inlining. Don't bother for debug
+// Define used for helping GCC to make better inlining. Don't bother for debug
// builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
// errors in debug build.
#if defined(__GNUC__) && !defined(DEBUG)
@@ -678,6 +680,14 @@ F FUNCTION_CAST(Address addr) {
#define NO_INLINE(header) header
#endif
+
+#if defined(__GNUC__) && __GNUC__ >= 4
+#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
+#else
+#define MUST_USE_RESULT
+#endif
+
+
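MUST_USE_RESULT is what makes the Heap allocation annotations further down in this patch effective: on GCC 4 and later, ignoring the return value of an annotated function produces a warning at the call site. A small, self-contained illustration follows; the Allocate() function is hypothetical, not V8 code.

#if defined(__GNUC__) && __GNUC__ >= 4
#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

MUST_USE_RESULT int Allocate(int size_in_bytes) {
  return size_in_bytes > 0 ? size_in_bytes : -1;  // -1 stands in for a failure
}

int main() {
  Allocate(16);               // warning: ignoring return value of 'Allocate'
  int checked = Allocate(16); // fine: the possibly-failing result is examined
  return checked > 0 ? 0 : 1;
}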
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 656c5546..0d1ad5ad 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -28,7 +28,8 @@
#ifndef V8_HEAP_INL_H_
#define V8_HEAP_INL_H_
-#include "log.h"
+#include "heap.h"
+#include "objects.h"
#include "v8-counters.h"
namespace v8 {
diff --git a/src/heap.cc b/src/heap.cc
index ff92384c..443c926d 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -104,6 +104,7 @@ List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
+HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
@@ -193,6 +194,33 @@ bool Heap::HasBeenSetup() {
}
+int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
+ ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
+ ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
+ MapWord map_word = object->map_word();
+ map_word.ClearMark();
+ map_word.ClearOverflow();
+ return object->SizeFromMap(map_word.ToMap());
+}
+
+
+int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
+ ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
+ ASSERT(MarkCompactCollector::are_map_pointers_encoded());
+ uint32_t marker = Memory::uint32_at(object->address());
+ if (marker == MarkCompactCollector::kSingleFreeEncoding) {
+ return kIntSize;
+ } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
+ return Memory::int_at(object->address() + kIntSize);
+ } else {
+ MapWord map_word = object->map_word();
+ Address map_address = map_word.DecodeMapAddress(Heap::map_space());
+ Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
+ return object->SizeFromMap(map);
+ }
+}
+
+
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
// Is global GC requested?
if (space != NEW_SPACE || FLAG_gc_global) {
@@ -540,6 +568,13 @@ void Heap::EnsureFromSpaceIsCommitted() {
// Committing memory to from space failed.
// Try shrinking and try again.
+ PagedSpaces spaces;
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ space->RelinkPageListInChunkOrder(true);
+ }
+
Shrink();
if (new_space_.CommitFromSpaceIfNeeded()) return;
@@ -571,6 +606,22 @@ void Heap::ClearJSFunctionResultCaches() {
}
+class ClearThreadNormalizedMapCachesVisitor: public ThreadVisitor {
+ virtual void VisitThread(ThreadLocalTop* top) {
+ Context* context = top->context_;
+ if (context == NULL) return;
+ context->global()->global_context()->normalized_map_cache()->Clear();
+ }
+};
+
+
+void Heap::ClearNormalizedMapCaches() {
+ if (Bootstrapper::IsActive()) return;
+ ClearThreadNormalizedMapCachesVisitor visitor;
+ ThreadManager::IterateArchivedThreads(&visitor);
+}
+
+
#ifdef DEBUG
enum PageWatermarkValidity {
@@ -726,8 +777,6 @@ void Heap::MarkCompact(GCTracer* tracer) {
MarkCompactCollector::CollectGarbage();
- MarkCompactEpilogue(is_compacting);
-
LOG(ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
@@ -749,18 +798,11 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
CompilationCache::MarkCompactPrologue();
- Top::MarkCompactPrologue(is_compacting);
- ThreadManager::MarkCompactPrologue(is_compacting);
-
CompletelyClearInstanceofCache();
if (is_compacting) FlushNumberStringCache();
-}
-
-void Heap::MarkCompactEpilogue(bool is_compacting) {
- Top::MarkCompactEpilogue(is_compacting);
- ThreadManager::MarkCompactEpilogue(is_compacting);
+ ClearNormalizedMapCaches();
}
@@ -4031,6 +4073,8 @@ bool Heap::Setup(bool create_heap_objects) {
NewSpaceScavenger::Initialize();
MarkCompactCollector::Initialize();
+ MarkMapPointersAsEncoded(false);
+
// Setup memory allocator and reserve a chunk of memory for new
// space. The chunk is double the size of the requested reserved
// new space size to ensure that we can find a pair of semispaces that
diff --git a/src/heap.h b/src/heap.h
index 45fee175..484cd22b 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -30,6 +30,7 @@
#include <math.h>
+#include "spaces.h"
#include "splay-tree-inl.h"
#include "v8-counters.h"
@@ -313,61 +314,64 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateJSObject(
+ JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateGlobalObject(JSFunction* constructor);
+ MUST_USE_RESULT static Object* AllocateGlobalObject(JSFunction* constructor);
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
- static Object* CopyJSObject(JSObject* source);
+ MUST_USE_RESULT static Object* CopyJSObject(JSObject* source);
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateFunctionPrototype(JSFunction* function);
+ MUST_USE_RESULT static Object* AllocateFunctionPrototype(
+ JSFunction* function);
// Reinitialize a JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the
// constructor. The object is reinitialized and behaves as an
// object that has been freshly allocated using the constructor.
- static Object* ReinitializeJSGlobalProxy(JSFunction* constructor,
- JSGlobalProxy* global);
+ MUST_USE_RESULT static Object* ReinitializeJSGlobalProxy(
+ JSFunction* constructor,
+ JSGlobalProxy* global);
// Allocates and initializes a new JavaScript object based on a map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateJSObjectFromMap(Map* map,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateJSObjectFromMap(
+ Map* map, PretenureFlag pretenure = NOT_TENURED);
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static Object* Allocate(Map* map, AllocationSpace space);
+ MUST_USE_RESULT static Object* Allocate(Map* map, AllocationSpace space);
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static Object* AllocateMap(InstanceType instance_type, int instance_size);
+ MUST_USE_RESULT static Object* AllocateMap(InstanceType instance_type,
+ int instance_size);
// Allocates a partial map for bootstrapping.
- static Object* AllocatePartialMap(InstanceType instance_type,
- int instance_size);
+ MUST_USE_RESULT static Object* AllocatePartialMap(InstanceType instance_type,
+ int instance_size);
// Allocate a map for the specified function
- static Object* AllocateInitialMap(JSFunction* fun);
+ MUST_USE_RESULT static Object* AllocateInitialMap(JSFunction* fun);
// Allocates an empty code cache.
- static Object* AllocateCodeCache();
+ MUST_USE_RESULT static Object* AllocateCodeCache();
// Clear the Instanceof cache (used when a prototype changes).
static void ClearInstanceofCache() {
@@ -392,13 +396,13 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateStringFromAscii(
+ MUST_USE_RESULT static Object* AllocateStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- static Object* AllocateStringFromUtf8(
+ MUST_USE_RESULT static Object* AllocateStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
- static Object* AllocateStringFromTwoByte(
+ MUST_USE_RESULT static Object* AllocateStringFromTwoByte(
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
@@ -406,16 +410,15 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static inline Object* AllocateSymbol(Vector<const char> str,
- int chars,
- uint32_t hash_field);
+ MUST_USE_RESULT static inline Object* AllocateSymbol(Vector<const char> str,
+ int chars,
+ uint32_t hash_field);
- static Object* AllocateInternalSymbol(unibrow::CharacterStream* buffer,
- int chars,
- uint32_t hash_field);
+ MUST_USE_RESULT static Object* AllocateInternalSymbol(
+ unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
- static Object* AllocateExternalSymbol(Vector<const char> str,
- int chars);
+ MUST_USE_RESULT static Object* AllocateExternalSymbol(Vector<const char> str,
+ int chars);
// Allocates and partially initializes a String. There are two String
@@ -425,10 +428,10 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateRawAsciiString(
+ MUST_USE_RESULT static Object* AllocateRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
- static Object* AllocateRawTwoByteString(
+ MUST_USE_RESULT static Object* AllocateRawTwoByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
@@ -436,97 +439,103 @@ class Heap : public AllStatic {
// A cache is used for ascii codes.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. Please note this does not perform a garbage collection.
- static Object* LookupSingleCharacterStringFromCode(uint16_t code);
+ MUST_USE_RESULT static Object* LookupSingleCharacterStringFromCode(
+ uint16_t code);
// Allocate a byte array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateByteArray(int length, PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocateByteArray(int length,
+ PretenureFlag pretenure);
// Allocate a non-tenured byte array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateByteArray(int length);
+ MUST_USE_RESULT static Object* AllocateByteArray(int length);
// Allocate a pixel array of the specified length
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocatePixelArray(int length,
- uint8_t* external_pointer,
- PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocatePixelArray(int length,
+ uint8_t* external_pointer,
+ PretenureFlag pretenure);
// Allocates an external array of the specified length and type.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateExternalArray(int length,
- ExternalArrayType array_type,
- void* external_pointer,
- PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocateExternalArray(
+ int length,
+ ExternalArrayType array_type,
+ void* external_pointer,
+ PretenureFlag pretenure);
// Allocate a tenured JS global property cell.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateJSGlobalPropertyCell(Object* value);
+ MUST_USE_RESULT static Object* AllocateJSGlobalPropertyCell(Object* value);
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateFixedArray(int length, PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocateFixedArray(int length,
+ PretenureFlag pretenure);
// Allocates a fixed array initialized with undefined values
- static Object* AllocateFixedArray(int length);
+ MUST_USE_RESULT static Object* AllocateFixedArray(int length);
// Allocates an uninitialized fixed array. It must be filled by the caller.
//
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateUninitializedFixedArray(int length);
+ MUST_USE_RESULT static Object* AllocateUninitializedFixedArray(int length);
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
- static Object* CopyFixedArray(FixedArray* src);
+ MUST_USE_RESULT static Object* CopyFixedArray(FixedArray* src);
// Allocates a fixed array initialized with the hole values.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateFixedArrayWithHoles(
+ MUST_USE_RESULT static Object* AllocateFixedArrayWithHoles(
int length,
PretenureFlag pretenure = NOT_TENURED);
// AllocateHashTable is identical to AllocateFixedArray except
// that the resulting object has hash_table_map as map.
- static Object* AllocateHashTable(int length,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateHashTable(
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate a global (but otherwise uninitialized) context.
- static Object* AllocateGlobalContext();
+ MUST_USE_RESULT static Object* AllocateGlobalContext();
// Allocate a function context.
- static Object* AllocateFunctionContext(int length, JSFunction* closure);
+ MUST_USE_RESULT static Object* AllocateFunctionContext(int length,
+ JSFunction* closure);
// Allocate a 'with' context.
- static Object* AllocateWithContext(Context* previous,
- JSObject* extension,
- bool is_catch_context);
+ MUST_USE_RESULT static Object* AllocateWithContext(Context* previous,
+ JSObject* extension,
+ bool is_catch_context);
// Allocates a new utility object in the old generation.
- static Object* AllocateStruct(InstanceType type);
+ MUST_USE_RESULT static Object* AllocateStruct(InstanceType type);
// Allocates a function initialized with a shared part.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateFunction(Map* function_map,
- SharedFunctionInfo* shared,
- Object* prototype,
- PretenureFlag pretenure = TENURED);
+ MUST_USE_RESULT static Object* AllocateFunction(
+ Map* function_map,
+ SharedFunctionInfo* shared,
+ Object* prototype,
+ PretenureFlag pretenure = TENURED);
// Indices for direct access into argument objects.
static const int kArgumentsObjectSize =
@@ -538,47 +547,52 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateArgumentsObject(Object* callee, int length);
+ MUST_USE_RESULT static Object* AllocateArgumentsObject(Object* callee,
+ int length);
// Same as NewNumberFromDouble, but may return a preallocated/immutable
// number object (e.g., minus_zero_value_, nan_value_)
- static Object* NumberFromDouble(double value,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* NumberFromDouble(
+ double value, PretenureFlag pretenure = NOT_TENURED);
// Allocates a HeapNumber from value.
- static Object* AllocateHeapNumber(double value, PretenureFlag pretenure);
- static Object* AllocateHeapNumber(double value); // pretenure = NOT_TENURED
+ MUST_USE_RESULT static Object* AllocateHeapNumber(double value,
+ PretenureFlag pretenure);
+ // pretenure = NOT_TENURED.
+ MUST_USE_RESULT static Object* AllocateHeapNumber(double value);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static inline Object* NumberFromInt32(int32_t value);
+ MUST_USE_RESULT static inline Object* NumberFromInt32(int32_t value);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static inline Object* NumberFromUint32(uint32_t value);
+ MUST_USE_RESULT static inline Object* NumberFromUint32(uint32_t value);
// Allocates a new proxy object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateProxy(Address proxy,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateProxy(
+ Address proxy,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates a new SharedFunctionInfo object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateSharedFunctionInfo(Object* name);
+ MUST_USE_RESULT static Object* AllocateSharedFunctionInfo(Object* name);
// Allocates a new cons string object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateConsString(String* first, String* second);
+ MUST_USE_RESULT static Object* AllocateConsString(String* first,
+ String* second);
// Allocates a new sub string object which is a substring of an underlying
// string buffer stretching from the index start (inclusive) to the index
@@ -586,19 +600,20 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateSubString(String* buffer,
- int start,
- int end,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* AllocateSubString(
+ String* buffer,
+ int start,
+ int end,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocate a new external string object, which is backed by a string
// resource that resides outside the V8 heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- static Object* AllocateExternalStringFromAscii(
+ MUST_USE_RESULT static Object* AllocateExternalStringFromAscii(
ExternalAsciiString::Resource* resource);
- static Object* AllocateExternalStringFromTwoByte(
+ MUST_USE_RESULT static Object* AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external
@@ -610,9 +625,10 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static inline Object* AllocateRaw(int size_in_bytes,
- AllocationSpace space,
- AllocationSpace retry_space);
+ MUST_USE_RESULT static inline Object* AllocateRaw(
+ int size_in_bytes,
+ AllocationSpace space,
+ AllocationSpace retry_space);
// Initialize a filler object to keep the ability to iterate over the heap
// when shortening objects.
@@ -624,26 +640,26 @@ class Heap : public AllStatic {
// self_reference. This allows generated code to reference its own Code
// object by containing this pointer.
// Please note this function does not perform a garbage collection.
- static Object* CreateCode(const CodeDesc& desc,
- Code::Flags flags,
- Handle<Object> self_reference);
+ MUST_USE_RESULT static Object* CreateCode(const CodeDesc& desc,
+ Code::Flags flags,
+ Handle<Object> self_reference);
- static Object* CopyCode(Code* code);
+ MUST_USE_RESULT static Object* CopyCode(Code* code);
// Copy the code and scope info part of the code object, but insert
// the provided data as the relocation information.
- static Object* CopyCode(Code* code, Vector<byte> reloc_info);
+ MUST_USE_RESULT static Object* CopyCode(Code* code, Vector<byte> reloc_info);
// Finds the symbol for string in the symbol table.
// If not found, a new symbol is added to the table and returned.
// Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
// failed.
// Please note this function does not perform a garbage collection.
- static Object* LookupSymbol(Vector<const char> str);
- static Object* LookupAsciiSymbol(const char* str) {
+ MUST_USE_RESULT static Object* LookupSymbol(Vector<const char> str);
+ MUST_USE_RESULT static Object* LookupAsciiSymbol(const char* str) {
return LookupSymbol(CStrVector(str));
}
- static Object* LookupSymbol(String* str);
+ MUST_USE_RESULT static Object* LookupSymbol(String* str);
static bool LookupSymbolIfExists(String* str, String** symbol);
static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
@@ -658,7 +674,7 @@ class Heap : public AllStatic {
// string might stay non-flat even when not a failure is returned.
//
// Please note this function does not perform a garbage collection.
- static inline Object* PrepareForCompare(String* str);
+ MUST_USE_RESULT static inline Object* PrepareForCompare(String* str);
// Converts the given boolean condition to JavaScript boolean value.
static Object* ToBoolean(bool condition) {
@@ -818,6 +834,13 @@ class Heap : public AllStatic {
roots_[kCodeStubsRootIndex] = value;
}
+ // Support for computing object sizes for old objects during GCs. Returns
+ // a function that is guaranteed to be safe for computing object sizes in
+ // the current GC phase.
+ static HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
+ return gc_safe_size_of_old_object_;
+ }
+
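GcSafeSizeOfOldObjectFunction() hands out whichever of the two size functions is valid for the current GC phase. The hypothetical consumer below sketches how a heap walk could use it without knowing whether map words are currently encoded; WalkOldSpace() is illustrative only and not part of this patch.

void WalkOldSpace(Address start, Address end) {
  HeapObjectCallback size_of = Heap::GcSafeSizeOfOldObjectFunction();
  for (Address current = start; current < end; ) {
    HeapObject* object = HeapObject::FromAddress(current);
    current += size_of(object);  // safe in both normal and compacting phases
  }
}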
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
static void public_set_non_monomorphic_cache(NumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
@@ -857,8 +880,10 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static Object* CreateSymbol(const char* str, int length, int hash);
- static Object* CreateSymbol(String* str);
+ MUST_USE_RESULT static Object* CreateSymbol(const char* str,
+ int length,
+ int hash);
+ MUST_USE_RESULT static Object* CreateSymbol(String* str);
// Write barrier support for address[offset] = o.
static inline void RecordWrite(Address address, int offset);
@@ -930,9 +955,9 @@ class Heap : public AllStatic {
static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
// Allocate uninitialized fixed array.
- static Object* AllocateRawFixedArray(int length);
- static Object* AllocateRawFixedArray(int length,
- PretenureFlag pretenure);
+ MUST_USE_RESULT static Object* AllocateRawFixedArray(int length);
+ MUST_USE_RESULT static Object* AllocateRawFixedArray(int length,
+ PretenureFlag pretenure);
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
@@ -975,8 +1000,9 @@ class Heap : public AllStatic {
kRootListLength
};
- static Object* NumberToString(Object* number,
- bool check_number_string_cache = true);
+ MUST_USE_RESULT static Object* NumberToString(
+ Object* number,
+ bool check_number_string_cache = true);
static Map* MapForExternalArrayType(ExternalArrayType array_type);
static RootListIndex RootIndexForExternalArrayType(
@@ -1021,6 +1047,8 @@ class Heap : public AllStatic {
static void ClearJSFunctionResultCaches();
+ static void ClearNormalizedMapCaches();
+
static GCTracer* tracer() { return tracer_; }
private:
@@ -1169,6 +1197,18 @@ class Heap : public AllStatic {
static GCCallback global_gc_prologue_callback_;
static GCCallback global_gc_epilogue_callback_;
+ // Support for computing object sizes during GC.
+ static HeapObjectCallback gc_safe_size_of_old_object_;
+ static int GcSafeSizeOfOldObject(HeapObject* object);
+ static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
+
+ // Update the GC state. Called from the mark-compact collector.
+ static void MarkMapPointersAsEncoded(bool encoded) {
+ gc_safe_size_of_old_object_ = encoded
+ ? &GcSafeSizeOfOldObjectWithEncodedMap
+ : &GcSafeSizeOfOldObject;
+ }
+
// Checks whether a global GC is necessary
static GarbageCollector SelectGarbageCollector(AllocationSpace space);
@@ -1181,10 +1221,10 @@ class Heap : public AllStatic {
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
// (since both AllocateRaw and AllocateRawMap are inlined).
- static inline Object* AllocateRawMap();
+ MUST_USE_RESULT static inline Object* AllocateRawMap();
// Allocate an uninitialized object in the global property cell space.
- static inline Object* AllocateRawCell();
+ MUST_USE_RESULT static inline Object* AllocateRawCell();
// Initializes a JSObject based on its map.
static void InitializeJSObjectFromMap(JSObject* obj,
@@ -1222,7 +1262,6 @@ class Heap : public AllStatic {
// Code to be run before and after mark-compact.
static void MarkCompactPrologue(bool is_compacting);
- static void MarkCompactEpilogue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
@@ -1246,9 +1285,10 @@ class Heap : public AllStatic {
// other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefit from the use of this function.
// Please note this does not perform a garbage collection.
- static inline Object* InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype);
+ MUST_USE_RESULT static inline Object* InitializeFunction(
+ JSFunction* function,
+ SharedFunctionInfo* shared,
+ Object* prototype);
static GCTracer* tracer_;
@@ -1314,6 +1354,7 @@ class Heap : public AllStatic {
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class LinearAllocationScope;
+ friend class MarkCompactCollector;
};
@@ -1861,7 +1902,7 @@ class TranscendentalCache {
// Returns a heap number with f(input), where f is a math function specified
// by the 'type' argument.
- static inline Object* Get(Type type, double input) {
+ MUST_USE_RESULT static inline Object* Get(Type type, double input) {
TranscendentalCache* cache = caches_[type];
if (cache == NULL) {
caches_[type] = cache = new TranscendentalCache(type);
@@ -1874,7 +1915,7 @@ class TranscendentalCache {
static void Clear();
private:
- inline Object* Get(double input) {
+ MUST_USE_RESULT inline Object* Get(double input) {
Converter c;
c.dbl = input;
int hash = Hash(c);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 15c3198f..a095ef7b 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
+#include "code-stubs.h"
#include "codegen-inl.h"
namespace v8 {
@@ -95,10 +96,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// edi: called object
// eax: number of arguments
__ bind(&non_function_call);
- // CALL_NON_FUNCTION expects the non-function constructor as receiver
- // (instead of the original receiver from the call site). The receiver is
- // stack element argc+1.
- __ mov(Operand(esp, eax, times_4, kPointerSize), edi);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -699,17 +696,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the global context.
- __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
- // Load the Array function from the global context.
- __ mov(result,
- Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
// Number of empty elements to allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
@@ -1099,7 +1085,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
Label generic_array_code;
// Get the Array function.
- GenerateLoadArrayFunction(masm, edi);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
if (FLAG_debug_code) {
// Initial map for the builtin Array function should be a map.
@@ -1135,7 +1121,7 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// The array construct code is only set for the builtin Array function which
// does always have a map.
- GenerateLoadArrayFunction(masm, ebx);
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ebx);
__ cmp(edi, Operand(ebx));
__ Assert(equal, "Unexpected Array function");
// Initial map for the builtin Array function should be a map.
@@ -1159,6 +1145,131 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ __ IncrementCounter(&Counters::string_ctor_calls, 1);
+
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
+ __ cmp(edi, Operand(ecx));
+ __ Assert(equal, "Unexpected String function");
+ }
+
+ // Load the first argument into eax and get rid of the rest
+ // (including the receiver).
+ Label no_arguments;
+ __ test(eax, Operand(eax));
+ __ j(zero, &no_arguments);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ push(ecx);
+ __ mov(eax, ebx);
+
+ // Lookup the argument in the number to string cache.
+ Label not_cached, argument_is_string;
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm,
+ eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ false, // Input is known to be smi?
+ &not_cached);
+ __ IncrementCounter(&Counters::string_ctor_cached_number, 1);
+ __ bind(&argument_is_string);
+ // ----------- S t a t e -------------
+ // -- ebx : argument converted to string
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate a JSValue and put the tagged pointer into eax.
+ Label gc_required;
+ __ AllocateInNewSpace(JSValue::kSize,
+ eax, // Result.
+ ecx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ __ LoadGlobalFunctionInitialMap(edi, ecx);
+ if (FLAG_debug_code) {
+ __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
+ JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(equal, "Unexpected string wrapper instance size");
+ __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
+ __ Assert(equal, "Unexpected unused properties of string wrapper");
+ }
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
+
+ // Set properties and elements.
+ __ Set(ecx, Immediate(Factory::empty_fixed_array()));
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
+
+ // Set the value.
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ // We're done. Return.
+ __ ret(0);
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &convert_argument);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
+ __ j(NegateCondition(is_string), &convert_argument);
+ __ mov(ebx, eax);
+ __ IncrementCounter(&Counters::string_ctor_string_value, 1);
+ __ jmp(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into ebx.
+ __ bind(&convert_argument);
+ __ IncrementCounter(&Counters::string_ctor_conversions, 1);
+ __ EnterInternalFrame();
+ __ push(edi); // Preserve the function.
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ pop(edi);
+ __ LeaveInternalFrame();
+ __ mov(ebx, eax);
+ __ jmp(&argument_is_string);
+
+ // Load the empty string into ebx, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ Set(ebx, Immediate(Factory::empty_string()));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, kPointerSize));
+ __ push(ecx);
+ __ jmp(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(&Counters::string_ctor_gc_required, 1);
+ __ EnterInternalFrame();
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
+ __ ret(0);
+}
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, Operand(esp));
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
new file mode 100644
index 00000000..81068866
--- /dev/null
+++ b/src/ia32/code-stubs-ia32.cc
@@ -0,0 +1,4540 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
+#include "code-stubs.h"
+#include "bootstrapper.h"
+#include "jsregexp.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in esi.
+ Label gc;
+ __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
+
+ // Get the function info from the stack.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+ __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ mov(ebx, Immediate(Factory::empty_fixed_array()));
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
+ __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
+ Immediate(Factory::the_hole_value()));
+ __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
+ __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
+ __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ pop(ecx); // Temporarily remove return address.
+ __ pop(edx);
+ __ push(esi);
+ __ push(edx);
+ __ push(ecx); // Restore return address.
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ eax, ebx, ecx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
+ __ mov(FieldOperand(eax, Context::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
+
+ // Setup the fixed slots.
+ __ xor_(ebx, Operand(ebx)); // Set to NULL.
+ __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
+ __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
+
+ // Copy the global object from the surrounding context. We go through the
+ // context in the function (ecx) to match the allocation behavior we have
+ // in the runtime system (see Heap::AllocateFunctionContext).
+ __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
+ __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
+
+ // Initialize the rest of the slots to undefined.
+ __ mov(ebx, Factory::undefined_value());
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ mov(esi, Operand(eax));
+ __ ret(1 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [esp + kPointerSize]: constant elements.
+ // [esp + (2 * kPointerSize)]: literal index.
+ // [esp + (3 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into ecx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ mov(ecx, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ STATIC_ASSERT(kPointerSize == 4);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(ecx, Factory::undefined_value());
+ __ j(equal, &slow_case);
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Handle<Map> expected_map;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map = Factory::fixed_array_map();
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map = Factory::fixed_cow_array_map();
+ }
+ __ push(ecx);
+ __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
+ __ Assert(equal, message);
+ __ pop(ecx);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+ __ lea(edx, Operand(eax, JSArray::kSize));
+ __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(edx, i), ebx);
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &true_result);
+ __ fldz();
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ FCmp();
+ __ j(zero, &false_result);
+ // Fall through to |true_result|.
+
+ // Return 1/0 for true/false in eax.
+ __ bind(&true_result);
+ __ mov(eax, 1);
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ mov(eax, 0);
+ __ ret(1 * kPointerSize);
+}
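For orientation, a minimal C++ sketch of the boolean conversion the stub above implements for heap objects (illustrative only, with hypothetical helper parameters, not code from this file; the smi/boolean/undefined cases are handled by callers, as noted):

// Illustrative sketch: same decision order as the stub above.
bool ToBooleanHeapObjectSketch(bool is_null, bool is_undetectable,
                               bool is_js_object, bool is_string,
                               int string_length, double heap_number_value) {
  if (is_null) return false;                 // 'null' => false.
  if (is_undetectable) return false;         // Undetectable => false.
  if (is_js_object) return true;             // JavaScript object => true.
  if (is_string) return string_length != 0;  // String => false iff empty.
  double d = heap_number_value;              // HeapNumber:
  return d == d && d != 0.0;                 // false iff +0, -0, or NaN.
}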
+
+
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "",
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
+ return name_;
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(left_arg)) {
+ __ mov(right_arg, right);
+ } else if (right.is(right_arg)) {
+ __ mov(left_arg, left);
+ } else if (left.is(right_arg)) {
+ if (IsOperationCommutative()) {
+ __ mov(left_arg, right);
+ SetArgsReversed();
+ } else {
+ // Order of moves important to avoid destroying left argument.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
+ }
+ } else if (right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ __ mov(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // Order of moves important to avoid destroying right argument.
+ __ mov(right_arg, right);
+ __ mov(left_arg, left);
+ }
+ } else {
+ // Order of moves is not important.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
+ }
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(Immediate(right));
+ } else {
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (left.is(left_arg)) {
+ __ mov(right_arg, Immediate(right));
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ mov(left_arg, Immediate(right));
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, left and right_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite left before moving
+ // it to left_arg.
+ __ mov(left_arg, left);
+ __ mov(right_arg, Immediate(right));
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(Immediate(left));
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (right.is(right_arg)) {
+ __ mov(left_arg, Immediate(left));
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ mov(right_arg, Immediate(left));
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
+ __ mov(right_arg, right);
+ __ mov(left_arg, Immediate(left));
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+
+ enum ArgLocation {
+ ARGS_ON_STACK,
+ ARGS_IN_REGISTERS
+ };
+
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand in register number. Returns operand as floating point number
+ // on FPU stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register number);
+
+ // Code pattern for loading floating point values. Input values must
+ // be either smi or heap number objects (fp values). Requirements:
+ // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
+ // Returns operands as floating point numbers on FPU stack.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location = ARGS_ON_STACK);
+
+ // Similar to LoadFloatOperand but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
+
+ // Test if operands are smi or number objects (fp). Requirements:
+ // operand_1 in eax, operand_2 in edx; falls through on float
+ // operands, jumps to the non_float label otherwise.
+ static void CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch);
+
+ // Takes the operands in edx and eax and loads them as integers in eax
+ // and ecx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+ static void LoadNumbersAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+ static void LoadUnknownsAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* operand_conversion_failure);
+
+ // Test if operands are smis or heap numbers and load them
+ // into xmm0 and xmm1 if they are. Operands are in edx and eax.
+ // Leaves operands unchanged.
+ static void LoadSSE2Operands(MacroAssembler* masm);
+
+ // Test if operands are numbers (smi or HeapNumber objects), and load
+ // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
+ // either operand is not a number. Operands are in edx and eax.
+ // Leaves operands unchanged.
+ static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
+
+ // Similar to LoadSSE2Operands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
+};
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+ // dividend in eax and edx free for the division. Use eax, ebx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = edx;
+ Register right = eax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = eax;
+ right = ebx;
+ if (HasArgsInRegisters()) {
+ __ mov(ebx, eax);
+ __ mov(eax, edx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ mov(right, Operand(esp, 1 * kPointerSize));
+ __ mov(left, Operand(esp, 2 * kPointerSize));
+ }
+
+ if (static_operands_type_.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+ if (op_ == Token::BIT_OR) {
+ __ or_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ } else if (op_ == Token::BIT_AND) {
+ __ and_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ } else if (op_ == Token::BIT_XOR) {
+ __ xor_(right, Operand(left));
+ GenerateReturn(masm);
+ return;
+ }
+ }
+
+ // 2. Prepare the smi check of both operands by oring them together.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ Label not_smis;
+ Register combined = ecx;
+ ASSERT(!left.is(combined) && !right.is(combined));
+ switch (op_) {
+ case Token::BIT_OR:
+ // Perform the operation into eax and smi check the result. Preserve
+ // eax in case the result is not a smi.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left)); // Bitwise or is commutative.
+ combined = right;
+ break;
+
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ __ mov(combined, right);
+ __ or_(combined, Operand(left));
+ break;
+
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Move the right operand into ecx for the shift operation, use eax
+ // for the smi check register.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left));
+ combined = right;
+ break;
+
+ default:
+ break;
+ }
+
+ // 3. Perform the smi check of the operands.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
+ __ test(combined, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis, not_taken);
+
+ // 4. Operands are both smis, perform the operation leaving the result in
+ // eax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
+ switch (op_) {
+ case Token::BIT_OR:
+ // Nothing to do.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(eax));
+ __ xor_(right, Operand(left)); // Bitwise xor is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(eax));
+ __ and_(right, Operand(left)); // Bitwise and is commutative.
+ break;
+
+ case Token::SHL:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shl_cl(left);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(left, 0xc0000000);
+ __ j(sign, &use_fp_on_smis, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SAR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ sar_cl(left);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SHR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shr_cl(left);
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when
+ // Smi tagging. These two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi.
+ __ test(left, Immediate(0xc0000000));
+ __ j(not_zero, slow, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::ADD:
+ ASSERT(right.is(eax));
+ __ add(right, Operand(left)); // Addition is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ break;
+
+ case Token::SUB:
+ __ sub(left, Operand(right));
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ __ mov(eax, left);
+ break;
+
+ case Token::MUL:
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // We can't revert the multiplication if the result is not a smi
+ // so save the right operand.
+ __ mov(ebx, right);
+ // Remove tag from one of the operands (but keep sign).
+ __ SmiUntag(right);
+ // Do multiplication.
+ __ imul(right, Operand(left)); // Multiplication is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(right, combined, &use_fp_on_smis);
+ break;
+
+ case Token::DIV:
+ // We can't revert the division if the result is not a smi so
+ // save the left operand.
+ __ mov(edi, left);
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &use_fp_on_smis, not_taken);
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by idiv
+ // instruction.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ cmp(eax, 0x40000000);
+ __ j(equal, &use_fp_on_smis);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
+ // Check that the remainder is zero.
+ __ test(edx, Operand(edx));
+ __ j(not_zero, &use_fp_on_smis);
+ // Tag the result and store it in register eax.
+ __ SmiTag(eax);
+ break;
+
+ case Token::MOD:
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &not_smis, not_taken);
+
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(edx, combined, slow);
+ // Move remainder to register eax.
+ __ mov(eax, edx);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // 5. Emit return of result in eax.
+ GenerateReturn(masm);
+
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::SHL: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Result we want is in left == edx, so we can put the allocated heap
+ // number in eax.
+ __ AllocateHeapNumber(eax, ecx, ebx, slow);
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(left));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ // It's OK to overwrite the right argument on the stack because we
+ // are about to return.
+ __ mov(Operand(esp, 1 * kPointerSize), left);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
+ break;
+ }
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Restore arguments to edx, eax.
+ switch (op_) {
+ case Token::ADD:
+ // Revert right = right + left.
+ __ sub(right, Operand(left));
+ break;
+ case Token::SUB:
+ // Revert left = left - right.
+ __ add(left, Operand(right));
+ break;
+ case Token::MUL:
+ // Right was clobbered but a copy is in ebx.
+ __ mov(right, ebx);
+ break;
+ case Token::DIV:
+ // Left was clobbered but a copy is in edi. Right is in ebx for
+ // division.
+ __ mov(edx, edi);
+ __ mov(eax, right);
+ break;
+ default: UNREACHABLE();
+ break;
+ }
+ __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::LoadFloatSmis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, ecx);
+ GenerateReturn(masm);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // 7. Non-smi operands, fall out to the non-smi code with the operands in
+ // edx and eax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ switch (op_) {
+ case Token::BIT_OR:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Right operand is saved in ecx and eax was destroyed by the smi
+ // check.
+ __ mov(eax, ecx);
+ break;
+
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in eax, ebx at this point.
+ __ mov(edx, eax);
+ __ mov(eax, ebx);
+ break;
+
+ default:
+ break;
+ }
+}
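For orientation, an illustrative sketch (not code from this file) of why the shift paths above test the result against 0xc0000000 before retagging: with kSmiTag == 0 and kSmiTagSize == 1 a smi carries 31 signed payload bits, so a value with either of the two top bits set cannot be tagged safely.

#include <stdint.h>
// Illustrative sketch of the 0xc0000000 check used above. 0x80000000 would
// lose its high bit when tagged; 0x40000000 would turn negative after the
// one-bit tag shift.
static bool FitsInSmiAfterShiftSketch(uint32_t result) {
  return (result & 0xc0000000u) == 0;
}
static int32_t SmiTagSketch(int32_t value) {  // assumes the check passed
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}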
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+
+ // Generate fast case smi code if requested. This flag is set when the fast
+ // case smi code is not generated by the caller. Generating it here will speed
+ // up common operations.
+ if (ShouldGenerateSmiCode()) {
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+ }
+
+ // Floating point case.
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
+ break;
+ }
+
+ Label not_floats;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx);
+ __ AbortIfNotNumber(eax);
+ }
+ if (static_operands_type_.IsSmi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(edx);
+ __ AbortIfNotSmi(eax);
+ }
+ FloatingPointHelper::LoadSSE2Smis(masm, ecx);
+ } else {
+ FloatingPointHelper::LoadSSE2Operands(masm);
+ }
+ } else {
+ FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+ }
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ GenerateHeapResultAllocation(masm, &call_runtime);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ } else { // SSE2 not available, use FPU.
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(edx);
+ __ AbortIfNotNumber(eax);
+ }
+ } else {
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ }
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ Label after_alloc_failure;
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ GenerateReturn(masm);
+ __ bind(&after_alloc_failure);
+ __ ffree();
+ __ jmp(&call_runtime);
+ }
+ __ bind(&not_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // Try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
+ GenerateTypeTransition(masm);
+ }
+ }
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm,
+ static_operands_type_,
+ use_sse3_,
+ &call_runtime);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
+ case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+ case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::SAR: __ sar_cl(eax); break;
+ case Token::SHL: __ shl_cl(eax); break;
+ case Token::SHR: __ shr_cl(eax); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative and fits in a smi.
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &call_runtime);
+ } else {
+ // Check if result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(negative, &non_smi_result);
+ }
+ // Tag smi result and return.
+ __ SmiTag(eax);
+ GenerateReturn(masm);
+
+ // All ops except SHR return a signed int32 that we load in
+ // a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ mov(ebx, Operand(eax)); // ebx: result
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ebx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
+ }
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ }
+
+ // If all else fails, use the runtime system to get the correct
+ // result. If arguments were passed in registers, place them now on the
+ // stack in the correct order below the return address.
+ __ bind(&call_runtime);
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ // Test for string arguments before calling runtime.
+ Label not_strings, not_string1, string1, string1_smi2;
+
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in edx, eax
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+ if (HasArgsReversed()) {
+ lhs = eax;
+ rhs = edx;
+ } else {
+ lhs = edx;
+ rhs = eax;
+ }
+
+ // Test if first argument is a string.
+ __ test(lhs, Immediate(kSmiTagMask));
+ __ j(zero, &not_string1);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &not_string1);
+
+ // First argument is a string, test second.
+ __ test(rhs, Immediate(kSmiTagMask));
+ __ j(zero, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings. Jump to the string add stub.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, edi, ebx, ecx, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ test(rhs, Immediate(kSmiTagMask));
+ __ j(zero, &not_strings);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ }
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure) {
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in edx is already an object, we skip the
+ // allocation of a heap number.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now edx can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(edx, Operand(ebx));
+ __ bind(&skip_allocation);
+ // Use object in edx as a result holder
+ __ mov(eax, Operand(edx));
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now eax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(eax, ebx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+}
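A small illustrative sketch (not from this file) of the mode adjustment at the top of GenerateHeapResultAllocation: when the arguments were reversed, the operand that may be overwritten flips sides.

// Illustrative only: effective overwrite mode after a possible argument swap.
OverwriteMode EffectiveOverwriteModeSketch(OverwriteMode mode,
                                           bool args_reversed) {
  if (!args_reversed) return mode;
  if (mode == OVERWRITE_RIGHT) return OVERWRITE_LEFT;
  if (mode == OVERWRITE_LEFT) return OVERWRITE_RIGHT;
  return mode;  // NO_OVERWRITE is unaffected.
}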
+
+
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ // If arguments are not passed in registers read them from the stack.
+ ASSERT(!HasArgsInRegisters());
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+ // If arguments are not passed in registers remove them from the stack before
+ // returning.
+ if (!HasArgsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
+ __ pop(ecx);
+ if (HasArgsReversed()) {
+ __ push(eax);
+ __ push(edx);
+ } else {
+ __ push(edx);
+ __ push(eax);
+ }
+ __ push(ecx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ // Ensure the operands are on the stack.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ __ pop(ecx); // Save return address.
+
+ // Left and right arguments are now on top.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ push(Immediate(Smi::FromInt(MinorKey())));
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+ __ push(ecx); // Push return address.
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // esp[4]: argument (should be number).
+ // esp[0]: return address.
+ // Test that eax is a number.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the low and high words of the double into ebx, edx.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ sar(eax, 1);
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ mov(Operand(esp, 0), eax);
+ __ fild_s(Operand(esp, 0));
+ __ fst_d(Operand(esp, 0));
+ __ pop(edx);
+ __ pop(ebx);
+ __ jmp(&loaded);
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // low and high words into ebx, edx.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+
+ __ bind(&loaded);
+ // ST[0] == double value
+ // ebx = low 32 bits of double value
+ // edx = high 32 bits of double value
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ mov(ecx, ebx);
+ __ xor_(ecx, Operand(edx));
+ __ mov(eax, ecx);
+ __ sar(eax, 16);
+ __ xor_(ecx, Operand(eax));
+ __ mov(eax, ecx);
+ __ sar(eax, 8);
+ __ xor_(ecx, Operand(eax));
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+
+ // ST[0] == double value.
+ // ebx = low 32 bits of double value.
+ // edx = high 32 bits of double value.
+ // ecx = TranscendentalCache::hash(double value).
+ __ mov(eax,
+ Immediate(ExternalReference::transcendental_cache_array_address()));
+ // Eax points to cache array.
+ __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // Eax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ test(eax, Operand(eax));
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+ // Check that the layout of cache elements match expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+ // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
+ __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+ __ lea(ecx, Operand(eax, ecx, times_4, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmp(ebx, Operand(ecx, 0));
+ __ j(not_equal, &cache_miss);
+ __ cmp(edx, Operand(ecx, kIntSize));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ mov(eax, Operand(ecx, 2 * kIntSize));
+ __ fstp(0);
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ // We are short on registers, so use no_reg as scratch.
+ // This gives slightly larger code.
+ __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+ GenerateOperation(masm);
+ __ mov(Operand(ecx, 0), ebx);
+ __ mov(Operand(ecx, kIntSize), edx);
+ __ mov(Operand(ecx, 2 * kIntSize), eax);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
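The cache lookup above hashes the two 32-bit halves of the input double; an equivalent plain C++ sketch of that hash (illustrative only, not part of this file) is:

#include <stdint.h>
// Illustrative sketch of the cache hash computed above. The shifts are
// arithmetic, matching the generated code (assumed well-defined on the
// targets in use), and kCacheSize is a power of two, so the final mask
// keeps the index in range.
static uint32_t TranscendentalHashSketch(uint32_t low, uint32_t high,
                                         uint32_t cache_size) {
  int32_t h = static_cast<int32_t>(low ^ high);
  h ^= h >> 16;
  h ^= h >> 8;
  return static_cast<uint32_t>(h) & (cache_size - 1);
}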
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+ // Only free register is edi.
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If the argument is outside the range -2^63..2^63, fsin/fcos don't
+ // work. We must reduce it to the appropriate range.
+ __ mov(edi, edx);
+ __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
+ int supported_exponent_limit =
+ (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
+ __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+ __ j(below, &in_range, taken);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmp(Operand(edi), Immediate(0x7ff00000));
+ Label non_nan_result;
+ __ j(not_equal, &non_nan_result, taken);
+ // Input is +/-Infinity or NaN. Result is NaN.
+ __ fstp(0);
+ // NaN is represented by 0x7ff8000000000000.
+ __ push(Immediate(0x7ff80000));
+ __ push(Immediate(0));
+ __ fld_d(Operand(esp, 0));
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ jmp(&done);
+
+ __ bind(&non_nan_result);
+
+ // Use fpmod to restrict argument to the range +/-2*PI.
+ __ mov(edi, eax); // Save eax before using fnstsw_ax.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear if Illegal Operand or Zero Division exceptions are set.
+ __ test(Operand(eax), Immediate(5));
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
+ }
+
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ test(Operand(eax), Immediate(0x400 /* C2 */));
+ // If C2 is set, computation only has partial result. Loop to
+ // continue computation.
+ __ j(not_zero, &partial_remainder_loop);
+ }
+ // FPU Stack: input, 2*pi, input % 2*pi
+ __ fstp(2);
+ __ fstp(0);
+ __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
+
+ // FPU Stack: input % 2*pi
+ __ bind(&in_range);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ fsin();
+ break;
+ case TranscendentalCache::COS:
+ __ fcos();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&done);
+}
+
+
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
+// trashed registers.
+void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
+ Label done, right_exponent, normal_exponent;
+ Register scratch = ebx;
+ Register scratch2 = edi;
+ if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
+ return;
+ }
+ if (!type_info.IsInteger32() || !use_sse3) {
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ }
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ if (!type_info.IsInteger32()) {
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ }
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
+ __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, Operand(ecx));
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent that we are fastest at and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
+
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch2, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit, which took care of one, and
+ // we want to use the full unsigned range, so we subtract 1 bit from the
+ // shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, big_shift_distance);
+ // Get the second half of the double.
+ __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 21 bits to get the most significant 11 bits of the low
+ // mantissa word.
+ __ shr(ecx, 32 - big_shift_distance);
+ __ or_(ecx, Operand(scratch2));
+ // We have the answer in ecx, but we may need to negate it.
+ __ test(scratch, Operand(scratch));
+ __ j(positive, &done);
+ __ neg(ecx);
+ __ jmp(&done);
+ }
+
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in ecx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(Operand(scratch2), Immediate(zero_exponent));
+ // ecx already has a Smi zero.
+ __ j(less, &done);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ mov(ecx, Immediate(30));
+ __ sub(ecx, Operand(scratch2));
+
+ __ bind(&right_exponent);
+ // Here ecx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, shift_distance);
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the most significant 10 bits of the low
+ // mantissa word.
+ __ shr(scratch2, 32 - shift_distance);
+ __ or_(scratch2, Operand(scratch));
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to ecx and
+ // we may need to fix the sign.
+ Label negative;
+ __ xor_(ecx, Operand(ecx));
+ __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative);
+ __ mov(ecx, scratch2);
+ __ jmp(&done);
+ __ bind(&negative);
+ __ sub(ecx, Operand(scratch2));
+ __ bind(&done);
+ }
+}
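As background for the bit twiddling above, an illustrative sketch assuming the usual IEEE-754 double layout (not code from this file): the biased exponent occupies bits 20..30 of the double's high word, which is what the kExponentMask and kExponentShift arithmetic extracts.

#include <stdint.h>
// Illustrative only: biased exponent from the high 32 bits of a double
// (mask 0x7ff00000, shift 20, bias 1023).
static int BiasedExponentSketch(uint32_t high_word) {
  return static_cast<int>((high_word & 0x7ff00000u) >> 20);
}
// The non-SSE3 path above handles unbiased exponents up to 30 directly,
// treats 31 (typical for >>> results) as a special case, and bails out to
// conversion_failure for anything larger.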
+
+
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ if (!type_info.IsDouble()) {
+ if (!type_info.IsSmi()) {
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg1_is_object);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(edx);
+ }
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+ }
+
+ __ bind(&arg1_is_object);
+
+ // Get the untagged integer version of the edx heap number in ecx.
+ IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
+ __ mov(edx, ecx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ if (!type_info.IsDouble()) {
+ // Test if arg2 is a Smi.
+ if (!type_info.IsSmi()) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg2_is_object);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(eax);
+ }
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+ }
+
+ __ bind(&arg2_is_object);
+
+ // Get the untagged integer version of the eax heap number in ecx.
+ IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
+ __ bind(&done);
+ __ mov(eax, edx);
+}
+
+
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ // Test if arg1 is a Smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg1_is_object);
+
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ cmp(edx, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(edx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg1);
+
+ // Get the untagged integer version of the edx heap number in ecx.
+ IntegerConvert(masm,
+ edx,
+ TypeInfo::Unknown(),
+ use_sse3,
+ conversion_failure);
+ __ mov(edx, ecx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+
+ // Test if arg2 is a Smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg2_is_object);
+
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(ecx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg2);
+
+ // Get the untagged integer version of the eax heap number in ecx.
+ IntegerConvert(masm,
+ eax,
+ TypeInfo::Unknown(),
+ use_sse3,
+ conversion_failure);
+ __ bind(&done);
+ __ mov(eax, edx);
+}
+
+
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ if (type_info.IsNumber()) {
+ LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
+ } else {
+ LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
+ }
+}
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register number) {
+ Label load_smi, done;
+
+ __ test(number, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi, not_taken);
+ __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi);
+ __ SmiUntag(number);
+ __ push(number);
+ __ fild_s(Operand(esp, 0));
+ __ pop(number);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
+ Label load_smi_edx, load_eax, load_smi_eax, done;
+ // Load operand in edx into xmm0.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+ __ bind(&load_eax);
+ // Load operand in eax into xmm1.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_edx);
+ __ SmiUntag(edx); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm0, Operand(edx));
+ __ SmiTag(edx); // Retag smi for heap number overwriting test.
+ __ jmp(&load_eax);
+
+ __ bind(&load_smi_eax);
+ __ SmiUntag(eax); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm1, Operand(eax));
+ __ SmiTag(eax); // Retag smi for heap number overwriting test.
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+ // Load operand in edx into xmm0, or branch to not_numbers.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, not_numbers); // Argument in edx is not a number.
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ bind(&load_eax);
+ // Load operand in eax into xmm1, or branch to not_numbers.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(equal, &load_float_eax);
+ __ jmp(not_numbers); // Argument in eax is not a number.
+ __ bind(&load_smi_edx);
+ __ SmiUntag(edx); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm0, Operand(edx));
+ __ SmiTag(edx); // Retag smi for heap number overwriting test.
+ __ jmp(&load_eax);
+ __ bind(&load_smi_eax);
+ __ SmiUntag(eax); // Untag smi before converting to float.
+ __ cvtsi2sd(xmm1, Operand(eax));
+ __ SmiTag(eax); // Retag smi for heap number overwriting test.
+ __ jmp(&done);
+ __ bind(&load_float_eax);
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm0, Operand(scratch));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm1, Operand(scratch));
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location) {
+ Label load_smi_1, load_smi_2, done_load_1, done;
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, edx);
+ } else {
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ }
+ __ test(scratch, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_1, not_taken);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ bind(&done_load_1);
+
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, eax);
+ } else {
+ __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ }
+ __ test(scratch, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_2, not_taken);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_1);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+ __ jmp(&done_load_1);
+
+ __ bind(&load_smi_2);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ mov(Operand(esp, 0), scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+}
+
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch) {
+ Label test_other, done;
+ // Test that both operands are numbers (heap numbers or smis); jump to
+ // non_float if either operand is neither.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &test_other, not_taken); // argument in edx is OK
+ __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(scratch, Factory::heap_number_map());
+ __ j(not_equal, non_float); // argument in edx is not a number -> NaN
+
+ __ bind(&test_other);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done); // argument in eax is OK
+ __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(scratch, Factory::heap_number_map());
+ __ j(not_equal, non_float); // argument in eax is not a number -> NaN
+
+ // Fall-through: Both operands are numbers.
+ __ bind(&done);
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &try_float, not_taken);
+
+ if (negative_zero_ == kStrictNegativeZero) {
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &slow, not_taken);
+ }
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ Label undo;
+ __ mov(edx, Operand(eax));
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
+ __ j(no_overflow, &done, taken);
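+ // Overflow happens only when negating the most negative smi, whose negation
+ // does not fit in a smi; in that case fall through to undo and go slow.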
+
+ // Restore eax and go slow case.
+ __ bind(&undo);
+ __ mov(eax, Operand(edx));
+ __ jmp(&slow);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow);
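+ // Negating an IEEE 754 double only flips the sign bit, which lives in the
+ // high (exponent) word; the mantissa word is left untouched.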
+ if (overwrite_ == UNARY_OVERWRITE) {
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ } else {
+ __ mov(edx, Operand(eax));
+ // edx: operand
+ __ AllocateHeapNumber(eax, ebx, ecx, &undo);
+ // eax: allocated 'empty' number
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow, not_taken);
+
+ // Convert the heap number in eax to an untagged integer in ecx.
+ IntegerConvert(masm,
+ eax,
+ TypeInfo::Unknown(),
+ CpuFeatures::IsSupported(SSE3),
+ &slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ not_(ecx);
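+ // ecx - 0xc0000000 has its sign bit set exactly when ecx lies outside the
+ // smi range [-2^30, 2^30 - 1], so a set sign flag means the result cannot be
+ // tagged as a smi.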
+ __ cmp(ecx, 0xc0000000);
+ __ j(sign, &try_float, not_taken);
+
+ // Tag the result as a smi and we're done.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(eax, Operand(ecx, times_2, kSmiTag));
+ __ jmp(&done);
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (overwrite_ == UNARY_NO_OVERWRITE) {
+ // Allocate a fresh heap number, but don't overwrite eax until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in eax.
+ __ AllocateHeapNumber(ebx, edx, edi, &slow);
+ __ mov(eax, Operand(ebx));
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ecx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+
+ // Return from the stub.
+ __ bind(&done);
+ __ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(ecx); // pop return address.
+ __ push(eax);
+ __ push(ecx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in edx and the parameter count is in eax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register eax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmp(edx, Operand(eax));
+ __ j(above_equal, &slow, not_taken);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
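+ // eax (the count) and edx (the key) are smis, i.e. the value shifted left by
+ // one, so scaling them with times_2 multiplies the untagged value by
+ // kPointerSize.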
+ __ lea(ebx, Operand(ebp, eax, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+ __ ret(0);
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(edx, Operand(ecx));
+ __ j(above_equal, &slow, not_taken);
+
+ // Read the argument from the stack and return it.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
+ __ lea(ebx, Operand(ebx, ecx, times_2, 0));
+ __ neg(edx);
+ __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
+ __ ret(0);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(ebx); // Return address.
+ __ push(edx);
+ __ push(ebx);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // Try the new space allocation. Start out by computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &add_arguments_object);
+ __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(edi, Operand(edi, offset));
+
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::arguments_callee_index == 0);
+ __ mov(ebx, Operand(esp, 3 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::arguments_length_index == 1);
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &done);
+
+ // Get the parameters pointer from the stack.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+ // Untag the length for the loop below.
+ __ SmiUntag(ecx);
+
+ // Copy the fixed array slots.
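+ // edx walks downwards through the parameters on the stack while edi walks
+ // upwards through the freshly allocated fixed array.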
+ Label loop;
+ __ bind(&loop);
+ __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
+ __ add(Operand(edi), Immediate(kPointerSize));
+ __ sub(Operand(edx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to runtime if native RegExp is not selected at compile
+ // time or if the regexp entry in generated code is turned off by the runtime
+ // flag.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: last_match_info (expected JSArray)
+ // esp[8]: previous index
+ // esp[12]: subject string
+ // esp[16]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &runtime, not_taken);
+
+ // Check that the first argument is a JSRegExp object.
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // ecx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ j(not_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check that the number of captures fits in the static offsets vector buffer.
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2. This
+ // uses the assumption that smis are 2 * their untagged value.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
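+ // edx held captures * 2 (the smi encoding), so adding 2 yields
+ // (captures + 1) * 2, the untagged number of capture registers.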
+ // Check that the static offsets vector buffer is large enough.
+ __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
+ __ j(above, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the second argument is a string.
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+ // Get the length of the string to ebx.
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+
+ // ebx: Length of subject string as a smi
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ __ mov(eax, Operand(esp, kPreviousIndexOffset));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ cmp(eax, Operand(ebx));
+ __ j(above_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+ __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(eax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiUntag(eax);
+ __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, Operand(eax));
+ __ j(greater, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_ascii_string, seq_two_byte_string, check_code;
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ // First check for flat two byte string.
+ __ and_(ebx,
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be a flat ascii string.
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_ascii_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kExternalStringTag));
+ __ j(not_zero, &runtime);
+ // String is a cons string.
+ __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
+ __ cmp(Operand(edx), Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // String is a cons string with empty second part.
+ // eax: first part of cons string.
+ // ebx: map of first part of cons string.
+ // Is first part a flat two byte string?
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
+ kStringRepresentationMask | kStringEncodingMask);
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be ascii.
+ __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
+ kStringRepresentationMask);
+ __ j(not_zero, &runtime);
+
+ __ bind(&seq_ascii_string);
+ // eax: subject string (flat ascii)
+ // ecx: RegExp data (FixedArray)
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(edi, Immediate(1)); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // eax: subject string (flat two byte)
+ // ecx: RegExp data (FixedArray)
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(edi, Immediate(0)); // Type is two byte.
+
+ __ bind(&check_code);
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains the hole.
+ __ CmpObjectType(edx, CODE_TYPE, ebx);
+ __ j(not_equal, &runtime);
+
+ // eax: subject string
+ // edx: code
+ // edi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ SmiUntag(ebx); // Previous index from smi.
+
+ // eax: subject string
+ // ebx: previous index
+ // edx: code
+ // edi: encoding of subject string (1 if ascii 0 if two_byte);
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
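+ // The backtracking stack grows downwards, so its start is the high end:
+ // base address plus size.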
+ __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+ __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ mov(Operand(esp, 5 * kPointerSize), ecx);
+
+ // Argument 5: static offsets vector buffer.
+ __ mov(Operand(esp, 4 * kPointerSize),
+ Immediate(ExternalReference::address_of_static_offsets_vector()));
+
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label setup_two_byte, setup_rest;
+ __ test(edi, Operand(edi));
+ __ mov(edi, FieldOperand(eax, String::kLengthOffset));
+ __ j(zero, &setup_two_byte);
+ __ SmiUntag(edi);
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+ __ jmp(&setup_rest);
+
+ __ bind(&setup_two_byte);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1); // edi is a smi, i.e. the length times two.
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+ __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
+
+ __ bind(&setup_rest);
+
+ // Argument 2: Previous index.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+
+ // Argument 1: Subject string.
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+
+ // Locate the code entry and call it.
+ __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(edx, kRegExpExecuteArguments);
+
+ // Check the result.
+ Label success;
+ __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
+ __ j(equal, &success, taken);
+ Label failure;
+ __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
+ __ j(equal, &failure, taken);
+ __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
+ // If not exception it can only be retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+ // Result must now be exception. If there is no pending exception already, a
+ // stack overflow (on the backtrack stack) was detected in RegExp code but
+ // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(eax,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ cmp(eax, Operand::StaticVariable(pending_exception));
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ mov(Operand(eax), Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
+
+ // edx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // edx: number of capture registers
+ // Store the capture count.
+ __ SmiTag(edx); // Number of capture registers to smi.
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+ __ SmiUntag(edx); // Number of capture registers back from smi.
+ // Store last subject and last input.
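+ // RecordWrite is the GC write barrier; the last_match_info backing store may
+ // be in old space while the subject string may be a new-space object.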
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
+ __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector();
+ __ mov(ecx, Immediate(address_of_static_offsets_vector));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // ecx: offsets vector
+ // edx: number of capture registers
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ sub(Operand(edx), Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer.
+ __ mov(edi, Operand(ecx, edx, times_int_size, 0));
+ __ SmiTag(edi);
+ // Store the smi value in the last match info.
+ __ mov(FieldOperand(ebx,
+ edx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ edi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
+ __ mov(number_string_cache,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ __ sub(Operand(mask), Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ if (object_is_smi) {
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ } else {
+ Label not_smi, hash_calculated;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smi);
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ __ jmp(&smi_hash_calculated);
+ __ bind(&not_smi);
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, Operand(mask));
+ Register index = scratch;
+ Register probe = mask;
+ __ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
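+ // If the entry key is a smi it cannot match the heap-number operand, so
+ // treat this as a cache miss.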
+ __ test(probe, Immediate(kSmiTagMask));
+ __ j(zero, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
+ } else {
+ __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ __ FCmp();
+ }
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache);
+ }
+
+ __ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, Operand(mask));
+ Register index = scratch;
+ // Check if the entry is the smi we are looking for.
+ __ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ mov(ebx, Operand(esp, kPointerSize));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+ __ ret(1 * kPointerSize);
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ Label check_unequal_objects, done;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Identical objects can be compared fast, but there are some tricky cases
+ // for NaN and undefined.
+ {
+ Label not_identical;
+ __ cmp(eax, Operand(edx));
+ __ j(not_equal, &not_identical);
+
+ if (cc_ != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ cmp(edx, Factory::undefined_value());
+ __ j(not_equal, &check_for_nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+ } else {
+ Label heap_number;
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(eax, Operand(eax));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, Operand(edx));
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ if (cc_ == equal) {
+ STATIC_ASSERT(EQUAL != 1);
+ __ setcc(above_equal, eax);
+ __ ret(0);
+ } else {
+ Label nan;
+ __ j(above_equal, &nan);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+ __ bind(&nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ ret(0);
+ }
+ }
+
+ __ bind(&not_identical);
+ }
+
+ // Strict equality can quickly decide whether objects are equal.
+ // Non-strict object equality is slower, so it is handled later in the stub.
+ if (cc_ == equal && strict_) {
+ Label slow; // Fallthrough label.
+ Label not_smis;
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ mov(ecx, Immediate(kSmiTagMask));
+ __ and_(ecx, Operand(eax));
+ __ test(ecx, Operand(edx));
+ __ j(not_zero, &not_smis);
+ // One operand is a smi.
+
+ // Check whether the non-smi is a heap number.
+ STATIC_ASSERT(kSmiTagMask == 1);
+ // ecx still holds eax & kSmiTagMask, which is either zero or one.
+ __ sub(Operand(ecx), Immediate(0x01));
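+ // ecx is now 0xffffffff if eax is a smi and 0 otherwise, so the xor/and/xor
+ // sequence below selects the non-smi operand into ebx without branching.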
+ __ mov(ebx, edx);
+ __ xor_(ebx, Operand(eax));
+ __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, Operand(eax));
+ // if eax was smi, ebx is now edx, else eax.
+
+ // Check if the non-smi operand is a heap number.
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal (ebx is not zero)
+ __ mov(eax, ebx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+
+ // Get the type of the first operand.
+ // If the first object is a JS object, we have done pointer comparison.
+ Label first_non_object;
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object);
+
+ // Return non-zero (eax is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ __ bind(&slow);
+ }
+
+ // Generate the number comparison code.
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
+
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, not_taken);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, Operand(ecx));
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, Operand(ecx));
+ __ ret(0);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, not_taken);
+
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, not_taken);
+ __ j(above, &above_label, not_taken);
+
+ __ xor_(eax, Operand(eax));
+ __ ret(0);
+
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
+
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(0);
+ }
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
+
+ // Fast negative check for symbol-to-symbol equality.
+ Label check_for_strings;
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
+ BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(0);
+ }
+
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ edi);
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Non-strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects;
+ Label return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+ __ lea(ecx, Operand(eax, edx, times_1, 0));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &not_both_objects);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+ __ j(below, &not_both_objects);
+ // We do not bail out after this point. Both are JSObjects, and
+ // they are equal if and only if both are undetectable.
+ // The and of the undetectable flags is 1 if and only if they are equal.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(eax, Immediate(EQUAL));
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(0);
+ __ bind(&not_both_objects);
+ }
+
+ // Push arguments below the return address.
+ __ pop(ecx);
+ __ push(edx);
+ __ push(eax);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ }
+
+ // Restore return address on the stack.
+ __ push(ecx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ test(object, Immediate(kSmiTagMask));
+ __ j(zero, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+ __ cmp(scratch, kSymbolTag | kStringTag);
+ __ j(not_equal, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(eax);
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(eax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean) check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &receiver_is_value, not_taken);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &slow, not_taken);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
+ __ Set(eax, Immediate(argc_));
+ __ Set(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // eax holds the exception.
+
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Restore next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(Operand::StaticVariable(handler_address));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ __ pop(ebp);
+ __ pop(edx); // Remove state.
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of
+ // a JS entry frame.
+ __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
+ Label skip;
+ __ cmp(ebp, 0);
+ __ j(equal, &skip, not_taken);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+
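+ // After the three pops only the handler's PC slot remains on top of the
+ // stack; ret pops it and jumps to the handler's resume address.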
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ ret(0);
+}
+
+
+// If true, a Handle<T> passed by value is passed and returned by
+// using the location_ field directly. If false, it is passed and
+// returned as a pointer to a handle.
+#ifdef USING_BSD_ABI
+static const bool kPassHandlesDirectly = true;
+#else
+static const bool kPassHandlesDirectly = false;
+#endif
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ Label empty_handle;
+ Label prologue;
+ Label promote_scheduled_exception;
+ __ EnterApiExitFrame(kStackSpace, kArgc);
+ STATIC_ASSERT(kArgc == 4);
+ if (kPassHandlesDirectly) {
+ // When handles are passed directly we don't have to allocate extra
+ // space for, and pass, an out parameter.
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
+ } else {
+ // The function expects three arguments to be passed but we allocate
+ // four to get space for the output cell. The argument slots are filled
+ // as follows:
+ //
+ // 3: output cell
+ // 2: arguments pointer
+ // 1: name
+ // 0: pointer to the output cell
+ //
+ // Note that this is one more "argument" than the function expects
+ // so the out cell will have to be popped explicitly after returning
+ // from the function.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
+ __ mov(ebx, esp);
+ __ add(Operand(ebx), Immediate(3 * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
+ }
+ // Call the api function!
+ __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address();
+ __ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(Factory::the_hole_value()));
+ __ j(not_equal, &promote_scheduled_exception, not_taken);
+ if (!kPassHandlesDirectly) {
+ // The returned value is a pointer to the handle holding the result.
+ // Dereference this to get to the location.
+ __ mov(eax, Operand(eax, 0));
+ }
+ // Check if the result handle holds 0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &empty_handle, not_taken);
+ // It was non-zero. Dereference to get the result value.
+ __ mov(eax, Operand(eax, 0));
+ __ bind(&prologue);
+ __ LeaveExitFrame();
+ __ ret(0);
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ bind(&empty_handle);
+ // It was zero; the result is undefined.
+ __ mov(eax, Factory::undefined_value());
+ __ jmp(&prologue);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate_scope,
+ int /* alignment_skew */) {
+ // eax: result parameter for PerformGC, if any
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // edi: number of arguments including receiver (C callee-saved)
+ // esi: pointer to the first argument (C callee-saved)
+
+ // Result returned in eax, or eax+edx if result_size_ is 2.
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
+ }
+
+ if (do_gc) {
+ // Pass failure code returned from last attempt as first argument to
+ // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
+ // stack alignment is known to be correct. This function takes one argument
+ // which is passed on the stack, and we know that the stack has been
+ // prepared to pass at least one argument.
+ __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
+ __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate_scope) {
+ __ inc(Operand::StaticVariable(scope_depth));
+ }
+
+ // Call C function.
+ __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ call(Operand(ebx));
+ // Result is in eax or edx:eax - do not destroy these registers!
+
+ if (always_allocate_scope) {
+ __ dec(Operand::StaticVariable(scope_depth));
+ }
+
+ // Make sure we're not trying to return 'the hole' from the runtime
+ // call as this may lead to crashes in the IC code later.
+ if (FLAG_debug_code) {
+ Label okay;
+ __ cmp(eax, Factory::the_hole_value());
+ __ j(not_equal, &okay);
+ __ int3();
+ __ bind(&okay);
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
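+ // Failure objects are tagged with both low bits set; adding one therefore
+ // clears the low two bits exactly when eax carries the failure tag.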
+ __ lea(ecx, Operand(eax, 1));
+ // Lower 2 bits of ecx are 0 iff eax has failure tag.
+ __ test(ecx, Immediate(kFailureTagMask));
+ __ j(zero, &failure_returned, not_taken);
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame();
+ __ ret(0);
+
+ // Handling of failure.
+ __ bind(&failure_returned);
+
+ Label retry;
+ // If the returned failure is RETRY_AFTER_GC, continue at the retry label.
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ j(zero, &retry, taken);
+
+ // Special handling of out of memory exceptions.
+ __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ __ j(equal, throw_out_of_memory_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ mov(eax, Operand::StaticVariable(pending_exception_address));
+ __ mov(edx,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ mov(Operand::StaticVariable(pending_exception_address), edx);
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ cmp(eax, Factory::termination_exception());
+ __ j(equal, throw_termination_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ // Retry.
+ __ bind(&retry);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop sp to the top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
+ __ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ mov(esp, Operand(esp, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to next handler past the current ENTRY handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(Operand::StaticVariable(handler_address));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ mov(eax, false);
+ __ mov(Operand::StaticVariable(external_caught), eax);
+
+ // Set pending exception and eax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ }
+
+ // Clear the context pointer.
+ __ xor_(esi, Operand(esi));
+
+ // Restore fp from handler and discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ __ pop(ebp);
+ __ pop(edx); // State.
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ __ ret(0);
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // eax: number of arguments including receiver
+ // ebx: pointer to C function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // esi: current context (C callee-saved)
+ // edi: JS function of the caller (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects instead
+ // of a proper result. The builtin entry handles this by performing
+ // a garbage collection and retrying the builtin (twice).
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame();
+
+ // eax: result parameter for PerformGC, if any (setup below)
+ // ebx: pointer to builtin function (C callee-saved)
+ // ebp: frame pointer (restored after C call)
+ // esp: stack pointer (restored after C call)
+ // edi: number of arguments including receiver (C callee-saved)
+ // esi: argv pointer (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
+
+ // Setup frame.
+ __ push(ebp);
+ __ mov(ebp, Operand(esp));
+
+ // Push marker in two places.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ push(Immediate(Smi::FromInt(marker))); // context slot
+ __ push(Immediate(Smi::FromInt(marker))); // function slot
+ // Save callee-saved registers (C calling conventions).
+ __ push(edi);
+ __ push(esi);
+ __ push(ebx);
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ push(Operand::StaticVariable(c_entry_fp));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ j(not_equal, &not_outermost_js);
+ __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+ __ bind(&not_outermost_js);
+#endif
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
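+ // The return address pushed by this call fills the handler's PC slot; when
+ // an exception unwinds to this handler, execution resumes at the
+ // caught-exception code below.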
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+ // Clear any pending exceptions.
+ __ mov(edx,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ mov(Operand::StaticVariable(pending_exception), edx);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline
+ // builtin and pop the faked function when we return. Notice that we
+ // cannot store a reference to the trampoline code directly in this
+ // stub, because the builtin stubs may not have been generated yet.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ mov(edx, Immediate(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ mov(edx, Immediate(entry));
+ }
+ __ mov(edx, Operand(edx, 0)); // deref address
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ call(Operand(edx));
+
+ // Unlink this frame from the handler chain.
+ __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ // Pop next_sp.
+ __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If current EBP value is the same as js_entry_sp value, it means that
+ // the current function is the outermost.
+ __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+ __ j(not_equal, &not_outermost_js_2);
+ __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+
+ // Restore callee-saved registers (C calling conventions).
+ __ pop(ebx);
+ __ pop(esi);
+ __ pop(edi);
+ __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(ebp);
+ __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+
+ // Check that the left hand side is a JS object.
+ __ IsObjectJSObjectType(eax, eax, edx, &slow);
+
+ // Get the prototype of the function.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
+ // edx is function, eax is map.
+
+ // Look up the function and the map in the instanceof cache.
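+ // Roughly: if (roots[kInstanceofCacheFunction] == function &&
+ //              roots[kInstanceofCacheMap] == map)
+ //            return roots[kInstanceofCacheAnswer];
+ // (an illustrative summary of the checks below)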
+ Label miss;
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ ret(2 * kPointerSize);
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
+
+ // Register mapping:
+ // eax is object map.
+ // edx is function.
+ // ebx is function prototype.
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
+
+ __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ bind(&loop);
+ __ cmp(ecx, Operand(ebx));
+ __ j(equal, &is_instance);
+ __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ __ j(equal, &is_not_instance);
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ Set(eax, Immediate(0));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the parameters in a unique 16 bit value. To avoid duplicate
+ // stubs, the never-NaN-NaN condition is only taken into account if the
+ // condition is equals.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case less: cc_name = "LT"; break;
+ case greater: cc_name = "GT"; break;
+ case less_equal: cc_name = "LE"; break;
+ case greater_equal: cc_name = "GE"; break;
+ case equal: cc_name = "EQ"; break;
+ case not_equal: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s",
+ cc_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name);
+ return name_;
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(object_, Immediate(kSmiTagMask));
+ __ j(zero, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ test(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(index_, Immediate(kSmiTagMask));
+ __ j(not_zero, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle non-flat strings.
+ __ test(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
+ Immediate(Factory::empty_string()));
+ __ j(not_equal, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ test(result_, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ movzx_w(result_, FieldOperand(object_,
+ scratch_, times_1, // Scratch is smi-tagged.
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ SmiUntag(scratch_);
+ __ movzx_b(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&got_char_code);
+ __ SmiTag(result_);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(eax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(scratch_, eax);
+ }
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(scratch_, Immediate(kSmiTagMask));
+ __ j(not_zero, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but getting the actual character is too complex
+ // (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
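+ // Roughly: if code_ is a smi holding an ascii char code, return
+ // single_character_string_cache[char_code], falling back to the slow case
+ // when the cache entry is undefined (an illustrative summary of the code
+ // below).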
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ __ test(code_,
+ Immediate(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ j(not_zero, &slow_case_, not_taken);
+
+ __ Set(result_, Immediate(Factory::single_character_string_cache()));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ // At this point code register contains smi tagged ascii char code.
+ __ mov(result_, FieldOperand(result_,
+ code_, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result_, Factory::undefined_value());
+ __ j(equal, &slow_case_, not_taken);
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+
+ // Load the two arguments.
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+
+ // First argument is a string, test the second.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // eax: first string
+ // edx: second string
+ // Check if either of the strings is empty. In that case return the other.
+ Label second_not_zero_length, both_not_zero_length;
+ __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(ecx, Operand(ecx));
+ __ j(not_zero, &second_not_zero_length);
+ // Second string is empty, result is first string which is already in eax.
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&second_not_zero_length);
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(ebx, Operand(ebx));
+ __ j(not_zero, &both_not_zero_length);
+ // First string is empty, result is second string which is in edx.
+ __ mov(eax, edx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Both strings are non-empty.
+ // eax: first string
+ // ebx: length of first string as a smi
+ // ecx: length of second string as a smi
+ // edx: second string
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result, longer_than_two;
+ __ bind(&both_not_zero_length);
+ __ add(ebx, Operand(ecx));
+ STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
+ // Handle exceptionally long strings in the runtime system.
+ __ j(overflow, &string_add_runtime);
+ // When adding two one character strings, probe the symbol table for the
+ // combined two character string first, as it may already be present there
+ // as a symbol.
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
+ __ j(not_equal, &longer_than_two);
+
+ // Check that both strings are non-external ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
+ &string_add_runtime);
+
+ // Get the two characters forming the sub string.
+ __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+
+ // Try to look up the two character string in the symbol table. If it is
+ // not found, just allocate a new one.
+ Label make_two_character_string, make_flat_ascii_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ __ Set(ebx, Immediate(Smi::FromInt(2)));
+ __ jmp(&make_flat_ascii_string);
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+ __ j(below, &string_add_flat_result);
+
+ // If the result is not supposed to be flat, allocate a cons string object.
+ // If both strings are ascii, the result is an ascii cons string.
+ Label non_ascii, allocated, ascii_data;
+ __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ and_(ecx, Operand(edi));
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ test(ecx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii);
+ __ bind(&ascii_data);
+ // Allocate an ascii cons string.
+ __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
+ __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
+ __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
+ __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+ __ mov(eax, ecx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // ecx: first instance type AND second instance type.
+ // edi: second instance type.
+ __ test(ecx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ xor_(edi, Operand(ecx));
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ j(equal, &ascii_data);
+ // Allocate a two byte cons string.
+ __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are not
+ // external strings.
+ // eax: first string
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ __ bind(&string_add_flat_result);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ // Now check if both strings are ascii strings.
+ // eax: first string
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ Label non_ascii_string_add_flat_result;
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(zero, &non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(zero, &string_add_runtime);
+
+ __ bind(&make_flat_ascii_string);
+ // Both strings are ascii strings. As they are short they are both flat.
+ // ebx: length of resulting flat string as a smi
+ __ SmiUntag(ebx);
+ __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Handle creating a flat two byte result.
+ // eax: first string - known to be two byte
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ __ bind(&non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(not_zero, &string_add_runtime);
+ // Both strings are two byte strings. As they are short they are both
+ // flat.
+ __ SmiUntag(ebx);
+ __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+}
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ __ bind(&loop);
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (ascii) {
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ } else {
+ __ mov_w(scratch, Operand(src, 0));
+ __ mov_w(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(2));
+ __ add(Operand(dest), Immediate(2));
+ }
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+}
+
+
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ // Copy characters using rep movs of doublewords.
+ // The destination is aligned on a 4 byte boundary because we are
+ // copying to the beginning of a newly allocated string.
+ ASSERT(dest.is(edi)); // rep movs destination
+ ASSERT(src.is(esi)); // rep movs source
+ ASSERT(count.is(ecx)); // rep movs count
+ ASSERT(!scratch.is(dest));
+ ASSERT(!scratch.is(src));
+ ASSERT(!scratch.is(count));
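+
+ // Roughly, the generated copy behaves like this sketch, where 'bytes' is the
+ // character count, doubled first for two-byte strings (illustrative only):
+ //   if (bytes == 0) return;
+ //   copy bytes / 4 doublewords with rep movs;
+ //   copy the remaining bytes % 4 bytes one byte at a time;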
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ __ shl(count, 1);
+ }
+
+ // Don't enter the rep movs if there are less than 4 bytes to copy.
+ Label last_bytes;
+ __ test(count, Immediate(~3));
+ __ j(zero, &last_bytes);
+
+ // Copy from esi to edi using the rep movs instruction.
+ __ mov(scratch, count);
+ __ sar(count, 2); // Number of doublewords to copy.
+ __ cld();
+ __ rep_movs();
+
+ // Find number of bytes left.
+ __ mov(count, scratch);
+ __ and_(count, 3);
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits, as such strings have a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ mov(scratch, c1);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(above, &not_array_index);
+ __ mov(scratch, c2);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(below_equal, not_found);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ GenerateHashInit(masm, hash, c1, scratch);
+ GenerateHashAddCharacter(masm, hash, c2, scratch);
+ GenerateHashGetHash(masm, hash, scratch);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ shl(c2, kBitsPerByte);
+ __ or_(chars, Operand(c2));
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load the symbol table.
+ Register symbol_table = c2;
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
+ __ mov(symbol_table,
+ Operand::StaticArray(scratch, times_pointer_size, roots_address));
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ SmiUntag(mask);
+ __ sub(Operand(mask), Immediate(1));
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // symbol_table: symbol table
+ // mask: capacity mask
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes], next_probe_pop_mask[kProbes];
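+ // Each probe i below inspects the symbol table entry at index
+ // ((hash + SymbolTable::GetProbeOffset(i)) & mask) and accepts it if it is
+ // a sequential ascii string of length 2 whose two characters match chars.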
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
+ __ mov(scratch, hash);
+ if (i > 0) {
+ __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+ }
+ __ and_(scratch, Operand(mask));
+
+ // Load the entry from the symbol table.
+ Register candidate = scratch; // Scratch register contains candidate.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ mov(candidate,
+ FieldOperand(symbol_table,
+ scratch,
+ times_pointer_size,
+ SymbolTable::kElementsStartOffset));
+
+ // If entry is undefined no string with this hash can be found.
+ __ cmp(candidate, Factory::undefined_value());
+ __ j(equal, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ cmp(FieldOperand(candidate, String::kLengthOffset),
+ Immediate(Smi::FromInt(2)));
+ __ j(not_equal, &next_probe[i]);
+
+ // As we are out of registers save the mask on the stack and use that
+ // register as a temporary.
+ __ push(mask);
+ Register temp = mask;
+
+ // Check that the candidate is a non-external ascii string.
+ __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+ __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(
+ temp, temp, &next_probe_pop_mask[i]);
+
+ // Check if the two characters match.
+ __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ and_(temp, 0x0000ffff);
+ __ cmp(chars, Operand(temp));
+ __ j(equal, &found_in_symbol_table);
+ __ bind(&next_probe_pop_mask[i]);
+ __ pop(mask);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ __ pop(mask); // Pop saved mask from the stack.
+ if (!result.is(eax)) {
+ __ mov(eax, result);
+ }
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash = character + (character << 10);
+ __ mov(hash, character);
+ __ shl(hash, 10);
+ __ add(hash, Operand(character));
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ sar(scratch, 6);
+ __ xor_(hash, Operand(scratch));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash += character;
+ __ add(hash, Operand(character));
+ // hash += hash << 10;
+ __ mov(scratch, hash);
+ __ shl(scratch, 10);
+ __ add(hash, Operand(scratch));
+ // hash ^= hash >> 6;
+ __ mov(scratch, hash);
+ __ sar(scratch, 6);
+ __ xor_(hash, Operand(scratch));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // hash += hash << 3;
+ __ mov(scratch, hash);
+ __ shl(scratch, 3);
+ __ add(hash, Operand(scratch));
+ // hash ^= hash >> 11;
+ __ mov(scratch, hash);
+ __ sar(scratch, 11);
+ __ xor_(hash, Operand(scratch));
+ // hash += hash << 15;
+ __ mov(scratch, hash);
+ __ shl(scratch, 15);
+ __ add(hash, Operand(scratch));
+
+ // if (hash == 0) hash = 27;
+ Label hash_not_zero;
+ __ test(hash, Operand(hash));
+ __ j(not_zero, &hash_not_zero);
+ __ mov(hash, Immediate(27));
+ __ bind(&hash_not_zero);
+}
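+
+ // Taken together, the three helpers above compute (roughly) the following
+ // one-at-a-time style hash over the two character codes c1 and c2; this is
+ // an illustrative sketch, not part of the generated code:
+ //   hash  = c1 + (c1 << 10);
+ //   hash ^= hash >> 6;                // GenerateHashInit
+ //   hash += c2;
+ //   hash += hash << 10;
+ //   hash ^= hash >> 6;                // GenerateHashAddCharacter
+ //   hash += hash << 3;
+ //   hash ^= hash >> 11;
+ //   hash += hash << 15;               // GenerateHashGetHash
+ //   if (hash == 0) hash = 27;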
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: to
+ // esp[8]: from
+ // esp[12]: string
+
+ // Make sure first argument is a string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // eax: string
+ // ebx: instance type
+
+ // Calculate length of sub string using the smi values.
+ Label result_longer_than_two;
+ __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ sub(ecx, Operand(edx));
+ __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
+ Label return_eax;
+ __ j(equal, &return_eax);
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked up in the symbol table.
+ __ SmiUntag(ecx); // Result length is no longer smi.
+ __ cmp(ecx, 2);
+ __ j(greater, &result_longer_than_two);
+ __ j(less, &runtime);
+
+ // Sub string of length 2 requested.
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (value is 2)
+ // edx: from index (smi)
+ __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
+
+ // Get the two characters forming the sub string.
+ __ SmiUntag(edx); // From index is no longer smi.
+ __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx,
+ FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to look up the two character string in the symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ // Setup registers for allocating the two character string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ Set(ecx, Immediate(2));
+
+ __ bind(&result_longer_than_two);
+ // eax: string
+ // ebx: instance type
+ // ecx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ SmiUntag(ebx);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
+ __ mov(esi, edx); // Restore esi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&non_ascii_flat);
+ // eax: string
+ // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
+ // ecx: result string length
+ // Check for flat two byte string
+ __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ // As from is a smi it is already multiplied by two, which matches the size
+ // of a two byte character.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
+ __ mov(esi, edx); // Restore esi.
+
+ __ bind(&return_eax);
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Label result_not_equal;
+ Label result_greater;
+ Label compare_lengths;
+
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+
+ // Find minimum length.
+ Label left_shorter;
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ mov(scratch3, scratch1);
+ __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+ Register length_delta = scratch3;
+
+ __ j(less_equal, &left_shorter);
+ // Right string is shorter. Change scratch1 to be length of right string.
+ __ sub(scratch1, Operand(length_delta));
+ __ bind(&left_shorter);
+
+ Register min_length = scratch1;
+
+ // If either length is zero, just compare lengths.
+ __ test(min_length, Operand(min_length));
+ __ j(zero, &compare_lengths);
+
+ // Change index to run from -min_length to -1 by adding min_length
+ // to string start. This means that loop ends when index reaches zero,
+ // which doesn't need an additional compare.
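+ // Roughly (an illustrative sketch of the loop below; left_chars/right_chars
+ // stand for the sequential character data of the two strings):
+ //   for (int i = -min_length; i != 0; i++) {
+ //     if (left_chars[min_length + i] != right_chars[min_length + i])
+ //       goto result_not_equal;
+ //   }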
+ __ SmiUntag(min_length);
+ __ lea(left,
+ FieldOperand(left,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ neg(min_length);
+
+ Register index = min_length; // index = -min_length;
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ mov_b(scratch2, Operand(left, index, times_1, 0));
+ __ cmpb(scratch2, Operand(right, index, times_1, 0));
+ __ j(not_equal, &result_not_equal);
+ __ add(Operand(index), Immediate(1));
+ __ j(not_zero, &loop);
+ }
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ __ test(length_delta, Operand(length_delta));
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&result_not_equal);
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ Set(eax, Immediate(Smi::FromInt(LESS)));
+ __ ret(0);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Set(eax, Immediate(Smi::FromInt(GREATER)));
+ __ ret(0);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: right string
+ // esp[8]: left string
+
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, &not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+
+ // Compare flat ascii strings.
+ // Drop arguments from the stack.
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(ecx);
+ GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
new file mode 100644
index 00000000..acf4a6f9
--- /dev/null
+++ b/src/ia32/code-stubs-ia32.h
@@ -0,0 +1,360 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_CODE_STUBS_IA32_H_
+#define V8_IA32_CODE_STUBS_IA32_H_
+
+#include "macro-assembler.h"
+#include "code-stubs.h"
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+ void GenerateOperation(MacroAssembler* masm);
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
+enum GenericBinaryFlags {
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags,
+ TypeInfo operands_type)
+ : op_(op),
+ mode_(mode),
+ flags_(flags),
+ args_in_registers_(false),
+ args_reversed_(false),
+ static_operands_type_(operands_type),
+ runtime_operands_type_(BinaryOpIC::DEFAULT),
+ name_(NULL) {
+ if (static_operands_type_.IsSmi()) {
+ mode_ = NO_OVERWRITE;
+ }
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ flags_(FlagBits::decode(key)),
+ args_in_registers_(ArgsInRegistersBits::decode(key)),
+ args_reversed_(ArgsReversedBits::decode(key)),
+ use_sse3_(SSE3Bits::decode(key)),
+ static_operands_type_(TypeInfo::ExpandedRepresentation(
+ StaticTypeInfoBits::decode(key))),
+ runtime_operands_type_(runtime_operands_type),
+ name_(NULL) {
+ }
+
+ // Generate code to call the stub with the supplied arguments. This will add
+ // code at the call site to prepare arguments either in registers or on the
+ // stack together with the actual call.
+ void GenerateCall(MacroAssembler* masm, Register left, Register right);
+ void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+ void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+
+ bool ArgsInRegistersSupported() {
+ return op_ == Token::ADD || op_ == Token::SUB
+ || op_ == Token::MUL || op_ == Token::DIV;
+ }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+ bool args_in_registers_; // Arguments passed in registers not on the stack.
+ bool args_reversed_; // Left and right argument are swapped.
+ bool use_sse3_;
+
+ // Number type information of operands, determined by code generator.
+ TypeInfo static_operands_type_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub %d (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_),
+ static_cast<int>(args_in_registers_),
+ static_cast<int>(args_reversed_),
+ static_operands_type_.ToString());
+ }
+#endif
+
+ // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class SSE3Bits: public BitField<bool, 9, 1> {};
+ class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
+ class ArgsReversedBits: public BitField<bool, 11, 1> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
+ class StaticTypeInfoBits: public BitField<int, 13, 3> {};
+ class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 18 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_)
+ | SSE3Bits::encode(use_sse3_)
+ | ArgsInRegistersBits::encode(args_in_registers_)
+ | ArgsReversedBits::encode(args_reversed_)
+ | StaticTypeInfoBits::encode(
+ static_operands_type_.ThreeBitRepresentation())
+ | RuntimeTypeInfoBits::encode(runtime_operands_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ bool IsOperationCommutative() {
+ return (op_ == Token::ADD) || (op_ == Token::MUL);
+ }
+
+ void SetArgsInRegisters() { args_in_registers_ = true; }
+ void SetArgsReversed() { args_reversed_ = true; }
+ bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
+
+ bool ShouldGenerateSmiCode() {
+ return HasSmiCodeInStub() &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ bool ShouldGenerateFPCode() {
+ return runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(runtime_operands_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies ecx characters from esi to edi. Copying of overlapping regions is
+ // not supported.
+ static void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be edi.
+ Register src, // Must be esi.
+ Register count, // Must be ecx.
+ Register scratch, // Neither of above.
+ bool ascii);
+
+ // Probe the symbol table for a two character string. If the string is not
+ // found by probing, a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found, the code falls through with the string in register eax.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ explicit StringCompareStub() {
+ }
+
+ // Compares two flat ascii strings and returns the result in eax after
+ // popping the two arguments from the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("NumberToStringStub\n");
+ }
+#endif
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index a48c74e9..d399c35f 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -29,8 +29,9 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "bootstrapper.h"
+#include "code-stubs.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
@@ -934,97 +935,6 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
}
-class FloatingPointHelper : public AllStatic {
- public:
-
- enum ArgLocation {
- ARGS_ON_STACK,
- ARGS_IN_REGISTERS
- };
-
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in register number. Returns operand as floating point number
- // on FPU stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register number);
-
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in eax, operand_2 in edx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch);
-
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* operand_conversion_failure);
-
- // Test if operands are smis or heap numbers and load them
- // into xmm0 and xmm1 if they are. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm);
-
- // Test if operands are numbers (smi or HeapNumber objects), and load
- // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
- // either operand is not a number. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-};
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
// Perform or call the specialized stub for a binary operation. Requires the
// three registers left, right and dst to be distinct and spilled. This
// deferred operation has up to three entry points: The main one calls the
@@ -1541,7 +1451,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
overwrite_mode,
NO_SMI_CODE_IN_STUB,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
} else if (right_is_smi_constant) {
answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
false, overwrite_mode);
@@ -1564,7 +1474,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
overwrite_mode,
NO_GENERIC_BINARY_FLAGS,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
}
}
@@ -1573,6 +1483,20 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
}
+Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right) {
+ if (stub->ArgsInRegistersSupported()) {
+ stub->SetArgsInRegisters();
+ return frame_->CallStub(stub, left, right);
+ } else {
+ frame_->Push(left);
+ frame_->Push(right);
+ return frame_->CallStub(stub, 2);
+ }
+}
+
+
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
Object* answer_object = Heap::undefined_value();
switch (op) {
@@ -2772,41 +2696,6 @@ void CodeGenerator::Comparison(AstNode* node,
ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
left_side_constant_smi, right_side_constant_smi,
is_loop_condition);
- } else if (cc == equal &&
- (left_side_constant_null || right_side_constant_null)) {
- // To make null checks efficient, we check if either the left side or
- // the right side is the constant 'null'.
- // If so, we optimize the code by inlining a null check instead of
- // calling the (very) general runtime routine for checking equality.
- Result operand = left_side_constant_null ? right_side : left_side;
- right_side.Unuse();
- left_side.Unuse();
- operand.ToRegister();
- __ cmp(operand.reg(), Factory::null_value());
- if (strict) {
- operand.Unuse();
- dest->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- dest->true_target()->Branch(equal);
- __ cmp(operand.reg(), Factory::undefined_value());
- dest->true_target()->Branch(equal);
- __ test(operand.reg(), Immediate(kSmiTagMask));
- dest->false_target()->Branch(equal);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- temp.Unuse();
- operand.Unuse();
- dest->Split(not_zero);
- }
} else if (left_side_constant_1_char_string ||
right_side_constant_1_char_string) {
if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
@@ -5817,12 +5706,9 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5909,12 +5795,9 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
frame()->Push(&value);
Load(node->value());
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -6012,11 +5895,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -6428,11 +6308,10 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// actual function to call is resolved after the arguments have been
// evaluated.
- // Compute function to call and use the global object as the
- // receiver. There is no need to use the global proxy here because
- // it will always be replaced with a newly allocated object.
+    // Push the constructor on the stack. If it's not a function, it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
Load(node->expression());
- LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
@@ -6445,8 +6324,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// constructor invocation.
CodeForSourcePosition(node->position());
Result result = frame_->CallConstructor(arg_count);
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ frame_->Push(&result);
}
@@ -7447,7 +7325,6 @@ void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
Label empty;
__ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
__ j(equal, &empty);
- ASSERT(!Heap::InNewSpace(Heap::fixed_cow_array_map()));
__ mov(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Factory::fixed_cow_array_map()));
__ bind(&empty);
@@ -8055,6 +7932,42 @@ void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(value.reg());
+ }
+
+ __ test(FieldOperand(value.reg(), String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+
+ value.Unuse();
+ destination()->Split(zero);
+}
+
+
+void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result string = frame_->Pop();
+ string.ToRegister();
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(string.reg());
+ }
+
+ Result number = allocator()->Allocate();
+ ASSERT(number.is_valid());
+ __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
+ __ IndexFromHash(number.reg(), number.reg());
+ string.Unuse();
+ frame_->Push(&number);
+}
+
+
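Both new inline runtime functions read String::kHashFieldOffset: the first tests String::kContainsCachedArrayIndexMask and branches on zero, the second extracts the cached index with IndexFromHash. A small sketch of the idea; the bit positions below are simplified placeholders, the real masks and shifts live in objects.h:

#include <cstdint>
#include <cstdio>

// Placeholder layout: "contains cached index" means both flag bits are clear.
constexpr uint32_t kContainsCachedArrayIndexMask = 0x3;
constexpr int kIndexShift = 2;                  // placeholder payload position
constexpr uint32_t kIndexMask = (1u << 24) - 1; // placeholder payload width

bool HasCachedArrayIndex(uint32_t hash_field) {
  // Mirrors the test + Split(zero) pair: true when the masked bits are zero.
  return (hash_field & kContainsCachedArrayIndexMask) == 0;
}

uint32_t GetCachedArrayIndex(uint32_t hash_field) {
  // Mirrors IndexFromHash: shift the cached payload down and mask it out.
  return (hash_field >> kIndexShift) & kIndexMask;
}

int main() {
  uint32_t field = 42u << kIndexShift;  // index 42, flag bits clear
  std::printf("%d %u\n", HasCachedArrayIndex(field), GetCachedArrayIndex(field));
  return 0;
}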
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
ASSERT(!in_safe_int32_mode());
if (CheckForInlineRuntimeCall(node)) {
@@ -8218,9 +8131,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->Push(&value);
} else {
Load(node->expression());
- bool can_overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = node->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
bool no_negative_zero = node->expression()->no_negative_zero();
@@ -8925,11 +8836,9 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->AsBinaryOperation() != NULL &&
- node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ if (node->left()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->AsBinaryOperation() != NULL &&
- node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ } else if (node->right()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_RIGHT;
}
@@ -9161,6 +9070,41 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
+void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
+ ASSERT(!in_safe_int32_mode());
+ Comment cmnt(masm_, "[ CompareToNull");
+
+ Load(node->expression());
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ cmp(operand.reg(), Factory::null_value());
+ if (node->is_strict()) {
+ operand.Unuse();
+ destination()->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ destination()->true_target()->Branch(equal);
+ __ cmp(operand.reg(), Factory::undefined_value());
+ destination()->true_target()->Branch(equal);
+ __ test(operand.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(equal);
+
+ // It can be an undetectable object.
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ temp.Unuse();
+ operand.Unuse();
+ destination()->Split(not_zero);
+ }
+}
+
+
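VisitCompareToNull inlines the comparison that used to be special-cased inside Comparison() above: strict equality matches only null, while non-strict equality also accepts undefined and undetectable heap objects (smis are rejected outright). A sketch of those semantics with a toy value model (the types are illustrative, not V8's):

#include <cstdio>

enum class Kind { kNull, kUndefined, kSmi, kObject };

struct Value {
  Kind kind;
  bool undetectable;  // models the Map::kIsUndetectable bit
};

bool CompareToNull(const Value& v, bool is_strict) {
  if (v.kind == Kind::kNull) return true;
  if (is_strict) return false;                  // '=== null' matches only null
  if (v.kind == Kind::kUndefined) return true;  // '== null' also matches undefined
  if (v.kind == Kind::kSmi) return false;       // smis are never undetectable
  return v.undetectable;                        // heap object: check the map bit
}

int main() {
  Value undef = {Kind::kUndefined, false};
  std::printf("%d %d\n", CompareToNull(undef, false), CompareToNull(undef, true));
  return 0;
}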
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() {
return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
@@ -9886,4425 +9830,6 @@ void Reference::SetValue(InitState init_state) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in esi.
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function info from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
- __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ mov(ebx, Immediate(Factory::empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(Factory::the_hole_value()));
- __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
- __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
- __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Setup the object header.
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // Setup the fixed slots.
- __ xor_(ebx, Operand(ebx)); // Set to NULL.
- __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
- __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
-
- // Copy the global object from the surrounding context. We go through the
- // context in the function (ecx) to match the allocation behavior we have
- // in the runtime system (see Heap::AllocateFunctionContext).
- __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
- __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
-
- // Initialize the rest of the slots to undefined.
- __ mov(ebx, Factory::undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
- }
-
- // Return and remove the on-stack parameter.
- __ mov(esi, Operand(eax));
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: constant elements.
- // [esp + (2 * kPointerSize)]: literal index.
- // [esp + (3 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
- __ cmp(ecx, Factory::undefined_value());
- __ j(equal, &slow_case);
-
- if (FLAG_debug_code) {
- const char* message;
- Handle<Map> expected_map;
- if (mode_ == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map = Factory::fixed_array_map();
- } else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map = Factory::fixed_cow_array_map();
- }
- __ push(ecx);
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
- __ Assert(equal, message);
- __ pop(ecx);
- }
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ lea(edx, Operand(eax, JSArray::kSize));
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // 'null' => false.
- __ cmp(eax, Factory::null_value());
- __ j(equal, &false_result);
-
- // Get the map and type of the heap object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
-
- // Undetectable => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, &false_result);
-
- // JavaScript object => true.
- __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
- __ j(above_equal, &true_result);
-
- // String value => false iff empty.
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
- __ j(zero, &false_result);
- __ jmp(&true_result);
-
- __ bind(&not_string);
- // HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &true_result);
- __ fldz();
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, &false_result);
- // Fall through to |true_result|.
-
- // Return 1/0 for true/false in eax.
- __ bind(&true_result);
- __ mov(eax, 1);
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ mov(eax, 0);
- __ ret(1 * kPointerSize);
-}
-
-
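The removed ToBooleanStub (now generated from code-stubs-ia32.cc) encodes ECMAScript ToBoolean for the values the caller does not inline: null and undetectable objects are false, other JS objects are true, strings are false only when empty, and heap numbers are false only for +0, -0 and NaN. A sketch of that decision order with a toy value model:

#include <cmath>
#include <cstdio>
#include <string>

struct HeapValue {
  enum Kind { kNull, kObject, kString, kNumber } kind;
  bool undetectable;
  std::string str;   // used when kind == kString
  double num;        // used when kind == kNumber
};

bool ToBoolean(const HeapValue& v) {
  if (v.kind == HeapValue::kNull) return false;             // 'null' => false
  if (v.undetectable) return false;                         // undetectable => false
  if (v.kind == HeapValue::kObject) return true;            // JS object => true
  if (v.kind == HeapValue::kString) return !v.str.empty();  // false iff empty
  return !(v.num == 0.0 || std::isnan(v.num));              // false iff +0, -0, NaN
}

int main() {
  HeapValue empty_string = {HeapValue::kString, false, "", 0.0};
  std::printf("%d\n", ToBoolean(empty_string));
  return 0;
}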
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ mov(right_arg, right);
- } else if (right.is(right_arg)) {
- __ mov(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ mov(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ mov(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ mov(right_arg, right);
- __ mov(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ mov(left_arg, left);
- __ mov(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(Immediate(right));
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (left.is(left_arg)) {
- __ mov(right_arg, Immediate(right));
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ mov(left_arg, Immediate(right));
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ mov(left_arg, left);
- __ mov(right_arg, Immediate(right));
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(Immediate(left));
- __ push(right);
- } else {
- // The calling convention with registers is left in edx and right in eax.
- Register left_arg = edx;
- Register right_arg = eax;
- if (right.is(right_arg)) {
- __ mov(left_arg, Immediate(left));
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ mov(right_arg, Immediate(left));
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ mov(right_arg, right);
- __ mov(left_arg, Immediate(left));
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right) {
- if (ArgsInRegistersSupported()) {
- SetArgsInRegisters();
- return frame->CallStub(this, left, right);
- } else {
- frame->Push(left);
- frame->Push(right);
- return frame->CallStub(this, 2);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = eax;
- right = ebx;
- if (HasArgsInRegisters()) {
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ mov(right, Operand(esp, 1 * kPointerSize));
- __ mov(left, Operand(esp, 2 * kPointerSize));
- }
-
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- __ or_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_AND) {
- __ and_(right, Operand(left));
- GenerateReturn(masm);
- return;
- } else if (op_ == Token::BIT_XOR) {
- __ xor_(right, Operand(left));
- GenerateReturn(masm);
- return;
- }
- }
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, Operand(left));
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, Operand(left));
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis, not_taken);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- //   Smi tagging. These two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- break;
-
- case Token::SUB:
- __ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, Operand(edx));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, Operand(right));
- __ j(zero, &not_smis, not_taken);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax.
- GenerateReturn(masm);
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::SHL: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- // It's OK to overwrite the right argument on the stack because we
- // are about to return.
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op_) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, Operand(left));
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, Operand(right));
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- GenerateReturn(masm);
- break;
- }
-
- default:
- break;
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op_) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
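Two recurring tricks in the smi fast path above, sketched in portable C++ under the ia32 smi tagging assumptions (kSmiTag == 0, one tag bit): or-ing both operands lets a single bit test cover them, and the 0xc0000000 comparisons check that a shift result still fits in the 31-bit smi payload.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kSmiTagMask = 1;  // low bit set => heap object pointer

bool BothAreSmis(uint32_t left, uint32_t right) {
  // If either operand has its tag bit set, the OR of the two has it set too.
  return ((left | right) & kSmiTagMask) == 0;
}

bool SignedResultFitsInSmi(int32_t value) {
  // Mirrors cmp(value, 0xc0000000) / j(sign): the value must lie in
  // [-2^30, 2^30) so that tagging it (value << 1) keeps its sign.
  return value >= -(1 << 30) && value < (1 << 30);
}

bool UnsignedResultFitsInSmi(uint32_t value) {
  // Mirrors test(value, 0xc0000000): neither high-order bit may be set.
  return (value & 0xc0000000u) == 0;
}

int main() {
  std::printf("%d %d %d\n", BothAreSmis(4, 6), SignedResultFitsInSmi(1 << 29),
              UnsignedResultFitsInSmi(0x80000000u));
  return 0;
}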
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
-
- // Generate fast case smi code if requested. This flag is set when the fast
- // case smi code is not generated by the caller. Generating it here will speed
- // up common operations.
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
-
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
-
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- if (static_operands_type_.IsSmi()) {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(edx);
- __ AbortIfNotSmi(eax);
- }
- FloatingPointHelper::LoadSSE2Smis(masm, ecx);
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm);
- }
- } else {
- FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- } else { // SSE2 not available, use FPU.
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(edx);
- __ AbortIfNotNumber(eax);
- }
- } else {
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- }
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- GenerateReturn(masm);
- __ bind(&after_alloc_failure);
- __ ffree();
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // Try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm,
- static_operands_type_,
- use_sse3_,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- GenerateReturn(masm);
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- GenerateReturn(masm);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
-
- // If all else fails, use the runtime system to get the correct
- // result. If the arguments were passed in registers, place them on the
- // stack in the correct order below the return address.
- __ bind(&call_runtime);
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- switch (op_) {
- case Token::ADD: {
- // Test for string arguments before calling runtime.
- Label not_strings, not_string1, string1, string1_smi2;
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in edx, eax
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
- if (HasArgsReversed()) {
- lhs = eax;
- rhs = edx;
- } else {
- lhs = edx;
- rhs = eax;
- }
-
- // Test if first argument is a string.
- __ test(lhs, Immediate(kSmiTagMask));
- __ j(zero, &not_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &not_string1);
-
- // First argument is a string, test second.
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &string1);
-
- // First and second argument are strings. Jump to the string add stub.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, edi, ebx, ecx, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ mov(Operand(esp, 1 * kPointerSize), edi);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &not_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(&not_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, Operand(ebx));
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, Operand(edx));
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
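GenerateHeapResultAllocation reuses an operand's HeapNumber for the result when the overwrite mode allows it and that operand is not a smi, swapping the two overwrite modes if the arguments were reversed. A sketch of that decision (toy types, not V8's):

#include <cstdio>

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

struct Operand { bool is_smi; double* heap_number; };

// Returns the HeapNumber the result should be written into.
double* ResultSlot(OverwriteMode mode, bool args_reversed,
                   Operand left, Operand right, double* fresh) {
  if (args_reversed) {  // the stub swaps the two overwrite modes
    if (mode == OVERWRITE_LEFT) mode = OVERWRITE_RIGHT;
    else if (mode == OVERWRITE_RIGHT) mode = OVERWRITE_LEFT;
  }
  if (mode == OVERWRITE_LEFT && !left.is_smi) return left.heap_number;
  if (mode == OVERWRITE_RIGHT && !right.is_smi) return right.heap_number;
  return fresh;  // NO_OVERWRITE, or the chosen operand was a smi
}

int main() {
  double a = 1.0, b = 2.0, spare = 0.0;
  Operand left = {false, &a};
  Operand right = {true, &b};
  std::printf("%d\n", ResultSlot(OVERWRITE_RIGHT, false, left, right, &spare) == &spare);
  return 0;
}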
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- // If arguments are not passed in registers read them from the stack.
- ASSERT(!HasArgsInRegisters());
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
- // If arguments are not passed in registers remove them from the stack before
- // returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(ecx);
- if (HasArgsReversed()) {
- __ push(eax);
- __ push(edx);
- } else {
- __ push(edx);
- __ push(eax);
- }
- __ push(ecx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- __ pop(ecx); // Save return address.
-
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // esp[4]: argument (should be number).
- // esp[0]: return address.
- // Test that eax is a number.
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label input_not_smi;
- Label loaded;
- __ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the low and high words of the double into ebx, edx.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sar(eax, 1);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ mov(Operand(esp, 0), eax);
- __ fild_s(Operand(esp, 0));
- __ fst_d(Operand(esp, 0));
- __ pop(edx);
- __ pop(ebx);
- __ jmp(&loaded);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // low and high words into ebx, edx.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
-
- __ bind(&loaded);
- // ST[0] == double value
- // ebx = low 32 bits of double value
- // edx = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ mov(ecx, ebx);
- __ xor_(ecx, Operand(edx));
- __ mov(eax, ecx);
- __ sar(eax, 16);
- __ xor_(ecx, Operand(eax));
- __ mov(eax, ecx);
- __ sar(eax, 8);
- __ xor_(ecx, Operand(eax));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // ebx = low 32 bits of double value.
- // edx = high 32 bits of double value.
- // ecx = TranscendentalCache::hash(double value).
- __ mov(eax,
- Immediate(ExternalReference::transcendental_cache_array_address()));
- // Eax points to cache array.
- __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
- // Eax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, Operand(eax));
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
- // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
- __ lea(ecx, Operand(ecx, ecx, times_2, 0));
- __ lea(ecx, Operand(eax, ecx, times_4, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmp(ebx, Operand(ecx, 0));
- __ j(not_equal, &cache_miss);
- __ cmp(edx, Operand(ecx, kIntSize));
- __ j(not_equal, &cache_miss);
- // Cache hit!
- __ mov(eax, Operand(ecx, 2 * kIntSize));
- __ fstp(0);
- __ ret(kPointerSize);
-
- __ bind(&cache_miss);
- // Update cache with new value.
- // We are short on registers, so use no_reg as scratch.
- // This gives slightly larger code.
- __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
- GenerateOperation(masm);
- __ mov(Operand(ecx, 0), ebx);
- __ mov(Operand(ecx, kIntSize), edx);
- __ mov(Operand(ecx, 2 * kIntSize), eax);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
-
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
-}
-
-
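The transcendental cache keys each entry on the two 32-bit halves of the input double and mixes them with the xor/shift hash from the comment above. A sketch of the lookup in plain C++ (the cache size is a placeholder, logical shifts stand in for the stub's arithmetic shifts, and little-endian layout is assumed):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr int kCacheSize = 512;  // placeholder; must be a power of two

struct Element { uint32_t in[2]; double output; };
Element cache[kCacheSize];       // zero-initialized, like an empty cache

uint32_t Hash(uint32_t lo, uint32_t hi) {
  uint32_t h = lo ^ hi;
  h ^= h >> 16;
  h ^= h >> 8;
  return h & (kCacheSize - 1);
}

double CachedSin(double x) {
  uint32_t halves[2];
  std::memcpy(halves, &x, sizeof(halves));  // low word, high word (little-endian)
  Element& e = cache[Hash(halves[0], halves[1])];
  if (e.in[0] == halves[0] && e.in[1] == halves[1]) return e.output;  // cache hit
  e.in[0] = halves[0];
  e.in[1] = halves[1];
  e.output = std::sin(x);                   // cache miss: compute and store
  return e.output;
}

int main() {
  std::printf("%f %f\n", CachedSin(1.0), CachedSin(1.0));
  return 0;
}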
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
- // Only free register is edi.
- Label done;
- ASSERT(type_ == TranscendentalCache::SIN ||
- type_ == TranscendentalCache::COS);
- // More transcendental types can be added later.
-
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ mov(edi, edx);
- __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
- int supported_exponent_limit =
- (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(Operand(edi), Immediate(supported_exponent_limit));
- __ j(below, &in_range, taken);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmp(Operand(edi), Immediate(0x7ff00000));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, taken);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ push(Immediate(0x7ff80000));
- __ push(Immediate(0));
- __ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fpmod to restrict argument to the range +/-2*PI.
- __ mov(edi, eax); // Save eax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(Operand(eax), Immediate(5));
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ test(Operand(eax), Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- __ fstp(0);
- __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
-
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
-}
-
-
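GenerateOperation only feeds fsin/fcos arguments with |x| < 2^63, returns NaN for infinities and NaN, and reduces larger finite inputs modulo 2*pi with the fprem1 loop. The same behaviour sketched with the C library instead of x87 instructions:

#include <cmath>
#include <cstdio>

double SinWithRangeReduction(double x) {
  if (std::isnan(x) || std::isinf(x)) return std::nan("");  // NaN result
  if (std::fabs(x) >= std::ldexp(1.0, 63)) {
    const double two_pi = 6.283185307179586;
    x = std::remainder(x, two_pi);  // plays the role of the fprem1 loop
  }
  return std::sin(x);
}

int main() {
  std::printf("%f %f\n", SinWithRangeReduction(1.0), SinWithRangeReduction(1e300));
  return 0;
}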
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-void IntegerConvert(MacroAssembler* masm,
- Register source,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
- return;
- }
- if (!type_info.IsInteger32() || !use_sse3) {
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- }
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- if (!type_info.IsInteger32()) {
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- }
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, Operand(ecx));
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, Operand(scratch2));
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, Operand(scratch));
- __ j(positive, &done);
- __ neg(ecx);
- __ jmp(&done);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(Operand(scratch2), Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ mov(ecx, Immediate(30));
- __ sub(ecx, Operand(scratch2));
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- //   take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, Operand(scratch));
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(ecx, Operand(ecx));
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative);
- __ mov(ecx, scratch2);
- __ jmp(&done);
- __ bind(&negative);
- __ sub(ecx, Operand(scratch2));
- __ bind(&done);
- }
-}
-
-
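IntegerConvert truncates a heap number toward zero and produces its low 32 bits, handling exponents up to 31 (the >>> case) and bailing out to the runtime for anything larger. A portable sketch of the same bit twiddling (values of 2^31 or more wrap to a negative int32, matching the generated code):

#include <cstdint>
#include <cstdio>
#include <cstring>

int32_t IntegerConvert(double value, bool* ok) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
  *ok = true;
  if (exponent < 0) return 0;                    // |value| < 1 truncates to 0
  if (exponent > 31) { *ok = false; return 0; }  // too large (or Infinity/NaN)
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  if (bits >> 63) magnitude = 0u - magnitude;    // negate modulo 2^32
  return static_cast<int32_t>(magnitude);
}

int main() {
  bool ok;
  std::printf("%d ", IntegerConvert(-12.9, &ok));          // -12
  std::printf("%d\n", IntegerConvert(4294967295.0, &ok));  // -1, the >>> case
  return 0;
}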
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- if (!type_info.IsDouble()) {
- if (!type_info.IsSmi()) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(edx);
- }
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
- }
-
- __ bind(&arg1_is_object);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
- if (!type_info.IsDouble()) {
- // Test if arg2 is a Smi.
- if (!type_info.IsSmi()) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
- }
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
- }
-
- __ bind(&arg2_is_object);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ cmp(edx, Factory::undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm,
- edx,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, Factory::undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, Factory::heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- if (type_info.IsNumber()) {
- LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
- } else {
- LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
- }
-}
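-
-// A quick sketch of the smi encoding these helpers rely on (the authoritative
-// constants are defined elsewhere in the tree): with kSmiTag == 0 and
-// kSmiTagSize == 1 a smi stores its value shifted left by one bit, while heap
-// object pointers always have the low bit set. That is why a zero result from
-// test(reg, Immediate(kSmiTagMask)) identifies a smi, why SmiTag/SmiUntag are
-// essentially a single shift, and why scaled addressing with times_2 on a smi
-// yields value * kPointerSize on this 32-bit port.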
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- Label load_smi, done;
-
- __ test(number, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, not_taken);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi);
- __ SmiUntag(number);
- __ push(number);
- __ fild_s(Operand(esp, 0));
- __ pop(number);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- Label load_smi_edx, load_eax, load_smi_eax, done;
- // Load operand in edx into xmm0.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- __ bind(&load_eax);
- // Load operand in eax into xmm1.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
-
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
- // Load operand in edx into xmm0, or branch to not_numbers.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ bind(&load_eax);
- // Load operand in eax into xmm1, or branch to not_numbers.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(equal, &load_float_eax);
- __ jmp(not_numbers); // Argument in eax is not a number.
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
- __ jmp(&done);
- __ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, Operand(scratch));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, Operand(scratch));
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch) {
- Label test_other, done;
- // Test if both operands are numbers (heap numbers or smis); jump to
- // non_float if either one is not. Scratch is clobbered.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &test_other, not_taken); // argument in edx is OK
- __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ j(not_equal, non_float); // argument in edx is not a number -> NaN
-
- __ bind(&test_other);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done); // argument in eax is OK
- __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ j(not_equal, non_float); // argument in eax is not a number -> NaN
-
- // Fall-through: Both operands are numbers.
- __ bind(&done);
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
-
- if (negative_zero_ == kStrictNegativeZero) {
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
- }
-
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- Label undo;
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(no_overflow, &done, taken);
-
- // Restore eax and go slow case.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
- __ jmp(&slow);
-
- // Try floating point case.
- __ bind(&try_float);
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- if (overwrite_ == UNARY_OVERWRITE) {
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
- } else {
- __ mov(edx, Operand(eax));
- // edx: operand
- __ AllocateHeapNumber(eax, ebx, ecx, &undo);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow, not_taken);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- CpuFeatures::IsSupported(SSE3),
- &slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, not_taken);
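- // (Why this works: a smi can hold -2^30 .. 2^30 - 1; subtracting 0xc0000000
- // sets the sign flag exactly for values 0x40000000 .. 0xbfffffff, i.e. those
- // outside the smi range, so the branch is taken only for results that need
- // a heap number.)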
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ jmp(&done);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (overwrite_ == UNARY_NO_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite eax until
- // we're sure we can do it without going through the slow case
- // that needs the value in eax.
- __ AllocateHeapNumber(ebx, edx, edi, &slow);
- __ mov(eax, Operand(ebx));
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ecx));
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- } else {
- UNIMPLEMENTED();
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in edx and the parameter count is in eax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(edx, Operand(eax));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
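- // (The element ends up being read from
- // ebp + kDisplacement + (parameter count - key) * kPointerSize; both values
- // are smis, so the times_2 scaling below supplies the * kPointerSize.)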
- __ lea(ebx, Operand(ebp, eax, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, Operand(ecx));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebx, ecx, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(ebx); // Return address.
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
-
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ test(ecx, Operand(ecx));
- __ j(zero, &add_arguments_object);
- __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
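- // (ecx was still a smi above, so times_2 turned the tagged length into
- // argc * kPointerSize; the requested size is therefore roughly
- // FixedArray::kHeaderSize + argc * kPointerSize + Heap::kArgumentsObjectSize,
- // with the elements array part skipped when there are no arguments.)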
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
- __ mov(edi, Operand(edi, offset));
-
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(edi, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::arguments_callee_index == 0);
- __ mov(ebx, Operand(esp, 3 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::arguments_length_index == 1);
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ test(ecx, Operand(ecx));
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
- // Untag the length for the loop below.
- __ SmiUntag(ecx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ sub(Operand(edx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Jump straight to the runtime if native RegExp was not selected at compile
- // time, or if regexp entry in generated code has been turned off by the
- // runtime flag.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: last_match_info (expected JSArray)
- // esp[8]: previous index
- // esp[12]: subject string
- // esp[16]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime, invoke_regexp;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, Operand(ebx));
- __ j(zero, &runtime, not_taken);
-
- // Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // ecx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ j(not_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check that the number of captures fits in the static offsets vector buffer.
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate the number of capture registers: (number_of_captures + 1) * 2.
- // This uses the assumption that smis are 2 * their untagged value.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
- __ j(above, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the second argument is a string.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to ebx.
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
-
- // ebx: Length of subject string as a smi
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ cmp(eax, Operand(ebx));
- __ j(above_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
- __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(eax, Factory::fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiUntag(eax);
- __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, Operand(eax));
- __ j(greater, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
- __ and_(ebx,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be a flat ascii string.
- __ test(Operand(ebx),
- Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
-
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ test(Operand(ebx),
- Immediate(kIsNotStringMask | kExternalStringTag));
- __ j(not_zero, &runtime);
- // String is a cons string.
- __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
- __ cmp(Operand(edx), Factory::empty_string());
- __ j(not_equal, &runtime);
- __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // String is a cons string with empty second part.
- // eax: first part of cons string.
- // ebx: map of first part of cons string.
- // Is first part a flat two byte string?
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be ascii.
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask);
- __ j(not_zero, &runtime);
-
- __ bind(&seq_ascii_string);
- // eax: subject string (flat ascii)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(edi, Immediate(1)); // Type is ascii.
- __ jmp(&check_code);
-
- __ bind(&seq_two_byte_string);
- // eax: subject string (flat two byte)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(edi, Immediate(0)); // Type is two byte.
-
- __ bind(&check_code);
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object; otherwise it
- // contains the hole.
- __ CmpObjectType(edx, CODE_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // eax: subject string
- // edx: code
- // edi: encoding of subject string (1 if ascii, 0 if two_byte);
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ SmiUntag(ebx); // Previous index from smi.
-
- // eax: subject string
- // ebx: previous index
- // edx: code
- // edi: encoding of subject string (1 if ascii, 0 if two_byte);
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
-
- static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
-
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
-
- // Argument 6: Start (high end) of backtracking stack memory area.
- __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
- __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 5 * kPointerSize), ecx);
-
- // Argument 5: static offsets vector buffer.
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(ExternalReference::address_of_static_offsets_vector()));
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label setup_two_byte, setup_rest;
- __ test(edi, Operand(edi));
- __ mov(edi, FieldOperand(eax, String::kLengthOffset));
- __ j(zero, &setup_two_byte);
- __ SmiUntag(edi);
- __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
- __ jmp(&setup_rest);
-
- __ bind(&setup_two_byte);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2).
- __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
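- // (edi is deliberately left as a smi here: the smi is twice the character
- // count, which is exactly the byte length of a two byte string, so times_1
- // addressing works; the untagged previous index in ebx is scaled by times_2
- // instead.)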
-
- __ bind(&setup_rest);
-
- // Argument 2: Previous index.
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
-
- // Argument 1: Subject string.
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // Locate the code entry and call it.
- __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(edx, kRegExpExecuteArguments);
-
- // Check the result.
- Label success;
- __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
- __ j(equal, &success, taken);
- Label failure;
- __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
- __ j(equal, &failure, taken);
- __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
- // If not exception it can only be retry. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already, a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // the exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(eax,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ cmp(eax, Operand::StaticVariable(pending_exception));
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(Operand(eax), Factory::null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
-
- // edx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
-
- // ebx: last_match_info backing store (FixedArray)
- // edx: number of capture registers
- // Store the capture count.
- __ SmiTag(edx); // Number of capture registers to smi.
- __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
- __ SmiUntag(edx); // Number of capture registers back from smi.
- // Store last subject and last input.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ mov(ecx, ebx);
- __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector();
- __ mov(ecx, Immediate(address_of_static_offsets_vector));
-
- // ebx: last_match_info backing store (FixedArray)
- // ecx: offsets vector
- // edx: number of capture registers
- Label next_capture, done;
- // The capture register counter starts at the number of capture registers and
- // counts down until it wraps after zero.
- __ bind(&next_capture);
- __ sub(Operand(edx), Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer.
- __ mov(edi, Operand(ecx, edx, times_int_size, 0));
- __ SmiTag(edi);
- // Store the smi value in the last match info.
- __ mov(FieldOperand(ebx,
- edx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- edi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
- __ mov(number_string_cache,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(Operand(mask), Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- if (object_is_smi) {
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- } else {
- Label not_smi, hash_calculated;
- STATIC_ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ test(probe, Immediate(kSmiTagMask));
- __ j(zero, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
-}
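-
-// A short illustration of the probing above (the concrete cache size is
-// whatever the heap allocated; 128 entries is purely an example): the backing
-// FixedArray holds two slots per entry, the key followed by the cached string.
-// For a smi key the hash is the untagged value; for a heap number it is the
-// xor of the two 32-bit halves of the double. The entry index is hash & mask,
-// so hash 7 in a 128-entry cache probes slots 14 and 15.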
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- Label check_unequal_objects, done;
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Identical objects can be compared fast, but there are some tricky cases
- // for NaN and undefined.
- {
- Label not_identical;
- __ cmp(eax, Operand(edx));
- __ j(not_equal, &not_identical);
-
- if (cc_ != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ cmp(edx, Factory::undefined_value());
- __ j(not_equal, &check_for_nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- } else {
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- __ j(equal, &heap_number);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
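- // Concretely: the high word is shifted left one bit below, dropping the
- // sign, and compared against kQuietNaNHighBitsMask shifted the same way;
- // above_equal then means the exponent bits and the topmost mantissa bit
- // are all set, i.e. the value is a quiet NaN.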
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(eax, Operand(eax));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, Operand(edx));
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc_ == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- }
- }
-
- __ bind(&not_identical);
- }
-
- // Strict equality can quickly decide whether objects are equal.
- // Non-strict object equality is slower, so it is handled later in the stub.
- if (cc_ == equal && strict_) {
- Label slow; // Fallthrough label.
- Label not_smis;
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
- __ j(not_zero, &not_smis);
- // One operand is a smi.
-
- // Check whether the non-smi is a heap number.
- STATIC_ASSERT(kSmiTagMask == 1);
- // ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
- // if eax was smi, ebx is now edx, else eax.
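- // (A branchless select: after the subtraction ecx is 0 if eax is a heap
- // object and all ones if eax is a smi, so the and/xor pair leaves ebx
- // holding whichever operand is not the smi.)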
-
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&not_smis);
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different.
- // There is no test for undetectability in strict equality.
-
- // Get the type of the first operand.
- // If the first object is a JS object, we have done pointer comparison.
- Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &first_non_object);
-
- // Return non-zero (eax is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
-
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, not_taken);
- __ j(above, &above_label, not_taken);
-
- __ xor_(eax, Operand(eax));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
- }
-
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
- BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
-
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
- &check_unequal_objects);
-
- // Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- edx,
- eax,
- ecx,
- ebx,
- edi);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
- // Non-strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects;
- Label return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(ecx, Operand(eax, edx, times_1, 0));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
- __ j(below, &not_both_objects);
- // We do not bail out after this point. Both are JSObjects, and
- // they are equal if and only if both are undetectable.
- // The and of the undetectable flags is 1 if and only if they are equal.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(eax, Immediate(EQUAL));
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(0); // The operands are in registers (eax, edx); nothing to pop.
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- }
-
- // Restore return address on the stack.
- __ push(ecx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
- __ cmp(scratch, kSymbolTag | kStringTag);
- __ j(not_equal, label);
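- // (The mask/compare pair checks two things at once: the kIsNotStringMask
- // bits must match kStringTag, i.e. the object is a string, and the
- // kIsSymbolMask bit must match kSymbolTag; anything else takes the branch.)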
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(eax);
- __ push(Immediate(Smi::FromInt(0)));
- __ push(eax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &receiver_is_value, not_taken);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow, not_taken);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // eax holds the exception.
-
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ mov(esp, Operand::StaticVariable(handler_address));
-
- // Restore next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(Operand::StaticVariable(handler_address));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- __ pop(ebp);
- __ pop(edx); // Remove state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of
- // a JS entry frame.
- __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
- Label skip;
- __ cmp(ebp, 0);
- __ j(equal, &skip, not_taken);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ ret(0);
-}
-
-
-// If true, a Handle<T> passed by value is passed and returned by
-// using the location_ field directly. If false, it is passed and
-// returned as a pointer to a handle.
-#ifdef USING_BSD_ABI
-static const bool kPassHandlesDirectly = true;
-#else
-static const bool kPassHandlesDirectly = false;
-#endif
-
-
-void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
- Label empty_handle;
- Label prologue;
- Label promote_scheduled_exception;
- __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
- STATIC_ASSERT(kArgc == 4);
- if (kPassHandlesDirectly) {
- // When handles are passed directly we don't have to allocate extra
- // space for, and pass, an out parameter.
- __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
- __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
- } else {
- // The function expects three arguments to be passed but we allocate
- // four to get space for the output cell. The argument slots are filled
- // as follows:
- //
- // 3: output cell
- // 2: arguments pointer
- // 1: name
- // 0: pointer to the output cell
- //
- // Note that this is one more "argument" than the function expects,
- // so the out cell will have to be popped explicitly after returning
- // from the function.
- __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
- __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
- __ mov(ebx, esp);
- __ add(Operand(ebx), Immediate(3 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
- }
- // Call the api function!
- __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address();
- __ cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(Factory::the_hole_value()));
- __ j(not_equal, &promote_scheduled_exception, not_taken);
- if (!kPassHandlesDirectly) {
- // The returned value is a pointer to the handle holding the result.
- // Dereference this to get to the location.
- __ mov(eax, Operand(eax, 0));
- }
- // Check if the result handle holds 0.
- __ test(eax, Operand(eax));
- __ j(zero, &empty_handle, not_taken);
- // It was non-zero. Dereference to get the result value.
- __ mov(eax, Operand(eax, 0));
- __ bind(&prologue);
- __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
- __ ret(0);
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
- __ bind(&empty_handle);
- // It was zero; the result is undefined.
- __ mov(eax, Factory::undefined_value());
- __ jmp(&prologue);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
- // eax: result parameter for PerformGC, if any
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: pointer to the first argument (C callee-saved)
-
- // Result returned in eax, or eax+edx if result_size_ is 2.
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack alignment is known to be correct. This function takes one argument
- // which is passed on the stack, and we know that the stack has been
- // prepared to pass at least one argument.
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
- __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate_scope) {
- __ inc(Operand::StaticVariable(scope_depth));
- }
-
- // Call C function.
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ call(Operand(ebx));
- // Result is in eax or edx:eax - do not destroy these registers!
-
- if (always_allocate_scope) {
- __ dec(Operand::StaticVariable(scope_depth));
- }
-
- // Make sure we're not trying to return 'the hole' from the runtime
- // call as this may lead to crashes in the IC code later.
- if (FLAG_debug_code) {
- Label okay;
- __ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &okay);
- __ int3();
- __ bind(&okay);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ lea(ecx, Operand(eax, 1));
- // Lower 2 bits of ecx are 0 iff eax has failure tag.
- __ test(ecx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned, not_taken);
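- // (The STATIC_ASSERT above guarantees that adding one clears exactly the
- // failure tag bits; smis and ordinary heap objects never have both low tag
- // bits set, so only a failure value makes those bits of ecx zero.)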
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, taken);
-
- // Special handling of out of memory exceptions.
- __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ j(equal, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ mov(eax, Operand::StaticVariable(pending_exception_address));
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ mov(Operand::StaticVariable(pending_exception_address), edx);
-
- // Special handling of termination exceptions, which are uncatchable
- // by JavaScript code.
- __ cmp(eax, Factory::termination_exception());
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop sp to the top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ mov(esp, Operand(esp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(Operand::StaticVariable(handler_address));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ mov(eax, false);
- __ mov(Operand::StaticVariable(external_caught), eax);
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ mov(Operand::StaticVariable(pending_exception), eax);
- }
-
- // Clear the context pointer.
- __ xor_(esi, Operand(esi));
-
- // Restore fp from handler and discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- __ pop(ebp);
- __ pop(edx); // State.
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ ret(0);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // eax: number of arguments including receiver
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // esi: current context (C callee-saved)
- // edi: JS function of the caller (C callee-saved)
-
- // NOTE: Invocations of builtins may return failure objects instead
- // of a proper result. The builtin entry handles this by performing
- // a garbage collection and retrying the builtin (twice).
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_);
-
- // eax: result parameter for PerformGC, if any (setup below)
- // ebx: pointer to builtin function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: argv pointer (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
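
The three GenerateCore calls encode a simple retry policy: call the runtime, and on an allocation failure retry once after collecting the failed space and once more after a full collection. A minimal sketch of that policy, with hypothetical callbacks standing in for the actual runtime call and failure check:

static int CallRuntimeWithRetries(int (*call)(bool do_gc),
                                  bool (*needs_gc_retry)(int result)) {
  int result = call(false);                         // plain call, no GC
  if (needs_gc_retry(result)) result = call(true);  // GC the failed space
  if (needs_gc_retry(result)) result = call(true);  // full GC, final attempt
  return result;  // remaining failures are real exceptions, thrown below
}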
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Label not_outermost_js, not_outermost_js_2;
-#endif
-
- // Setup frame.
- __ push(ebp);
- __ mov(ebp, Operand(esp));
-
- // Push marker in two places.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ push(Immediate(Smi::FromInt(marker))); // context slot
- __ push(Immediate(Smi::FromInt(marker))); // function slot
- // Save callee-saved registers (C calling conventions).
- __ push(edi);
- __ push(esi);
- __ push(ebx);
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ push(Operand::StaticVariable(c_entry_fp));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(not_equal, &not_outermost_js);
- __ mov(Operand::StaticVariable(js_entry_sp), ebp);
- __ bind(&not_outermost_js);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(Operand::StaticVariable(pending_exception), eax);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-
- // Clear any pending exceptions.
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. Notice that we
- // cannot store a reference to the trampoline code directly in this
- // stub, because the builtin stubs may not have been generated yet.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ mov(edx, Immediate(construct_entry));
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ mov(edx, Immediate(entry));
- }
- __ mov(edx, Operand(edx, 0)); // deref address
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(Operand(edx));
-
- // Unlink this frame from the handler chain.
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
- // Pop next_sp.
- __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current EBP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
- __ j(not_equal, &not_outermost_js_2);
- __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ bind(&not_outermost_js_2);
-#endif
-
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
-
- // Restore callee-saved registers (C calling conventions).
- __ pop(ebx);
- __ pop(esi);
- __ pop(edi);
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(ebp);
- __ ret(0);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Get the object - go slow case if it's a smi.
- Label slow;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
-
-  // Check that the left hand side is a JS object.
- __ IsObjectJSObjectType(eax, eax, edx, &slow);
-
- // Get the prototype of the function.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
- // edx is function, eax is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss;
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ j(not_equal, &miss);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
- __ ret(2 * kPointerSize);
-
- __ bind(&miss);
- __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
-
- // Check that the function prototype is a JS object.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
-
- // Register mapping:
- // eax is object map.
- // edx is function.
- // ebx is function prototype.
- __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
-
- __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ bind(&loop);
- __ cmp(ecx, Operand(ebx));
- __ j(equal, &is_instance);
- __ cmp(Operand(ecx), Immediate(Factory::null_value()));
- __ j(equal, &is_not_instance);
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- __ Set(eax, Immediate(0));
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ ret(2 * kPointerSize);
-
- __ bind(&is_not_instance);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
- __ ret(2 * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
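
Semantically the stub walks the prototype chain of the object, caching the last (function, map, answer) triple in the roots array; note that it returns 0 for "is an instance" and Smi(1) for "is not". A hedged sketch of the chain walk with stand-in types (not the real object model):

struct Map;
struct HeapObject { Map* map; };
struct Map { HeapObject* prototype; };

static bool IsInstanceOf(Map* object_map, HeapObject* function_prototype,
                         HeapObject* null_value) {
  for (HeapObject* p = object_map->prototype; ; p = p->map->prototype) {
    if (p == function_prototype) return true;   // stub writes 0 to the cache
    if (p == null_value) return false;          // stub writes Smi(1)
  }
}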
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
-  // stubs the never-NaN-NaN flag is only taken into account when the
-  // condition is equal.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name);
- return name_;
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi trigger the non-string case.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(object_, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ test(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(index_, Immediate(kSmiTagMask));
- __ j(not_zero, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
-
- // Handle non-flat strings.
- __ test(result_, Immediate(kIsConsStringMask));
- __ j(zero, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
- Immediate(Factory::empty_string()));
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ test(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzx_w(result_, FieldOperand(object_,
- scratch_, times_1, // Scratch is smi-tagged.
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiUntag(scratch_);
- __ movzx_b(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
- __ SmiTag(result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!scratch_.is(eax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ mov(scratch_, eax);
- }
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- STATIC_ASSERT(kSmiTag == 0);
- __ test(scratch_, Immediate(kSmiTagMask));
- __ j(not_zero, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
-  // index is a number, but the code for getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ test(code_,
- Immediate(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case_, not_taken);
-
- __ Set(result_, Immediate(Factory::single_character_string_cache()));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ascii char code.
- __ mov(result_, FieldOperand(result_,
- code_, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result_, Factory::undefined_value());
- __ j(equal, &slow_case_, not_taken);
- __ bind(&exit_);
-}
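
The fast case is just an index into the single character string cache, bailing out to the slow case for non-smi or out-of-range codes and for cache slots that are still undefined. A rough stand-alone sketch (NULL stands in for the undefined value):

#include <stddef.h>

static const char* LookupSingleCharString(const char* const* cache,
                                          int code, int max_ascii_code) {
  if (code < 0 || code > max_ascii_code) return NULL;  // slow case
  return cache[code];  // may be NULL (undefined): slow case as well
}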
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
-
- // Load the two arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
-
-    // First argument is a string, test second.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
- }
-
- // Both arguments are strings.
- // eax: first string
- // edx: second string
-  // Check if either of the strings is empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, Operand(ecx));
- __ j(not_zero, &second_not_zero_length);
- // Second string is empty, result is first string which is already in eax.
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, Operand(ebx));
- __ j(not_zero, &both_not_zero_length);
- // First string is empty, result is second string which is in edx.
- __ mov(eax, edx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // eax: first string
- // ebx: length of first string as a smi
- // ecx: length of second string as a smi
- // edx: second string
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
- __ add(ebx, Operand(ecx));
- STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
- // Handle exceptionally long strings in the runtime system.
- __ j(overflow, &string_add_runtime);
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
- &string_add_runtime);
-
- // Get the two characters forming the sub string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
-
-  // Try to look up the two character string in the symbol table. If it is
-  // not found, just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(ebx, Immediate(Smi::FromInt(2)));
- __ jmp(&make_flat_ascii_string);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
- __ j(below, &string_add_flat_result);
-
- // If result is not supposed to be flat allocate a cons string object. If both
- // strings are ascii the result is an ascii cons string.
- Label non_ascii, allocated, ascii_data;
- __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, Operand(edi));
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ test(ecx, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
-  // Allocate an ascii cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
- __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
- __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
- __ mov(eax, ecx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
- // ecx: first instance type AND second instance type.
- // edi: second instance type.
- __ test(ecx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, Operand(ecx));
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are not
- // external strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&string_add_flat_result);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- // Now check if both strings are ascii strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label non_ascii_string_add_flat_result;
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(zero, &non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(zero, &string_add_runtime);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
- // ebx: length of resulting flat string as a smi
- __ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // eax: first string - known to be two byte
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(Operand(ecx),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-}
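
Stripped of register details, the stub picks a result representation from the operand lengths; the runtime fallbacks (length overflow, external strings, non-ascii two-character results) are omitted from this sketch, and the helper below is illustrative only:

enum AddResultKind {
  USE_FIRST, USE_SECOND, TWO_CHAR_SYMBOL, FLAT_COPY, CONS_STRING
};

static AddResultKind ClassifyStringAdd(int first_len, int second_len,
                                       bool both_seq_ascii,
                                       int min_non_flat_length) {
  if (second_len == 0) return USE_FIRST;       // second empty: reuse first
  if (first_len == 0) return USE_SECOND;       // first empty: reuse second
  int total = first_len + second_len;
  if (total == 2 && both_seq_ascii) return TWO_CHAR_SYMBOL;  // probe symbol table
  if (total < min_non_flat_length) return FLAT_COPY;         // short: copy chars
  return CONS_STRING;                                        // long: cons string
}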
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- } else {
- __ mov_w(scratch, Operand(src, 0));
- __ mov_w(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(2));
- __ add(Operand(dest), Immediate(2));
- }
- __ sub(Operand(count), Immediate(1));
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- // Copy characters using rep movs of doublewords.
- // The destination is aligned on a 4 byte boundary because we are
- // copying to the beginning of a newly allocated string.
- ASSERT(dest.is(edi)); // rep movs destination
- ASSERT(src.is(esi)); // rep movs source
- ASSERT(count.is(ecx)); // rep movs count
- ASSERT(!scratch.is(dest));
- ASSERT(!scratch.is(src));
- ASSERT(!scratch.is(count));
-
- // Nothing to do for zero characters.
- Label done;
- __ test(count, Operand(count));
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- __ shl(count, 1);
- }
-
- // Don't enter the rep movs if there are less than 4 bytes to copy.
- Label last_bytes;
- __ test(count, Immediate(~3));
- __ j(zero, &last_bytes);
-
-  // Copy from esi to edi using the rep movs instruction.
- __ mov(scratch, count);
- __ sar(count, 2); // Number of doublewords to copy.
- __ cld();
- __ rep_movs();
-
- // Find number of bytes left.
- __ mov(count, scratch);
- __ and_(count, 3);
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ test(count, Operand(count));
- __ j(zero, &done);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- __ sub(Operand(count), Immediate(1));
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
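
The rep movs based copy amounts to a doubleword bulk copy followed by a byte tail; a rough C++ equivalent (illustrative only, no overlap support, matching the stub):

#include <stdint.h>
#include <string.h>

static void CopyCharsREP(uint8_t* dest, const uint8_t* src,
                         int count, bool ascii) {
  if (count == 0) return;
  int bytes = ascii ? count : count * 2;      // two-byte chars: 2 bytes each
  int dwords = bytes >> 2;                    // the part rep movs handles
  memcpy(dest, src, dwords * 4);
  for (int i = dwords * 4; i < bytes; i++) {  // remaining 0-3 bytes
    dest[i] = src[i];
  }
}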
-
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
-  // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
- __ mov(scratch, c1);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
- __ mov(scratch, c2);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, kBitsPerByte);
- __ or_(chars, Operand(c2));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the symbol table.
- Register symbol_table = c2;
- ExternalReference roots_address = ExternalReference::roots_address();
- __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
- __ mov(symbol_table,
- Operand::StaticArray(scratch, times_pointer_size, roots_address));
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ SmiUntag(mask);
- __ sub(Operand(mask), Immediate(1));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // symbol_table: symbol table
- // mask: capacity mask
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes], next_probe_pop_mask[kProbes];
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
- __ mov(scratch, hash);
- if (i > 0) {
- __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
- }
- __ and_(scratch, Operand(mask));
-
- // Load the entry from the symbol table.
- Register candidate = scratch; // Scratch register contains candidate.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ mov(candidate,
- FieldOperand(symbol_table,
- scratch,
- times_pointer_size,
- SymbolTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- __ cmp(candidate, Factory::undefined_value());
- __ j(equal, not_found);
-
- // If length is not 2 the string is not a candidate.
- __ cmp(FieldOperand(candidate, String::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &next_probe[i]);
-
- // As we are out of registers save the mask on the stack and use that
- // register as a temporary.
- __ push(mask);
- Register temp = mask;
-
- // Check that the candidate is a non-external ascii string.
- __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe_pop_mask[i]);
-
- // Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
- __ and_(temp, 0x0000ffff);
- __ cmp(chars, Operand(temp));
- __ j(equal, &found_in_symbol_table);
- __ bind(&next_probe_pop_mask[i]);
- __ pop(mask);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- __ pop(mask); // Pop saved mask from the stack.
- if (!result.is(eax)) {
- __ mov(eax, result);
- }
-}
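
The probe sequence is a bounded open-addressing lookup over the symbol table; below is a simplified sketch with a stand-in Symbol type. The probe offset formula is an assumption modelled on SymbolTable::GetProbeOffset, and, as the header comment notes, failing to find the string here does not prove it is absent from the table:

#include <stdint.h>
#include <stddef.h>

struct Symbol { int length; char chars[2]; };  // simplified stand-in

static const int kProbes = 4;  // matches the generator above

static const Symbol* ProbeTwoCharSymbol(const Symbol* const* table,
                                        uint32_t mask, uint32_t hash,
                                        char c1, char c2) {
  for (int i = 0; i < kProbes; i++) {
    uint32_t entry = (hash + (i + i * i) / 2) & mask;  // assumed probe offset
    const Symbol* candidate = table[entry];
    if (candidate == NULL) return NULL;                // undefined slot: give up
    if (candidate->length == 2 &&
        candidate->chars[0] == c1 && candidate->chars[1] == c2) {
      return candidate;                                // found
    }
  }
  return NULL;  // not found by probing; caller allocates a new string
}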
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = character + (character << 10);
- __ mov(hash, character);
- __ shl(hash, 10);
- __ add(hash, Operand(character));
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ add(hash, Operand(character));
- // hash += hash << 10;
- __ mov(scratch, hash);
- __ shl(scratch, 10);
- __ add(hash, Operand(scratch));
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ mov(scratch, hash);
- __ shl(scratch, 3);
- __ add(hash, Operand(scratch));
- // hash ^= hash >> 11;
- __ mov(scratch, hash);
- __ sar(scratch, 11);
- __ xor_(hash, Operand(scratch));
- // hash += hash << 15;
- __ mov(scratch, hash);
- __ shl(scratch, 15);
- __ add(hash, Operand(scratch));
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ test(hash, Operand(hash));
- __ j(not_zero, &hash_not_zero);
- __ mov(hash, Immediate(27));
- __ bind(&hash_not_zero);
-}
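
Taken together, the three helpers compute a Jenkins-style one-at-a-time hash; for a two character string the whole computation is, in plain C++ (the assembly uses arithmetic shifts on registers, modelled here with uint32_t and logical shifts):

#include <stdint.h>

static uint32_t TwoCharHash(uint32_t c1, uint32_t c2) {
  uint32_t hash = c1 + (c1 << 10);  // GenerateHashInit
  hash ^= hash >> 6;
  hash += c2;                       // GenerateHashAddCharacter
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += hash << 3;                // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;
}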
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: to
- // esp[8]: from
- // esp[12]: string
-
- // Make sure first argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
-
- // eax: string
- // ebx: instance type
-
- // Calculate length of sub string using the smi values.
- Label result_longer_than_two;
- __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
- __ sub(ecx, Operand(edx));
- __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label return_eax;
- __ j(equal, &return_eax);
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked for in the symbol cache.
- __ SmiUntag(ecx); // Result length is no longer smi.
- __ cmp(ecx, 2);
- __ j(greater, &result_longer_than_two);
- __ j(less, &runtime);
-
- // Sub string of length 2 requested.
- // eax: string
- // ebx: instance type
- // ecx: sub string length (value is 2)
- // edx: from index (smi)
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
-
- // Get the two characters forming the sub string.
- __ SmiUntag(edx); // From index is no longer smi.
- __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx,
- FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
-
-  // Try to look up the two character string in the symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
- __ ret(3 * kPointerSize);
-
- __ bind(&make_two_character_string);
- // Setup registers for allocating the two character string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ Set(ecx, Immediate(2));
-
- __ bind(&result_longer_than_two);
- // eax: string
- // ebx: instance type
- // ecx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
-
- // Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
- __ SmiUntag(ebx);
- __ add(esi, Operand(ebx));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
- __ mov(esi, edx); // Restore esi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&non_ascii_flat);
- // eax: string
- // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
- // ecx: result string length
- // Check for flat two byte string
- __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(Operand(edi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
-  // As from is a smi it is already multiplied by two, which matches the
-  // size of a two byte character.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, Operand(ebx));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
- __ mov(esi, edx); // Restore esi.
-
- __ bind(&return_eax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(3 * kPointerSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- Label result_not_equal;
- Label result_greater;
- Label compare_lengths;
-
- __ IncrementCounter(&Counters::string_compare_native, 1);
-
- // Find minimum length.
- Label left_shorter;
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ mov(scratch3, scratch1);
- __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
-
- Register length_delta = scratch3;
-
- __ j(less_equal, &left_shorter);
- // Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, Operand(length_delta));
- __ bind(&left_shorter);
-
- Register min_length = scratch1;
-
- // If either length is zero, just compare lengths.
- __ test(min_length, Operand(min_length));
- __ j(zero, &compare_lengths);
-
- // Change index to run from -min_length to -1 by adding min_length
- // to string start. This means that loop ends when index reaches zero,
- // which doesn't need an additional compare.
- __ SmiUntag(min_length);
- __ lea(left,
- FieldOperand(left,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ lea(right,
- FieldOperand(right,
- min_length, times_1,
- SeqAsciiString::kHeaderSize));
- __ neg(min_length);
-
- Register index = min_length; // index = -min_length;
-
- {
- // Compare loop.
- Label loop;
- __ bind(&loop);
- // Compare characters.
- __ mov_b(scratch2, Operand(left, index, times_1, 0));
- __ cmpb(scratch2, Operand(right, index, times_1, 0));
- __ j(not_equal, &result_not_equal);
- __ add(Operand(index), Immediate(1));
- __ j(not_zero, &loop);
- }
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- __ test(length_delta, Operand(length_delta));
- __ j(not_zero, &result_not_equal);
-
- // Result is EQUAL.
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&result_not_equal);
- __ j(greater, &result_greater);
-
- // Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(0);
-}
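
The negative-index addressing above is only an optimization; the comparison itself is the obvious one. For reference, a plain C++ version returning -1/0/1 for LESS/EQUAL/GREATER (signedness is irrelevant because both strings are ascii):

static int CompareFlatAscii(const char* left, int left_length,
                            const char* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  if (left_length == right_length) return 0;
  return left_length < right_length ? -1 : 1;
}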
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: right string
- // esp[8]: left string
-
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
-
- Label not_same;
- __ cmp(edx, Operand(eax));
- __ j(not_equal, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(&Counters::string_compare_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
-
- // Compare flat ascii strings.
- // Drop arguments from the stack.
- __ pop(ecx);
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- __ push(ecx);
- GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
#undef __
#define __ masm.
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index ce1bcf6a..2a8d313e 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -574,6 +574,11 @@ class CodeGenerator: public AstVisitor {
void Int32BinaryOperation(BinaryOperation* node);
+ // Generate a stub call from the virtual frame.
+ Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right);
+
void Comparison(AstNode* node,
Condition cc,
bool strict,
@@ -730,6 +735,9 @@ class CodeGenerator: public AstVisitor {
// Check whether two RegExps are equivalent
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+ void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+ void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -803,327 +811,6 @@ class CodeGenerator: public AstVisitor {
};
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
- Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm);
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type)
- : op_(op),
- mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
- name_(NULL) {
- if (static_operands_type_.IsSmi()) {
- mode_ = NO_OVERWRITE;
- }
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(runtime_operands_type),
- name_(NULL) {
- }
-
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- Result GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
- bool use_sse3_;
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
- }
-#endif
-
- // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
- class ArgsReversedBits: public BitField<bool, 11, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
- class StaticTypeInfoBits: public BitField<int, 13, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 18 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- bool ArgsInRegistersSupported() {
- return op_ == Token::ADD || op_ == Token::SUB
- || op_ == Token::MUL || op_ == Token::DIV;
- }
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
-
- void SetArgsInRegisters() { args_in_registers_ = true; }
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
-
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies ecx characters from esi to edi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be edi.
- Register src, // Must be esi.
- Register count, // Must be ecx.
- Register scratch, // Neither of above.
- bool ascii);
-
- // Probe the symbol table for a two character string. If the string is
- // not found by probing a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found the code falls through with the string in register eax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
-
- void Generate(MacroAssembler* masm);
-
- // Should the stub check whether arguments are strings?
- bool string_check_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
- explicit StringCompareStub() {
- }
-
-  // Compares two flat ascii strings and returns the result in eax after
-  // popping two arguments from the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index b57cf3d0..ee945656 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -94,22 +94,33 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList pointer_regs,
+ RegList object_regs,
+ RegList non_object_regs,
bool convert_call_to_jmp) {
- // Save the content of all general purpose registers in memory. This copy in
- // memory is later pushed onto the JS expression stack for the fake JS frame
- // generated and also to the C frame generated on top of that. In the JS
- // frame ONLY the registers containing pointers will be pushed on the
- // expression stack. This causes the GC to update these pointers so that
- // they will have the correct value when returning from the debugger.
- __ SaveRegistersToMemory(kJSCallerSaved);
-
// Enter an internal frame.
__ EnterInternalFrame();
- // Store the registers containing object pointers on the expression stack to
- // make sure that these are correctly updated during GC.
- __ PushRegistersFromMemory(pointer_regs);
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non object values
+  // are stored as smis, causing them to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ test(reg, Immediate(0xc0000000));
+ __ Assert(zero, "Unable to encode value as smi");
+ }
+ __ SmiTag(reg);
+ __ push(reg);
+ }
+ }
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
@@ -117,12 +128,25 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(eax, Immediate(0)); // no arguments
__ mov(ebx, Immediate(ExternalReference::debug_break()));
- CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
+ CEntryStub ceb(1);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
- // stack in the reverse order as they where pushed.
- __ PopRegistersToMemory(pointer_regs);
+ // stack.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Set(reg, Immediate(kDebugZapValue));
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ __ SmiUntag(reg);
+ }
+ }
// Get rid of the internal frame.
__ LeaveInternalFrame();
@@ -130,12 +154,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ pop(eax);
+ __ add(Operand(esp), Immediate(kPointerSize));
}
- // Finally restore all registers.
- __ RestoreRegistersFromMemory(kJSCallerSaved);
-
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
@@ -151,7 +172,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// -- eax : receiver
// -- ecx : name
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
}
@@ -162,7 +183,8 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// -- ecx : name
// -- edx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
}
@@ -172,7 +194,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// -- edx : receiver
// -- eax : key
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
}
@@ -183,19 +205,17 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// -- ecx : key
// -- edx : receiver
// -----------------------------------
- // Register eax contains an object that needs to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC call (from ic-ia32.cc)
// ----------- S t a t e -------------
- // -- eax: number of arguments
+ // -- ecx: name
// -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false);
}
@@ -204,10 +224,11 @@ void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
// eax is the actual number of arguments, not encoded as a smi; see the comment
// above IC call.
// ----------- S t a t e -------------
- // -- eax: number of arguments
+ // -- eax: number of arguments (not smi)
+ // -- edi: constructor function
// -----------------------------------
// The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
}
@@ -216,7 +237,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: return value
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), true);
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
}
@@ -225,7 +246,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// No registers used on entry.
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, 0, 0, false);
}
@@ -245,7 +266,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
- Generate_DebugBreakCallHelper(masm, 0, true);
+ Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
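
The rewritten helper above protects live values across GC by pushing object registers untouched and smi-tagging everything else, so the collector sees plain integers on the expression stack. A minimal standalone sketch of that encoding, assuming the usual ia32 smi layout (value shifted left by one, tag bit clear); illustrative only, not part of the patch:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Assumed ia32 smi conventions: a smi stores its value shifted left by
    // one with the low (tag) bit clear; heap pointers have the bit set.
    static const uint32_t kSmiTagMask = 1;

    static uint32_t SmiTag(uint32_t value) {
      // Same precondition the FLAG_debug_code path asserts with
      // test(reg, Immediate(0xc0000000)): the top two bits must be clear.
      assert((value & 0xc0000000u) == 0 && "Unable to encode value as smi");
      return value << 1;
    }

    static uint32_t SmiUntag(uint32_t tagged) {
      assert((tagged & kSmiTagMask) == 0);
      return tagged >> 1;
    }

    int main() {
      uint32_t raw_arg_count = 3;                // e.g. eax at a construct-call break
      uint32_t pushed = SmiTag(raw_arg_count);   // looks like an integer to the GC
      printf("pushed 0x%08x, restored %u\n",
             (unsigned)pushed, (unsigned)SmiUntag(pushed));
      return 0;
    }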
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index 212cfdea..9baf7633 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -35,21 +35,6 @@ namespace v8 {
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute the stack pointer.
@@ -58,58 +43,11 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ ASSERT(*state->pc_address != NULL);
return EXIT;
}
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 684ee14d..1631b043 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_IA32)
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -216,12 +217,28 @@ void FullCodeGenerator::EmitReturnSequence() {
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Assembler::kJSReturnSequenceLength,
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
+FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
+ Token::Value op, Expression* left, Expression* right) {
+ ASSERT(ShouldInlineSmiCase(op));
+ if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
+ // We never generate inlined constant smi operations for these.
+ return kNoConstants;
+ } else if (right->IsSmiLiteral()) {
+ return kRightConstant;
+ } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
+ return kLeftConstant;
+ } else {
+ return kNoConstants;
+ }
+}
+
+
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
@@ -246,20 +263,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -289,20 +293,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
Move(result_register(), slot);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- Move(result_register(), slot);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -330,20 +321,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
__ mov(result_register(), lit->handle());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- __ mov(result_register(), lit->handle());
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -371,20 +349,7 @@ void FullCodeGenerator::ApplyTOS(Expression::Context context) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
__ pop(result_register());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ pop(result_register());
- break;
- case kStack:
- __ mov(result_register(), Operand(esp, 0));
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -420,56 +385,7 @@ void FullCodeGenerator::DropAndApply(int count,
// For simplicity we always test the accumulator register.
__ Drop(count);
if (!reg.is(result_register())) __ mov(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ Drop(count);
- if (!reg.is(result_register())) __ mov(result_register(), reg);
- break;
- case kStack:
- if (count > 1) __ Drop(count - 1);
- __ mov(result_register(), reg);
- __ mov(Operand(esp, 0), result_register());
- break;
- }
- DoTest(context);
- break;
- }
-}
-
-
-void FullCodeGenerator::PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = materialize_true;
- break;
- case Expression::kValue:
- *if_true = materialize_true;
- *if_false = materialize_false;
- break;
- case Expression::kTest:
- *if_true = true_label_;
- *if_false = false_label_;
- break;
- case Expression::kValueTest:
- *if_true = materialize_true;
- *if_false = false_label_;
- break;
- case Expression::kTestValue:
- *if_true = true_label_;
- *if_false = materialize_false;
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -510,32 +426,6 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kTest:
break;
-
- case Expression::kValueTest:
- __ bind(materialize_true);
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::true_value());
- break;
- case kStack:
- __ push(Immediate(Factory::true_value()));
- break;
- }
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(materialize_false);
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::false_value());
- break;
- case kStack:
- __ push(Immediate(Factory::false_value()));
- break;
- }
- __ jmp(false_label_);
- break;
}
}
@@ -563,78 +453,19 @@ void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
break;
}
case Expression::kTest:
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- // If value is false it's needed.
- if (!flag) __ mov(result_register(), Factory::false_value());
- break;
- case kStack:
- // If value is false it's needed.
- if (!flag) __ push(Immediate(Factory::false_value()));
- break;
- }
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- // If value is true it's needed.
- if (flag) __ mov(result_register(), Factory::true_value());
- break;
- case kStack:
- // If value is true it's needed.
- if (flag) __ push(Immediate(Factory::true_value()));
- break;
+ if (flag) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
}
- __ jmp(flag ? true_label_ : false_label_);
break;
}
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
- // The value to test is in the accumulator. If the value might be needed
- // on the stack (value/test and test/value contexts with a stack location
- // desired), then the value is already duplicated on the stack.
- ASSERT_NE(NULL, true_label_);
- ASSERT_NE(NULL, false_label_);
-
- // In value/test and test/value expression contexts with stack as the
- // desired location, there is already an extra value on the stack. Use a
- // label to discard it if unneeded.
- Label discard;
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_false = &discard;
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_true = &discard;
- break;
- }
- break;
- }
-
+void FullCodeGenerator::DoTest(Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
// Emit the inlined tests assumed by the stub.
__ cmp(result_register(), Factory::undefined_value());
__ j(equal, if_false);
@@ -648,83 +479,28 @@ void FullCodeGenerator::DoTest(Expression::Context context) {
__ test(result_register(), Immediate(kSmiTagMask));
__ j(zero, if_true);
- // Save a copy of the value if it may be needed and isn't already saved.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- }
-
// Call the ToBoolean stub for all other cases.
ToBooleanStub stub;
__ push(result_register());
__ CallStub(&stub);
__ test(eax, Operand(eax));
- // The stub returns nonzero for true. Complete based on the context.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
-
- case Expression::kTest:
- __ j(not_zero, true_label_);
- __ jmp(false_label_);
- break;
+ // The stub returns nonzero for true.
+ Split(not_zero, if_true, if_false, fall_through);
+}
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ j(zero, &discard);
- __ pop(result_register());
- __ jmp(true_label_);
- break;
- case kStack:
- __ j(not_zero, true_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ j(not_zero, &discard);
- __ pop(result_register());
- __ jmp(false_label_);
- break;
- case kStack:
- __ j(zero, false_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ j(cc, if_true);
+ } else if (if_true == fall_through) {
+ __ j(NegateCondition(cc), if_false);
+ } else {
+ __ j(cc, if_true);
+ __ jmp(if_false);
}
}
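
Split() is what lets the later hunks drop their paired jump/jmp sequences: it emits at most one branch when either target is also the fall-through block. A rough standalone model of that selection, with strings standing in for the assembler's Label pointers (illustrative only, not part of the patch):

    #include <cstdio>
    #include <string>

    // Emulates the three cases in FullCodeGenerator::Split: branch to if_true,
    // negated branch to if_false, or branch plus unconditional jump.
    static std::string Split(const std::string& cc, const char* if_true,
                             const char* if_false, const char* fall_through) {
      if (if_false == fall_through) return "j " + cc + " " + if_true;
      if (if_true == fall_through) return "j not_" + cc + " " + if_false;
      return "j " + cc + " " + if_true + "; jmp " + if_false;
    }

    int main() {
      const char* t = "if_true";
      const char* f = "if_false";
      printf("%s\n", Split("zero", t, f, f).c_str());        // fall through to false
      printf("%s\n", Split("zero", t, f, t).c_str());        // fall through to true
      printf("%s\n", Split("zero", t, f, nullptr).c_str());  // no fall-through
      return 0;
    }

Passing the fall-through label lets straight-line callers such as the EmitIs* helpers below avoid an unconditional jump to the block that immediately follows.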
@@ -908,20 +684,21 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile the label expression.
VisitForValue(clause->label(), kAccumulator);
- // Perform the comparison as if via '==='. The comparison stub expects
- // the smi vs. smi case to be handled before it is called.
- Label slow_case;
+ // Perform the comparison as if via '==='.
__ mov(edx, Operand(esp, 0)); // Switch value.
- __ mov(ecx, edx);
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case, not_taken);
- __ cmp(edx, Operand(eax));
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ __ mov(ecx, edx);
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target()->entry_label());
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(equal, true);
__ CallStub(&stub);
__ test(eax, Operand(eax));
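
The inlined case above leans on a combined tag test: OR-ing the two words leaves the low bit clear only when both operands are smis, so a single test covers them both before the raw word compare, and anything else goes to CompareStub. A small sketch of that fast path, assuming the one-bit ia32 smi tag (illustrative only, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kSmiTagMask = 1;  // assumed ia32 layout: smi tag bit is 0

    // Fast path for '===' on two tagged words: if both are smis, a plain
    // word compare decides the case; otherwise report that the stub is needed.
    static bool StrictEqualFast(uint32_t a, uint32_t b, bool* equal) {
      if (((a | b) & kSmiTagMask) != 0) return false;  // at least one non-smi
      *equal = (a == b);
      return true;
    }

    int main() {
      bool eq;
      uint32_t three = 3u << 1, also_three = 3u << 1, heap_ptr = 0x08041235u;
      if (StrictEqualFast(three, also_three, &eq)) printf("smi case: %d\n", eq);
      if (!StrictEqualFast(three, heap_ptr, &eq)) printf("slow case: call stub\n");
      return 0;
    }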
@@ -1203,7 +980,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, Factory::undefined_value());
__ j(not_equal, &materialized);
@@ -1391,10 +1168,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1405,57 +1183,70 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForValue(property->obj(), kAccumulator);
__ push(result_register());
} else {
- VisitForValue(prop->obj(), kStack);
+ VisitForValue(property->obj(), kStack);
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kAccumulator);
__ mov(edx, Operand(esp, 0));
__ push(eax);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kStack);
}
break;
}
- // If we have a compound assignment: Get value of LHS expression and
- // store in on top of the stack.
if (expr->is_compound()) {
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
Expression::kValue);
break;
case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop);
- __ push(result_register());
+ EmitNamedPropertyLoad(property);
break;
case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop);
- __ push(result_register());
+ EmitKeyedPropertyLoad(property);
break;
}
- location_ = saved_location;
- }
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- VisitForValue(rhs, kAccumulator);
+ Token::Value op = expr->binary_op();
+ ConstantOperand constant = ShouldInlineSmiCase(op)
+ ? GetConstantOperand(op, expr->target(), expr->value())
+ : kNoConstants;
+ ASSERT(constant == kRightConstant || constant == kNoConstants);
+ if (constant == kNoConstants) {
+ __ push(eax); // Left operand goes on the stack.
+ VisitForValue(expr->value(), kAccumulator);
+ }
- // If we have a compound assignment: Apply operator.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr,
+ op,
+ Expression::kValue,
+ mode,
+ expr->target(),
+ expr->value(),
+ constant);
+ } else {
+ EmitBinaryOp(op, Expression::kValue, mode);
+ }
location_ = saved_location;
+
+ } else {
+ VisitForValue(expr->value(), kAccumulator);
}
// Record source position before possible IC call.
@@ -1496,14 +1287,325 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
- __ push(result_register());
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS,
- TypeInfo::Unknown());
+void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ Label call_stub, done;
+ __ add(Operand(eax), Immediate(value));
+ __ j(overflow, &call_stub);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ // Undo the optimistic add operation and call the shared stub.
+ __ bind(&call_stub);
+ __ sub(Operand(eax), Immediate(value));
+ Token::Value op = Token::ADD;
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ if (left_is_constant_smi) {
+ __ push(Immediate(value));
+ __ push(eax);
+ } else {
+ __ push(eax);
+ __ push(Immediate(value));
+ }
+ __ CallStub(&stub);
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ Label call_stub, done;
+ if (left_is_constant_smi) {
+ __ mov(ecx, eax);
+ __ mov(eax, Immediate(value));
+ __ sub(Operand(eax), ecx);
+ } else {
+ __ sub(Operand(eax), Immediate(value));
+ }
+ __ j(overflow, &call_stub);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ __ bind(&call_stub);
+ if (left_is_constant_smi) {
+ __ push(Immediate(value));
+ __ push(ecx);
+ } else {
+ // Undo the optimistic sub operation.
+ __ add(Operand(eax), Immediate(value));
+
+ __ push(eax);
+ __ push(Immediate(value));
+ }
+
+ Token::Value op = Token::SUB;
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
__ CallStub(&stub);
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value) {
+ Label call_stub, smi_case, done;
+ int shift_value = value->value() & 0x1f;
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi_case);
+
+ __ bind(&call_stub);
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ __ push(eax);
+ __ push(Immediate(value));
+ __ CallStub(&stub);
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ switch (op) {
+ case Token::SHL:
+ if (shift_value != 0) {
+ __ mov(edx, eax);
+ if (shift_value > 1) {
+ __ shl(edx, shift_value - 1);
+ }
+ // Convert int result to smi, checking that it is in smi range.
+ ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
+ __ add(edx, Operand(edx));
+ __ j(overflow, &call_stub);
+ __ mov(eax, edx); // Put result back into eax.
+ }
+ break;
+ case Token::SAR:
+ if (shift_value != 0) {
+ __ sar(eax, shift_value);
+ __ and_(eax, ~kSmiTagMask);
+ }
+ break;
+ case Token::SHR:
+ if (shift_value < 2) {
+ __ mov(edx, eax);
+ __ SmiUntag(edx);
+ __ shr(edx, shift_value);
+ __ test(edx, Immediate(0xc0000000));
+ __ j(not_zero, &call_stub);
+ __ SmiTag(edx);
+ __ mov(eax, edx); // Put result back into eax.
+ } else {
+ __ SmiUntag(eax);
+ __ shr(eax, shift_value);
+ __ SmiTag(eax);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Smi* value) {
+ Label smi_case, done;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi_case);
+
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ // The order of the arguments does not matter for bit-ops with a
+ // constant operand.
+ __ push(Immediate(value));
+ __ push(eax);
+ __ CallStub(&stub);
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ switch (op) {
+ case Token::BIT_OR:
+ __ or_(Operand(eax), Immediate(value));
+ break;
+ case Token::BIT_XOR:
+ __ xor_(Operand(eax), Immediate(value));
+ break;
+ case Token::BIT_AND:
+ __ and_(Operand(eax), Immediate(value));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ switch (op) {
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ EmitConstantSmiBitOp(expr, op, context, mode, value);
+ break;
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ ASSERT(!left_is_constant_smi);
+ EmitConstantSmiShiftOp(expr, op, context, mode, value);
+ break;
+ case Token::ADD:
+ EmitConstantSmiAdd(expr, context, mode, left_is_constant_smi, value);
+ break;
+ case Token::SUB:
+ EmitConstantSmiSub(expr, context, mode, left_is_constant_smi, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right,
+ ConstantOperand constant) {
+ if (constant == kRightConstant) {
+ Smi* value = Smi::cast(*right->AsLiteral()->handle());
+ EmitConstantSmiBinaryOp(expr, op, context, mode, false, value);
+ return;
+ } else if (constant == kLeftConstant) {
+ Smi* value = Smi::cast(*left->AsLiteral()->handle());
+ EmitConstantSmiBinaryOp(expr, op, context, mode, true, value);
+ return;
+ }
+
+ // Do combined smi check of the operands. Left operand is on the
+ // stack. Right operand is in eax.
+ Label done, stub_call, smi_case;
+ __ pop(edx);
+ __ mov(ecx, eax);
+ __ or_(eax, Operand(edx));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &smi_case);
+
+ __ bind(&stub_call);
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ if (stub.ArgsInRegistersSupported()) {
+ stub.GenerateCall(masm_, edx, ecx);
+ } else {
+ __ push(edx);
+ __ push(ecx);
+ __ CallStub(&stub);
+ }
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ __ mov(eax, edx); // Copy left operand in case of a stub call.
+
+ switch (op) {
+ case Token::SAR:
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ sar_cl(eax); // No checks of result necessary
+ __ SmiTag(eax);
+ break;
+ case Token::SHL: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shl_cl(eax);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(positive, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::SHR: {
+ Label result_ok;
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
+ __ shr_cl(eax);
+ __ test(eax, Immediate(0xc0000000));
+ __ j(zero, &result_ok);
+ __ SmiTag(ecx);
+ __ jmp(&stub_call);
+ __ bind(&result_ok);
+ __ SmiTag(eax);
+ break;
+ }
+ case Token::ADD:
+ __ add(eax, Operand(ecx));
+ __ j(overflow, &stub_call);
+ break;
+ case Token::SUB:
+ __ sub(eax, Operand(ecx));
+ __ j(overflow, &stub_call);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(eax);
+ __ imul(eax, Operand(ecx));
+ __ j(overflow, &stub_call);
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &done, taken);
+ __ mov(ebx, edx);
+ __ or_(ebx, Operand(ecx));
+ __ j(negative, &stub_call);
+ break;
+ }
+ case Token::BIT_OR:
+ __ or_(eax, Operand(ecx));
+ break;
+ case Token::BIT_AND:
+ __ and_(eax, Operand(ecx));
+ break;
+ case Token::BIT_XOR:
+ __ xor_(eax, Operand(ecx));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ Apply(context, eax);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode) {
+ TypeInfo type = TypeInfo::Unknown();
+ GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS, type);
+ if (stub.ArgsInRegistersSupported()) {
+ __ pop(edx);
+ stub.GenerateCall(masm_, edx, eax);
+ } else {
+ __ push(result_register());
+ __ CallStub(&stub);
+ }
Apply(context, eax);
}
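
EmitConstantSmiAdd above adds the tagged constant optimistically and only backs out on failure; since the constant is itself a valid smi, the tag bit of the sum also reveals whether the left operand was really a smi. A standalone sketch of that fast path under the same one-bit ia32 tag assumption (illustrative only, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    // Returns true and writes the tagged sum if the fast path applies;
    // returns false where the generated code undoes the add and calls
    // GenericBinaryOpStub instead.
    static bool AddConstantSmiFast(int32_t left, int32_t tagged_constant,
                                   int32_t* tagged_result) {
      int64_t wide = static_cast<int64_t>(left) + tagged_constant;
      bool overflow = (wide != static_cast<int32_t>(wide));   // j(overflow, ...)
      int32_t sum = static_cast<int32_t>(wide);
      if (overflow || (sum & 1) != 0) return false;           // non-smi or overflow
      *tagged_result = sum;
      return true;
    }

    int main() {
      int32_t left = 40 << 1, constant = 2 << 1, result;      // smi 40 + smi 2
      if (AddConstantSmiFast(left, constant, &result)) {
        printf("fast path result: %d\n", result >> 1);        // prints 42
      }
      return 0;
    }

Adding first keeps the common smi/smi case to a single add plus a couple of cheap checks; the explicit sub that undoes the add only runs on the slow path.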
@@ -1920,11 +2022,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// arguments.
- // Push function on the stack.
- VisitForValue(expr->expression(), kStack);
- // Push global object (receiver).
- __ push(CodeGenerator::GlobalObject());
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForValue(expr->expression(), kStack);
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -1937,16 +2039,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// constructor invocation.
SetSourcePosition(expr->position());
- // Load function, arg_count into edi and eax.
+ // Load function and argument count into edi and eax.
__ Set(eax, Immediate(arg_count));
- // Function is in esp[arg_count + 1].
- __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ mov(edi, Operand(esp, arg_count * kPointerSize));
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in eax, or pop it.
- DropAndApply(1, context_, eax);
+ Apply(context_, eax);
}
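
With the global object no longer pushed as a receiver, the constructor sits exactly arg_count slots below the stack top after the arguments are pushed, which is why edi can be loaded with a fixed displacement instead of the old scaled-index form that skipped a receiver slot. A small stack model of that layout (illustrative only, not part of the patch):

    #include <cstdio>
    #include <vector>

    int main() {
      const int kPointerSize = 4;                // ia32 word size
      std::vector<const char*> stack;            // index 0 is the deepest slot
      stack.push_back("constructor");            // VisitForValue(expression, kStack)
      stack.push_back("arg0");
      stack.push_back("arg1");
      stack.push_back("arg2");
      int arg_count = 3;
      // esp points at the last push; the constructor is arg_count slots away.
      int byte_offset = arg_count * kPointerSize;
      const char* edi = stack[stack.size() - 1 - arg_count];
      printf("edi <- [esp + %d] = %s\n", byte_offset, edi);
      return 0;
    }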
@@ -1958,11 +2057,12 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_true);
- __ jmp(if_false);
+ Split(zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1976,11 +2076,12 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
- __ j(zero, if_true);
- __ jmp(if_false);
+ Split(zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -1994,7 +2095,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
@@ -2009,8 +2112,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(below, if_false);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
- __ j(below_equal, if_true);
- __ jmp(if_false);
+ Split(below_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2024,13 +2126,14 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
- __ j(above_equal, if_true);
- __ jmp(if_false);
+ Split(above_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2044,15 +2147,16 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
- __ jmp(if_false);
+ Split(not_zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2067,7 +2171,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
@@ -2085,13 +2191,14 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2105,13 +2212,14 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2125,13 +2233,14 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ test(eax, Immediate(kSmiTagMask));
__ j(equal, if_false);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2144,7 +2253,9 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2160,8 +2271,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2177,12 +2287,13 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(ebx);
__ cmp(eax, Operand(ebx));
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2640,14 +2751,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
- __ CallRuntime(Runtime::kRegExpCloneResult, 1);
- Apply(context_, eax);
-}
-
-
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
VisitForValue(args->at(0), kStack);
@@ -2745,6 +2848,46 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(eax);
+ }
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ test(FieldOperand(eax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ Split(zero, if_true, if_false, fall_through);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(eax);
+ }
+
+ __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+ __ IndexFromHash(eax, eax);
+
+ Apply(context_, eax);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -2845,19 +2988,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- switch (location_) {
- case kAccumulator:
- __ mov(result_register(), Factory::undefined_value());
- break;
- case kStack:
- __ push(Immediate(Factory::undefined_value()));
- break;
- }
- // Fall through.
case Expression::kTest:
- case Expression::kValueTest:
__ jmp(false_label_);
break;
}
@@ -2866,45 +2997,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
-
+ Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
-
- VisitForControl(expr->expression(), if_true, if_false);
-
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ mov(eax, CodeGenerator::GlobalObject());
- __ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ call(ic, RelocInfo::CODE_TARGET);
- __ push(eax);
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ push(esi);
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(eax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitForValue(expr->expression(), kStack);
- }
-
+ VisitForTypeofValue(expr->expression(), kStack);
__ CallRuntime(Runtime::kTypeof, 1);
Apply(context_, eax);
break;
@@ -2925,9 +3033,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::SUB: {
Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
@@ -2941,28 +3047,26 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::BIT_NOT: {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register eax.
+ // The generic unary operation stub expects the argument to be
+ // in the accumulator register eax.
VisitForValue(expr->expression(), kAccumulator);
- // Avoid calling the stub for Smis.
- Label smi, done;
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, &smi);
- // Non-smi: call stub leaving result in accumulator register.
+ Label done;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label call_stub;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &call_stub);
+ __ lea(eax, Operand(eax, kSmiTagMask));
+ __ not_(eax);
+ __ jmp(&done);
+ __ bind(&call_stub);
+ }
+ bool overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode mode =
+ overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode);
__ CallStub(&stub);
- __ jmp(&done);
- // Perform operation directly on Smis.
- __ bind(&smi);
- __ not_(result_register());
- __ and_(result_register(), ~kSmiTagMask); // Remove inverted smi-tag.
__ bind(&done);
- Apply(context_, result_register());
+ Apply(context_, eax);
break;
}
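
The inlined BIT_NOT above never untags the operand: for a smi stored as value << 1, adding the tag mask and then complementing gives exactly the tagged complement, because ~(2n + 1) = -2n - 2 = 2 * ~n. A short check of that identity under the ia32 smi encoding (illustrative only, not part of the patch):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Mirrors 'lea eax, [eax + kSmiTagMask]; not eax' from the smi fast path.
    static int32_t SmiBitNotFast(int32_t tagged) {
      return ~(tagged + 1);
    }

    int main() {
      const int32_t samples[] = {0, 1, -1, 42, -12345, 0x1fffffff};
      for (int32_t v : samples) {
        int32_t tagged = v * 2;                        // smi encoding of v
        assert(SmiBitNotFast(tagged) == (~v) * 2);     // smi encoding of ~v
      }
      printf("ok\n");
      return 0;
    }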
@@ -2974,6 +3078,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// as the left-hand side.
if (!expr->expression()->IsValidLeftHandSide()) {
@@ -3022,8 +3128,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
+ if (ShouldInlineSmiCase(expr->op())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &no_conversion);
+ }
__ push(eax);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
__ bind(&no_conversion);
@@ -3038,8 +3146,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// Save the result on the stack. If we have a named or keyed property
// we store the result under the receiver that is currently on top
// of the stack.
@@ -3060,7 +3166,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Inline smi case if we are in a loop.
Label stub_call, done;
- if (loop_depth() > 0) {
+ if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
} else {
@@ -3146,68 +3252,117 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- VisitForEffect(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- VisitForValue(expr->left(), kStack);
- VisitForValue(expr->right(), kAccumulator);
- EmitBinaryOp(expr->op(), context_);
- break;
-
- default:
- UNREACHABLE();
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ mov(eax, CodeGenerator::GlobalObject());
+ __ mov(ecx, Immediate(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ call(ic, RelocInfo::CODE_TARGET);
+ if (where == kStack) __ push(eax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(esi);
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ if (where == kStack) __ push(eax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr, where);
}
}
-void FullCodeGenerator::EmitNullCompare(bool strict,
- Register obj,
- Register null_const,
- Label* if_true,
- Label* if_false,
- Register scratch) {
- __ cmp(obj, Operand(null_const));
- if (strict) {
+bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (op != Token::EQ && op != Token::EQ_STRICT) return false;
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ Literal* right_literal = right->AsLiteral();
+ if (right_literal == NULL) return false;
+ Handle<Object> right_literal_value = right_literal->handle();
+ if (!right_literal_value->IsString()) return false;
+ UnaryOperation* left_unary = left->AsUnaryOperation();
+ if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
+ Handle<String> check = Handle<String>::cast(right_literal_value);
+
+ VisitForTypeofValue(left_unary->expression(), kAccumulator);
+ if (check->Equals(Heap::number_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_true);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::string_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ // Check for undetectable objects => false.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
+ Split(below, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ cmp(eax, Factory::true_value());
__ j(equal, if_true);
- } else {
+ __ cmp(eax, Factory::false_value());
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ cmp(eax, Factory::undefined_value());
__ j(equal, if_true);
- __ cmp(obj, Factory::undefined_value());
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ // Check for undetectable objects => true.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::function_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
__ j(equal, if_true);
- __ test(obj, Immediate(kSmiTagMask));
+ // Regular expressions => 'function' (they are callable).
+ __ CmpInstanceType(edx, JS_REGEXP_TYPE);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::object_symbol())) {
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, if_false);
- // It can be an undetectable object.
- __ mov(scratch, FieldOperand(obj, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, if_true);
+ // Regular expressions => 'function', not 'object'.
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, edx);
+ __ j(equal, if_false);
+ // Check for undetectable objects => false.
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ // Check for JS objects => true.
+ __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(less, if_false);
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ Split(less_equal, if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
}
- __ jmp(if_false);
+
+ return true;
}
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
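
TryLiteralCompare above only fires for comparisons of the shape typeof <expression> == <string literal>; anything else falls through to the generic compare path below. A rough model of that pattern test with simplified stand-ins for the AST classes (illustrative only, not part of the patch):

    #include <cstdio>
    #include <string>

    // Simplified stand-ins for the parser AST consulted by TryLiteralCompare.
    struct Expr {
      bool is_typeof = false;         // UnaryOperation with op == Token::TYPEOF
      bool is_string_literal = false;
      std::string literal_value;      // only meaningful for string literals
    };

    enum Op { EQ, EQ_STRICT, LT };

    // Returns true when the inlined 'typeof x == "..."' check can be emitted.
    static bool TryLiteralCompare(Op op, const Expr& left, const Expr& right,
                                  std::string* check) {
      if (op != EQ && op != EQ_STRICT) return false;
      if (!right.is_string_literal) return false;
      if (!left.is_typeof) return false;
      *check = right.literal_value;   // e.g. "number", "string", "undefined"
      return true;
    }

    int main() {
      Expr typeof_x; typeof_x.is_typeof = true;
      Expr str; str.is_string_literal = true; str.literal_value = "number";
      std::string check;
      if (TryLiteralCompare(EQ_STRICT, typeof_x, str, &check))
        printf("emit inlined typeof check for \"%s\"\n", check.c_str());
      return 0;
    }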
@@ -3215,7 +3370,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ Apply(context_, if_true, if_false);
+ return;
+ }
VisitForValue(expr->left(), kStack);
switch (expr->op()) {
@@ -3223,8 +3390,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ cmp(eax, Factory::true_value());
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
@@ -3232,8 +3398,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub;
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(zero, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
+ // The stub returns 0 for true.
+ Split(zero, if_true, if_false, fall_through);
break;
}
@@ -3241,28 +3407,14 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForValue(expr->right(), kAccumulator);
Condition cc = no_condition;
bool strict = false;
- switch (expr->op()) {
+ switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
- case Token::EQ: {
+ case Token::EQ:
cc = equal;
__ pop(edx);
- // If either operand is constant null we do a fast compare
- // against null.
- Literal* right_literal = expr->right()->AsLiteral();
- Literal* left_literal = expr->left()->AsLiteral();
- if (right_literal != NULL && right_literal->handle()->IsNull()) {
- EmitNullCompare(strict, edx, eax, if_true, if_false, ecx);
- Apply(context_, if_true, if_false);
- return;
- } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
- EmitNullCompare(strict, eax, edx, if_true, if_false, ecx);
- Apply(context_, if_true, if_false);
- return;
- }
break;
- }
case Token::LT:
cc = less;
__ pop(edx);
@@ -3289,23 +3441,21 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case, not_taken);
- __ cmp(edx, Operand(eax));
- __ j(cc, if_true);
- __ jmp(if_false);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
+ Split(cc, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(cc, if_true);
- __ jmp(if_false);
+ Split(cc, if_true, if_false, fall_through);
}
}
@@ -3315,6 +3465,34 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForValue(expr->expression(), kAccumulator);
+ __ cmp(eax, Factory::null_value());
+ if (expr->is_strict()) {
+ Split(equal, if_true, if_false, fall_through);
+ } else {
+ __ j(equal, if_true);
+ __ cmp(eax, Factory::undefined_value());
+ __ j(equal, if_true);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, if_false);
+ // It can be an undetectable object.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(edx, Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ }
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, eax);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 283ae4dc..3d0bd796 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -519,31 +519,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
}
-// Picks out an array index from the hash field.
-static void GenerateIndexFromHash(MacroAssembler* masm,
- Register key,
- Register hash) {
- // Register use:
- // key - holds the overwritten key on exit.
- // hash - holds the key's hash. Clobbered.
-
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ and_(hash, String::kArrayIndexValueMask);
- __ shr(hash, String::kHashShift - kSmiTagSize);
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- __ mov(key, hash);
-}
-
-
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -704,7 +679,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
__ bind(&index_string);
- GenerateIndexFromHash(masm, eax, ebx);
+ __ IndexFromHash(ebx, eax);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1565,7 +1540,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
__ bind(&index_string);
- GenerateIndexFromHash(masm, ecx, ebx);
+ __ IndexFromHash(ebx, ecx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 79b40641..c2151424 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -191,81 +191,6 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::SaveRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of registers to memory location.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- mov(Operand::StaticVariable(reg_addr), reg);
- }
- }
-}
-
-
-void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of memory location to registers.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- mov(reg, Operand::StaticVariable(reg_addr));
- }
- }
-}
-
-
-void MacroAssembler::PushRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Push the content of the memory location to the stack.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- push(Operand::StaticVariable(reg_addr));
- }
- }
-}
-
-
-void MacroAssembler::PopRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Pop the content from the stack to the memory location.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- pop(Operand::StaticVariable(reg_addr));
- }
- }
-}
-
-
-void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of the stack to the memory location and adjust base.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- mov(scratch, Operand(base, 0));
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- mov(Operand::StaticVariable(reg_addr), scratch);
- lea(base, Operand(base, kPointerSize));
- }
- }
-}
-
void MacroAssembler::DebugBreak() {
Set(eax, Immediate(0));
mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak)));
@@ -274,6 +199,7 @@ void MacroAssembler::DebugBreak() {
}
#endif
+
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, Operand(dst)); // shorter than mov
@@ -377,6 +303,17 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
}
+void MacroAssembler::AbortIfNotString(Register object) {
+ test(object, Immediate(kSmiTagMask));
+ Assert(not_equal, "Operand is not a string");
+ push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Assert(below, "Operand is not a string");
+}
+
+
void MacroAssembler::AbortIfSmi(Register object) {
test(object, Immediate(kSmiTagMask));
Assert(not_equal, "Operand is a smi");
@@ -405,7 +342,8 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
leave();
}
-void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
+
+void MacroAssembler::EnterExitFramePrologue() {
// Setup the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -413,7 +351,7 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
push(ebp);
mov(ebp, Operand(esp));
- // Reserve room for entry stack pointer and push the debug marker.
+ // Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // Saved entry sp, patched before call.
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
@@ -425,21 +363,8 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
mov(Operand::StaticVariable(context_address), esi);
}
-void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Save the state of all registers to the stack from the memory
- // location. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // TODO(1243899): This should be symmetric to
- // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
- // correct here, but computed for the other call. Very error
- // prone! FIX THIS. Actually there are deeper problems with
- // register saving than this asymmetry (see the bug report
- // associated with this issue).
- PushRegistersFromMemory(kJSCallerSaved);
- }
-#endif
+void MacroAssembler::EnterExitFrameEpilogue(int argc) {
// Reserve space for arguments.
sub(Operand(esp), Immediate(argc * kPointerSize));
@@ -455,44 +380,30 @@ void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
- EnterExitFramePrologue(mode);
+void MacroAssembler::EnterExitFrame() {
+ EnterExitFramePrologue();
// Setup argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset));
- EnterExitFrameEpilogue(mode, 2);
+ EnterExitFrameEpilogue(2);
}
-void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
- int stack_space,
+void MacroAssembler::EnterApiExitFrame(int stack_space,
int argc) {
- EnterExitFramePrologue(mode);
+ EnterExitFramePrologue();
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
- EnterExitFrameEpilogue(mode, argc);
+ EnterExitFrameEpilogue(argc);
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Restore the memory copy of the registers by digging them out from
- // the stack. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // It's okay to clobber register ebx below because we don't need
- // the function pointer after this.
- const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
- lea(ebx, Operand(ebp, kOffset));
- CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
- }
-#endif
-
+void MacroAssembler::LeaveExitFrame() {
// Get the return address from the stack and restore the frame pointer.
mov(ecx, Operand(ebp, 1 * kPointerSize));
mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -1040,6 +951,25 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // The assert checks that the constants for the maximum number of digits
+ // for an array index cached in the hash field and the number of bits
+ // reserved for it do not conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ and_(hash, String::kArrayIndexValueMask);
+ STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
+ if (String::kHashShift > kSmiTagSize) {
+ shr(hash, String::kHashShift - kSmiTagSize);
+ }
+ if (!index.is(hash)) {
+ mov(index, hash);
+ }
+}
+
+
void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
CallRuntime(Runtime::FunctionForId(id), num_arguments);
}
@@ -1369,6 +1299,30 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ mov(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ mov(function, Operand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map) {
+ // Load the initial map. The global functions all have initial maps.
+ mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (FLAG_debug_code) {
+ Label ok, fail;
+ CheckMap(map, Factory::meta_map(), &fail, false);
+ jmp(&ok);
+ bind(&fail);
+ Abort("Global functions must have initial map");
+ bind(&ok);
+ }
+}
+
void MacroAssembler::Ret() {
ret(0);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index e5abfb46..5e850c01 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -99,13 +99,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugger Support
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void PushRegistersFromMemory(RegList regs);
- void PopRegistersToMemory(RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
void DebugBreak();
#endif
@@ -128,18 +121,25 @@ class MacroAssembler: public Assembler {
// Expects the number of arguments in register eax and
// sets up the number of arguments in register edi and the pointer
// to the first argument in register esi.
- void EnterExitFrame(ExitFrame::Mode mode);
+ void EnterExitFrame();
- void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);
+ void EnterApiExitFrame(int stack_space, int argc);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
- void LeaveExitFrame(ExitFrame::Mode mode);
+ void LeaveExitFrame();
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global function with the given index.
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same.
+ void LoadGlobalFunctionInitialMap(Register function, Register map);
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -267,6 +267,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is a smi. Used in debug code.
void AbortIfSmi(Register object);
+ // Abort execution if argument is not a string. Used in debug code.
+ void AbortIfNotString(Register object);
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -396,6 +399,12 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
// ---------------------------------------------------------------------------
// Runtime calls
@@ -564,8 +573,8 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
- void EnterExitFramePrologue(ExitFrame::Mode mode);
- void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
+ void EnterExitFramePrologue();
+ void EnterExitFrameEpilogue(int argc);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index a7930fb1..2aab7a8d 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -31,11 +31,9 @@
#include "unicode.h"
#include "log.h"
-#include "ast.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
-#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
namespace v8 {
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index c6c65f07..7fc3f811 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -257,16 +257,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
- // Load the global or builtins object from the current context.
- __ mov(prototype, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- __ mov(prototype,
- FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- __ mov(prototype, Operand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ mov(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadGlobalFunction(index, prototype);
+ __ LoadGlobalFunctionInitialMap(prototype, prototype);
// Load the prototype from the initial map.
__ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
}
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index ff9132cf..5f1e1e4e 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -1143,9 +1143,9 @@ Result VirtualFrame::CallConstructor(int arg_count) {
// and receiver on the stack.
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
// Duplicate the function before preparing the frame.
- PushElementAt(arg_count + 1);
+ PushElementAt(arg_count);
Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill args and receiver.
+ PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
function.ToRegister(edi);
// Constructors are called with the number of arguments in register
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 70bbaf8c..94dbd5f5 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -108,10 +108,10 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
}
-Map* IC::GetCodeCacheMap(Object* object, InlineCacheHolderFlag holder) {
+JSObject* IC::GetCodeCacheHolder(Object* object, InlineCacheHolderFlag holder) {
Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
ASSERT(map_owner->IsJSObject());
- return JSObject::cast(map_owner)->map();
+ return JSObject::cast(map_owner);
}
diff --git a/src/ic.cc b/src/ic.cc
index a5370a6f..b4a333ec 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -165,14 +165,14 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
// The stub was generated for JSObject but called for non-JSObject.
- // IC::GetCodeCacheMap is not applicable.
+ // IC::GetCodeCacheHolder is not applicable.
return MONOMORPHIC;
} else if (cache_holder == PROTOTYPE_MAP &&
receiver->GetPrototype()->IsNull()) {
- // IC::GetCodeCacheMap is not applicable.
+ // IC::GetCodeCacheHolder is not applicable.
return MONOMORPHIC;
}
- Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
+ Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map();
// Decide whether the inline cache failed because of changes to the
// receiver itself or changes to one of its prototypes.
diff --git a/src/ic.h b/src/ic.h
index a02f272f..17450cc3 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -123,8 +123,8 @@ class IC {
JSObject* holder);
static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
JSObject* holder);
- static inline Map* GetCodeCacheMap(Object* object,
- InlineCacheHolderFlag holder);
+ static inline JSObject* GetCodeCacheHolder(Object* object,
+ InlineCacheHolderFlag holder);
protected:
Address fp() const { return fp_; }
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 5f7e583c..5a8749e5 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -739,7 +739,7 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
Handle<String> name_handle(String::cast(info->name()));
info_wrapper.SetProperties(name_handle, info->start_position(),
info->end_position(), info);
- array->SetElement(i, *(info_wrapper.GetJSArray()));
+ SetElement(array, i, info_wrapper.GetJSArray());
}
}
@@ -802,25 +802,6 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
};
-class FrameCookingThreadVisitor : public ThreadVisitor {
- public:
- void VisitThread(ThreadLocalTop* top) {
- StackFrame::CookFramesForThread(top);
- }
-};
-
-class FrameUncookingThreadVisitor : public ThreadVisitor {
- public:
- void VisitThread(ThreadLocalTop* top) {
- StackFrame::UncookFramesForThread(top);
- }
-};
-
-static void IterateAllThreads(ThreadVisitor* visitor) {
- Top::IterateThread(visitor);
- ThreadManager::IterateArchivedThreads(visitor);
-}
-
// Finds all references to original and replaces them with substitution.
static void ReplaceCodeObject(Code* original, Code* substitution) {
ASSERT(!Heap::InNewSpace(substitution));
@@ -836,13 +817,7 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
// so temporary replace the pointers with offset numbers
// in prologue/epilogue.
{
- FrameCookingThreadVisitor cooking_visitor;
- IterateAllThreads(&cooking_visitor);
-
Heap::IterateStrongRoots(&visitor, VISIT_ALL);
-
- FrameUncookingThreadVisitor uncooking_visitor;
- IterateAllThreads(&uncooking_visitor);
}
// Now iterate over all pointers of all objects, including code_target
@@ -1384,8 +1359,9 @@ static const char* DropActivationsInActiveThread(
for (int i = 0; i < array_len; i++) {
if (result->GetElement(i) ==
Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
- result->SetElement(i, Smi::FromInt(
- LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
+ Handle<Object> replaced(
+ Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
+ SetElement(result, i, replaced);
}
}
return NULL;
diff --git a/src/log.cc b/src/log.cc
index e083f01a..0bca5ebd 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -30,6 +30,7 @@
#include "v8.h"
#include "bootstrapper.h"
+#include "code-stubs.h"
#include "global-handles.h"
#include "log.h"
#include "macro-assembler.h"
@@ -1266,7 +1267,8 @@ void Logger::LogCodeObject(Object* object) {
case Code::BINARY_OP_IC:
// fall through
case Code::STUB:
- description = CodeStub::MajorName(code_object->major_key(), true);
+ description =
+ CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
if (description == NULL)
description = "A stub from the snapshot";
tag = Logger::STUB_TAG;
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 686a61c3..d261f57d 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -83,4 +83,31 @@ const int kInvalidProtoDepth = -1;
#error Unsupported target architecture.
#endif
+namespace v8 {
+namespace internal {
+
+// Support for "structured" code comments.
+#ifdef DEBUG
+
+class Comment {
+ public:
+ Comment(MacroAssembler* masm, const char* msg);
+ ~Comment();
+
+ private:
+ MacroAssembler* masm_;
+ const char* msg_;
+};
+
+#else
+
+class Comment {
+ public:
+ Comment(MacroAssembler*, const char*) {}
+};
+
+#endif // DEBUG
+
+} } // namespace v8::internal
+
#endif // V8_MACRO_ASSEMBLER_H_
diff --git a/src/macros.py b/src/macros.py
index 643a2851..1ceb6201 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -120,7 +120,7 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
-macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
+macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e7a26194..162b3d63 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -85,11 +85,15 @@ void MarkCompactCollector::CollectGarbage() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
EncodeForwardingAddresses();
+ Heap::MarkMapPointersAsEncoded(true);
UpdatePointers();
+ Heap::MarkMapPointersAsEncoded(false);
+ PcToCodeCache::FlushPcToCodeCache();
RelocateObjects();
} else {
SweepSpaces();
+ PcToCodeCache::FlushPcToCodeCache();
}
Finish();
@@ -1185,8 +1189,6 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
-static const uint32_t kSingleFreeEncoding = 0;
-static const uint32_t kMultiFreeEncoding = 1;
// Encode a free region, defined by the given start address and size, in the
@@ -1194,10 +1196,10 @@ static const uint32_t kMultiFreeEncoding = 1;
void EncodeFreeRegion(Address free_start, int free_size) {
ASSERT(free_size >= kIntSize);
if (free_size == kIntSize) {
- Memory::uint32_at(free_start) = kSingleFreeEncoding;
+ Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
} else {
ASSERT(free_size >= 2 * kIntSize);
- Memory::uint32_at(free_start) = kMultiFreeEncoding;
+ Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
Memory::int_at(free_start + kIntSize) = free_size;
}
@@ -1627,7 +1629,7 @@ static void SweepNewSpace(NewSpace* space) {
}
-static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
+static void SweepSpace(PagedSpace* space) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of paged space we are trying to find longest sequences
@@ -1668,10 +1670,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start,
- static_cast<int>(current - free_start),
- true,
- false);
+ space->DeallocateBlock(free_start,
+ static_cast<int>(current - free_start),
+ true);
is_previous_alive = true;
}
} else {
@@ -1701,7 +1702,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
- dealloc(free_start, size_in_bytes, false, true);
+ space->DeallocateBlock(free_start, size_in_bytes, false);
}
}
} else {
@@ -1717,7 +1718,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
if (last_free_size > 0) {
Page::FromAddress(last_free_start)->
SetAllocationWatermark(last_free_start);
- dealloc(last_free_start, last_free_size, true, true);
+ space->DeallocateBlock(last_free_start, last_free_size, true);
last_free_start = NULL;
last_free_size = 0;
}
@@ -1748,7 +1749,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
- dealloc(last_free_start, last_free_size, false, true);
+ space->DeallocateBlock(last_free_start, last_free_size, false);
new_allocation_top = last_free_start;
}
@@ -1769,61 +1770,6 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
}
-void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateOldDataBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateCodeBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateMapBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- // Objects in map space are assumed to have size Map::kSize and a
- // valid map in their first word. Thus, we break the free block up into
- // chunks and free them separately.
- ASSERT(size_in_bytes % Map::kSize == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += Map::kSize) {
- Heap::map_space()->Free(a, add_to_freelist);
- }
-}
-
-
-void MarkCompactCollector::DeallocateCellBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- // Free-list elements in cell space are assumed to have a fixed size.
- // We break the free block into chunks and add them to the free list
- // individually.
- int size = Heap::cell_space()->object_size_in_bytes();
- ASSERT(size_in_bytes % size == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += size) {
- Heap::cell_space()->Free(a, add_to_freelist);
- }
-}
-
-
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
// Objects in the active semispace of the young generation may be
@@ -2088,14 +2034,14 @@ void MarkCompactCollector::SweepSpaces() {
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
- SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
- SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
- SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
+ SweepSpace(Heap::old_pointer_space());
+ SweepSpace(Heap::old_data_space());
+ SweepSpace(Heap::code_space());
+ SweepSpace(Heap::cell_space());
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
SweepNewSpace(Heap::new_space());
}
- SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+ SweepSpace(Heap::map_space());
Heap::IterateDirtyRegions(Heap::map_space(),
&Heap::IteratePointersInDirtyMapsRegion,
diff --git a/src/mark-compact.h b/src/mark-compact.h
index a5fd8d31..72a6fa3b 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -36,15 +36,6 @@ namespace internal {
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
-// Callback function for non-live blocks in the old generation.
-// If add_to_freelist is false then just accounting stats are updated and
-// no attempt to add area to free list is made.
-typedef void (*DeallocateFunction)(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
-
// Forward declarations.
class RootMarkingVisitor;
class MarkingVisitor;
@@ -121,11 +112,17 @@ class MarkCompactCollector: public AllStatic {
#ifdef DEBUG
// Checks whether performing mark-compact collection.
static bool in_use() { return state_ > PREPARE_GC; }
+ static bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif
// Determine type of object and emit deletion log event.
static void ReportDeleteIfNeeded(HeapObject* obj);
+ // Distinguishable invalid map encodings (for single word and multiple words)
+ // that indicate free regions.
+ static const uint32_t kSingleFreeEncoding = 0;
+ static const uint32_t kMultiFreeEncoding = 1;
+
private:
#ifdef DEBUG
enum CollectorState {
@@ -323,33 +320,6 @@ class MarkCompactCollector: public AllStatic {
static int IterateLiveObjectsInRange(Address start, Address end,
HeapObjectCallback size_func);
- // Callback functions for deallocating non-live blocks in the old
- // generation.
- static void DeallocateOldPointerBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateOldDataBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateCodeBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateMapBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateCellBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
diff --git a/src/memory.h b/src/memory.h
index 503492a4..27f32f7a 100644
--- a/src/memory.h
+++ b/src/memory.h
@@ -36,6 +36,10 @@ namespace internal {
class Memory {
public:
+ static uint8_t& uint8_at(Address addr) {
+ return *reinterpret_cast<uint8_t*>(addr);
+ }
+
static uint16_t& uint16_at(Address addr) {
return *reinterpret_cast<uint16_t*>(addr);
}
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 91aba269..6d49d750 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -30,6 +30,7 @@
#include "disassembler.h"
#include "disasm.h"
#include "jsregexp.h"
+#include "objects-visiting.h"
namespace v8 {
namespace internal {
@@ -648,6 +649,17 @@ void Map::MapVerify() {
}
+void Map::NormalizedMapVerify() {
+ MapVerify();
+ ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
+ ASSERT_EQ(Heap::empty_fixed_array(), code_cache());
+ ASSERT_EQ(0, pre_allocated_property_fields());
+ ASSERT_EQ(0, unused_property_fields());
+ ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
+ visitor_id());
+}
+
+
void CodeCache::CodeCachePrint() {
HeapObject::PrintHeader("CodeCache");
PrintF("\n - default_cache: ");
@@ -1363,6 +1375,21 @@ void JSFunctionResultCache::JSFunctionResultCacheVerify() {
}
+void NormalizedMapCache::NormalizedMapCacheVerify() {
+ FixedArray::cast(this)->Verify();
+ if (FLAG_enable_slow_asserts) {
+ for (int i = 0; i < length(); i++) {
+ Object* e = get(i);
+ if (e->IsMap()) {
+ Map::cast(e)->NormalizedMapVerify();
+ } else {
+ ASSERT(e->IsUndefined());
+ }
+ }
+ }
+}
+
+
#endif // DEBUG
} } // namespace v8::internal
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 0ef39fcd..2a54062f 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -35,11 +35,13 @@
#ifndef V8_OBJECTS_INL_H_
#define V8_OBJECTS_INL_H_
-#include "memory.h"
+#include "objects.h"
#include "contexts.h"
#include "conversions-inl.h"
-#include "objects.h"
+#include "heap.h"
+#include "memory.h"
#include "property.h"
+#include "spaces.h"
namespace v8 {
namespace internal {
@@ -575,6 +577,18 @@ bool Object::IsJSFunctionResultCache() {
}
+bool Object::IsNormalizedMapCache() {
+ if (!IsFixedArray()) return false;
+ if (FixedArray::cast(this)->length() != NormalizedMapCache::kEntries) {
+ return false;
+ }
+#ifdef DEBUG
+ reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
+#endif
+ return true;
+}
+
+
bool Object::IsCompilationCacheTable() {
return IsHashTable();
}
@@ -1660,6 +1674,7 @@ CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(JSFunctionResultCache)
+CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(CodeCacheHashTable)
CAST_ACCESSOR(MapCache)
@@ -2306,14 +2321,13 @@ int Code::arguments_count() {
}
-CodeStub::Major Code::major_key() {
+int Code::major_key() {
ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
- return static_cast<CodeStub::Major>(READ_BYTE_FIELD(this,
- kStubMajorKeyOffset));
+ return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
}
-void Code::set_major_key(CodeStub::Major major) {
+void Code::set_major_key(int major) {
ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
ASSERT(0 <= major && major < 256);
WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
@@ -2936,7 +2950,7 @@ byte* Code::entry() {
bool Code::contains(byte* pc) {
return (instruction_start() <= pc) &&
- (pc < instruction_start() + instruction_size());
+ (pc <= instruction_start() + instruction_size());
}
diff --git a/src/objects.cc b/src/objects.cc
index 1a4ed055..9b43d245 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -54,7 +54,8 @@ const int kGetterIndex = 0;
const int kSetterIndex = 1;
-static Object* CreateJSValue(JSFunction* constructor, Object* value) {
+MUST_USE_RESULT static Object* CreateJSValue(JSFunction* constructor,
+ Object* value) {
Object* result = Heap::AllocateJSObject(constructor);
if (result->IsFailure()) return result;
JSValue::cast(result)->set_value(value);
@@ -2098,6 +2099,124 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
}
+bool NormalizedMapCache::IsCacheable(JSObject* object) {
+ // Caching for global objects is not worth it (there are too few of them).
+ return !object->IsGlobalObject();
+}
+
+
+Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
+ Object* result;
+
+ Map* fast = obj->map();
+ if (!IsCacheable(obj)) {
+ result = fast->CopyNormalized(mode);
+ if (result->IsFailure()) return result;
+ } else {
+ int index = Hash(fast) % kEntries;
+ result = get(index);
+
+ if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ // Make sure that the new slow map has exactly the same hash as the
+ // original fast map. This way we can use the hash to check if a slow map
+ // is already in the cache (see the Contains method).
+ ASSERT(Hash(fast) == Hash(Map::cast(result)));
+ // The cached map should match newly created normalized map bit-by-bit.
+ Object* fresh = fast->CopyNormalized(mode);
+ if (!fresh->IsFailure()) {
+ // Copy the unused byte so that the assertion below works.
+ Map::cast(fresh)->address()[Map::kUnusedOffset] =
+ Map::cast(result)->address()[Map::kUnusedOffset];
+ ASSERT(memcmp(Map::cast(fresh)->address(),
+ Map::cast(result)->address(),
+ Map::kSize) == 0);
+ }
+ }
+#endif
+ return result;
+ }
+
+ result = fast->CopyNormalized(mode);
+ if (result->IsFailure()) return result;
+ set(index, result);
+ }
+ Counters::normalized_maps.Increment();
+
+ return result;
+}
+
+
+bool NormalizedMapCache::Contains(Map* map) {
+ // If the map is present in the cache it can only be at one place:
+ // at the index calculated from the hash. We assume that a slow map has the
+ // same hash as the fast map it has been generated from.
+ int index = Hash(map) % kEntries;
+ return get(index) == map;
+}
+
+
+void NormalizedMapCache::Clear() {
+ int entries = length();
+ for (int i = 0; i != entries; i++) {
+ set_undefined(i);
+ }
+}
+
+
+int NormalizedMapCache::Hash(Map* fast) {
+ // For performance reasons we only hash the 3 most variable fields of a map:
+ // constructor, prototype and bit_field2.
+
+ // Shift away the tag.
+ int hash = (static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(fast->constructor())) >> 2);
+
+ // XOR-ing the prototype and constructor directly yields too many zero bits
+ // when the two pointers are close (which is fairly common).
+ // To avoid this we shift the prototype 4 bits relative to the constructor.
+ hash ^= (static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(fast->prototype())) << 2);
+
+ return hash ^ (hash >> 16) ^ fast->bit_field2();
+}
+
+
+bool NormalizedMapCache::CheckHit(Map* slow,
+ Map* fast,
+ PropertyNormalizationMode mode) {
+#ifdef DEBUG
+ slow->NormalizedMapVerify();
+#endif
+ return
+ slow->constructor() == fast->constructor() &&
+ slow->prototype() == fast->prototype() &&
+ slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
+ 0 :
+ fast->inobject_properties()) &&
+ slow->instance_type() == fast->instance_type() &&
+ slow->bit_field() == fast->bit_field() &&
+ slow->bit_field2() == fast->bit_field2();
+}
+
+
+Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
+ if (!HasFastProperties() &&
+ NormalizedMapCache::IsCacheable(this) &&
+ Top::context()->global_context()->normalized_map_cache()->
+ Contains(map())) {
+ // Replace the map with the identical copy that can be safely modified.
+ Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES);
+ if (obj->IsFailure()) return obj;
+ Counters::normalized_maps.Increment();
+
+ set_map(Map::cast(obj));
+ }
+ return map()->UpdateCodeCache(name, code);
+}
+
+
Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
int expected_additional_properties) {
if (!HasFastProperties()) return this;
@@ -2162,28 +2281,22 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
int index = map()->instance_descriptors()->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index);
- // Allocate new map.
- obj = map()->CopyDropDescriptors();
+ obj = Top::context()->global_context()->
+ normalized_map_cache()->Get(this, mode);
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);
- // Clear inobject properties if needed by adjusting the instance size and
- // putting in a filler object instead of the inobject properties.
- if (mode == CLEAR_INOBJECT_PROPERTIES && map()->inobject_properties() > 0) {
- int instance_size_delta = map()->inobject_properties() * kPointerSize;
- int new_instance_size = map()->instance_size() - instance_size_delta;
- new_map->set_inobject_properties(0);
- new_map->set_instance_size(new_instance_size);
- new_map->set_visitor_id(StaticVisitorBase::GetVisitorId(new_map));
- Heap::CreateFillerObjectAt(this->address() + new_instance_size,
- instance_size_delta);
- }
- new_map->set_unused_property_fields(0);
-
// We have now successfully allocated all the necessary objects.
// Changes can now be made with the guarantee that all of them take effect.
+
+ // Resize the object in the heap if necessary.
+ int new_instance_size = new_map->instance_size();
+ int instance_size_delta = map()->instance_size() - new_instance_size;
+ ASSERT(instance_size_delta >= 0);
+ Heap::CreateFillerObjectAt(this->address() + new_instance_size,
+ instance_size_delta);
+
set_map(new_map);
- map()->set_instance_descriptors(Heap::empty_descriptor_array());
set_properties(dictionary);
@@ -2571,7 +2684,8 @@ bool JSObject::ReferencesObject(Object* obj) {
Object* JSObject::PreventExtensions() {
// If there are fast elements we normalize.
if (HasFastElements()) {
- NormalizeElements();
+ Object* ok = NormalizeElements();
+ if (ok->IsFailure()) return ok;
}
// Make sure that we never go back to fast case.
element_dictionary()->set_requires_slow_elements();
@@ -3083,6 +3197,33 @@ Object* Map::CopyDropDescriptors() {
}
+Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
+ int new_instance_size = instance_size();
+ if (mode == CLEAR_INOBJECT_PROPERTIES) {
+ new_instance_size -= inobject_properties() * kPointerSize;
+ }
+
+ Object* result = Heap::AllocateMap(instance_type(), new_instance_size);
+ if (result->IsFailure()) return result;
+
+ if (mode != CLEAR_INOBJECT_PROPERTIES) {
+ Map::cast(result)->set_inobject_properties(inobject_properties());
+ }
+
+ Map::cast(result)->set_prototype(prototype());
+ Map::cast(result)->set_constructor(constructor());
+
+ Map::cast(result)->set_bit_field(bit_field());
+ Map::cast(result)->set_bit_field2(bit_field2());
+
+#ifdef DEBUG
+ Map::cast(result)->NormalizedMapVerify();
+#endif
+
+ return result;
+}
+
+
Object* Map::CopyDropTransitions() {
Object* new_map = CopyDropDescriptors();
if (new_map->IsFailure()) return new_map;
@@ -4850,24 +4991,18 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
}
-static inline uint32_t HashField(uint32_t hash,
- bool is_array_index,
- int length = -1) {
- uint32_t result = (hash << String::kHashShift);
- if (is_array_index) {
- // For array indexes mix the length into the hash as an array index could
- // be zero.
- ASSERT(length > 0);
- ASSERT(length <= String::kMaxArrayIndexSize);
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- ASSERT(String::kMaxArrayIndexSize < (1 << String::kArrayIndexValueBits));
- result &= ~String::kIsNotArrayIndexMask;
- result |= length << String::kArrayIndexHashLengthShift;
- } else {
- result |= String::kIsNotArrayIndexMask;
- }
- return result;
+uint32_t StringHasher::MakeCachedArrayIndex(uint32_t value, int length) {
+ value <<= String::kHashShift;
+ // For array indexes mix the length into the hash as an array index could
+ // be zero.
+ ASSERT(length > 0);
+ ASSERT(length <= String::kMaxArrayIndexSize);
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ ASSERT(String::kMaxArrayIndexSize < (1 << String::kArrayIndexValueBits));
+ value &= ~String::kIsNotArrayIndexMask;
+ value |= length << String::kArrayIndexHashLengthShift;
+ return value;
}
@@ -4875,14 +5010,11 @@ uint32_t StringHasher::GetHashField() {
ASSERT(is_valid());
if (length_ <= String::kMaxHashCalcLength) {
if (is_array_index()) {
- return v8::internal::HashField(array_index(), true, length_);
- } else {
- return v8::internal::HashField(GetHash(), false);
+ return MakeCachedArrayIndex(array_index(), length_);
}
- uint32_t payload = v8::internal::HashField(GetHash(), false);
- return payload;
+ return (GetHash() << String::kHashShift) | String::kIsNotArrayIndexMask;
} else {
- return v8::internal::HashField(length_, false);
+ return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask;
}
}
diff --git a/src/objects.h b/src/objects.h
index b23920ca..11d65ef4 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -29,7 +29,6 @@
#define V8_OBJECTS_H_
#include "builtins.h"
-#include "code-stubs.h"
#include "smart-pointer.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
@@ -638,6 +637,7 @@ class Object BASE_EMBEDDED {
inline bool IsDictionary();
inline bool IsSymbolTable();
inline bool IsJSFunctionResultCache();
+ inline bool IsNormalizedMapCache();
inline bool IsCompilationCacheTable();
inline bool IsCodeCacheHashTable();
inline bool IsMapCache();
@@ -1274,7 +1274,7 @@ class JSObject: public HeapObject {
Object* PrepareElementsForSort(uint32_t limit);
// As PrepareElementsForSort, but only on objects where elements is
// a dictionary, and it will stay a dictionary.
- Object* PrepareSlowElementsForSort(uint32_t limit);
+ MUST_USE_RESULT Object* PrepareSlowElementsForSort(uint32_t limit);
Object* SetProperty(String* key,
Object* value,
@@ -1312,12 +1312,13 @@ class JSObject: public HeapObject {
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
- Object* SetNormalizedProperty(String* name,
- Object* value,
- PropertyDetails details);
+ MUST_USE_RESULT Object* SetNormalizedProperty(String* name,
+ Object* value,
+ PropertyDetails details);
// Deletes the named property in a normalized object.
- Object* DeleteNormalizedProperty(String* name, DeleteMode mode);
+ MUST_USE_RESULT Object* DeleteNormalizedProperty(String* name,
+ DeleteMode mode);
// Returns the class name ([[Class]] property in the specification).
String* class_name();
@@ -1335,11 +1336,13 @@ class JSObject: public HeapObject {
String* name);
PropertyAttributes GetLocalPropertyAttribute(String* name);
- Object* DefineAccessor(String* name, bool is_getter, JSFunction* fun,
- PropertyAttributes attributes);
+ MUST_USE_RESULT Object* DefineAccessor(String* name,
+ bool is_getter,
+ JSFunction* fun,
+ PropertyAttributes attributes);
Object* LookupAccessor(String* name, bool is_getter);
- Object* DefineAccessor(AccessorInfo* info);
+ MUST_USE_RESULT Object* DefineAccessor(AccessorInfo* info);
// Used from Object::GetProperty().
Object* GetPropertyWithFailedAccessCheck(Object* receiver,
@@ -1390,8 +1393,8 @@ class JSObject: public HeapObject {
inline Object* GetHiddenPropertiesObject();
inline Object* SetHiddenPropertiesObject(Object* hidden_obj);
- Object* DeleteProperty(String* name, DeleteMode mode);
- Object* DeleteElement(uint32_t index, DeleteMode mode);
+ MUST_USE_RESULT Object* DeleteProperty(String* name, DeleteMode mode);
+ MUST_USE_RESULT Object* DeleteElement(uint32_t index, DeleteMode mode);
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
@@ -1419,19 +1422,20 @@ class JSObject: public HeapObject {
bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
- Object* SetFastElement(uint32_t index, Object* value);
+ MUST_USE_RESULT Object* SetFastElement(uint32_t index, Object* value);
// Set the index'th array element.
// A Failure object is returned if GC is needed.
- Object* SetElement(uint32_t index, Object* value);
+ MUST_USE_RESULT Object* SetElement(uint32_t index, Object* value);
// Returns the index'th element.
// The undefined object if index is out of bounds.
Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
- Object* SetFastElementsCapacityAndLength(int capacity, int length);
- Object* SetSlowElements(Object* length);
+ MUST_USE_RESULT Object* SetFastElementsCapacityAndLength(int capacity,
+ int length);
+ MUST_USE_RESULT Object* SetSlowElements(Object* length);
// Lookup interceptors are used for handling properties controlled by host
// objects.
@@ -1444,7 +1448,7 @@ class JSObject: public HeapObject {
bool HasRealNamedCallbackProperty(String* key);
// Initializes the array to a certain length
- Object* SetElementsLength(Object* length);
+ MUST_USE_RESULT Object* SetElementsLength(Object* length);
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
@@ -1551,6 +1555,8 @@ class JSObject: public HeapObject {
int expected_additional_properties);
Object* NormalizeElements();
+ Object* UpdateMapCodeCache(String* name, Code* code);
+
// Transform slow named properties to fast variants.
// Returns failure if allocation failed.
Object* TransformToFastProperties(int unused_property_fields);
@@ -1579,7 +1585,7 @@ class JSObject: public HeapObject {
static inline JSObject* cast(Object* obj);
// Disallow further properties to be added to the object.
- Object* PreventExtensions();
+ MUST_USE_RESULT Object* PreventExtensions();
// Dispatched behavior.
@@ -1652,16 +1658,20 @@ class JSObject: public HeapObject {
uint32_t index,
Object* value,
JSObject* holder);
- Object* SetElementWithInterceptor(uint32_t index, Object* value);
- Object* SetElementWithoutInterceptor(uint32_t index, Object* value);
+ MUST_USE_RESULT Object* SetElementWithInterceptor(uint32_t index,
+ Object* value);
+ MUST_USE_RESULT Object* SetElementWithoutInterceptor(uint32_t index,
+ Object* value);
Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
- Object* DeletePropertyPostInterceptor(String* name, DeleteMode mode);
- Object* DeletePropertyWithInterceptor(String* name);
+ MUST_USE_RESULT Object* DeletePropertyPostInterceptor(String* name,
+ DeleteMode mode);
+ MUST_USE_RESULT Object* DeletePropertyWithInterceptor(String* name);
- Object* DeleteElementPostInterceptor(uint32_t index, DeleteMode mode);
- Object* DeleteElementWithInterceptor(uint32_t index);
+ MUST_USE_RESULT Object* DeleteElementPostInterceptor(uint32_t index,
+ DeleteMode mode);
+ MUST_USE_RESULT Object* DeleteElementWithInterceptor(uint32_t index);
PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
String* name,
@@ -1683,13 +1693,14 @@ class JSObject: public HeapObject {
bool HasDenseElements();
bool CanSetCallback(String* name);
- Object* SetElementCallback(uint32_t index,
- Object* structure,
- PropertyAttributes attributes);
- Object* SetPropertyCallback(String* name,
- Object* structure,
- PropertyAttributes attributes);
- Object* DefineGetterSetter(String* name, PropertyAttributes attributes);
+ MUST_USE_RESULT Object* SetElementCallback(uint32_t index,
+ Object* structure,
+ PropertyAttributes attributes);
+ MUST_USE_RESULT Object* SetPropertyCallback(String* name,
+ Object* structure,
+ PropertyAttributes attributes);
+ MUST_USE_RESULT Object* DefineGetterSetter(String* name,
+ PropertyAttributes attributes);
void LookupInDescriptor(String* name, LookupResult* result);
@@ -1728,13 +1739,13 @@ class FixedArray: public HeapObject {
// Copy operations.
inline Object* Copy();
- Object* CopySize(int new_length);
+ MUST_USE_RESULT Object* CopySize(int new_length);
// Add the elements of a JSArray to this FixedArray.
- Object* AddKeysFromJSArray(JSArray* array);
+ MUST_USE_RESULT Object* AddKeysFromJSArray(JSArray* array);
// Compute the union of this and other.
- Object* UnionOfKeys(FixedArray* other);
+ MUST_USE_RESULT Object* UnionOfKeys(FixedArray* other);
// Copy a sub array from the receiver to dest.
void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
@@ -1873,11 +1884,12 @@ class DescriptorArray: public FixedArray {
// or null), its enumeration index is kept as is.
// If adding a real property, map transitions must be removed. If adding
// a transition, they must not be removed. All null descriptors are removed.
- Object* CopyInsert(Descriptor* descriptor, TransitionFlag transition_flag);
+ MUST_USE_RESULT Object* CopyInsert(Descriptor* descriptor,
+ TransitionFlag transition_flag);
// Remove all transitions. Return a copy of the array with all transitions
// removed, or a Failure object if the new array could not be allocated.
- Object* RemoveTransitions();
+ MUST_USE_RESULT Object* RemoveTransitions();
// Sort the instance descriptors by the hash codes of their keys.
void Sort();
@@ -1905,7 +1917,7 @@ class DescriptorArray: public FixedArray {
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- static Object* Allocate(int number_of_descriptors);
+ MUST_USE_RESULT static Object* Allocate(int number_of_descriptors);
// Casting.
static inline DescriptorArray* cast(Object* obj);
@@ -2045,8 +2057,9 @@ class HashTable: public FixedArray {
}
// Returns a new HashTable object. Might return Failure.
- static Object* Allocate(int at_least_space_for,
- PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT static Object* Allocate(
+ int at_least_space_for,
+ PretenureFlag pretenure = NOT_TENURED);
// Returns the key at entry.
Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
@@ -2140,7 +2153,7 @@ class HashTable: public FixedArray {
}
// Ensure enough space for n additional elements.
- Object* EnsureCapacity(int n, Key key);
+ MUST_USE_RESULT Object* EnsureCapacity(int n, Key key);
};
@@ -2156,7 +2169,7 @@ class HashTableKey {
virtual uint32_t HashForObject(Object* key) = 0;
// Returns the key object for storing into the hash table.
// If allocation fails, a failure object is returned.
- virtual Object* AsObject() = 0;
+ MUST_USE_RESULT virtual Object* AsObject() = 0;
// Required.
virtual ~HashTableKey() {}
};
@@ -2172,7 +2185,7 @@ class SymbolTableShape {
static uint32_t HashForObject(HashTableKey* key, Object* object) {
return key->HashForObject(object);
}
- static Object* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static Object* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -2222,7 +2235,7 @@ class MapCacheShape {
return key->HashForObject(object);
}
- static Object* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static Object* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -2310,7 +2323,7 @@ class Dictionary: public HashTable<Shape, Key> {
}
// Returns a new array for dictionary usage. Might return Failure.
- static Object* Allocate(int at_least_space_for);
+ MUST_USE_RESULT static Object* Allocate(int at_least_space_for);
// Ensure enough space for n additional elements.
Object* EnsureCapacity(int n, Key key);
@@ -2352,7 +2365,7 @@ class StringDictionaryShape {
static inline bool IsMatch(String* key, Object* other);
static inline uint32_t Hash(String* key);
static inline uint32_t HashForObject(String* key, Object* object);
- static inline Object* AsObject(String* key);
+ MUST_USE_RESULT static inline Object* AsObject(String* key);
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const bool kIsEnumerable = true;
@@ -2384,7 +2397,7 @@ class NumberDictionaryShape {
static inline bool IsMatch(uint32_t key, Object* other);
static inline uint32_t Hash(uint32_t key);
static inline uint32_t HashForObject(uint32_t key, Object* object);
- static inline Object* AsObject(uint32_t key);
+ MUST_USE_RESULT static inline Object* AsObject(uint32_t key);
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const bool kIsEnumerable = false;
@@ -2465,6 +2478,35 @@ class JSFunctionResultCache: public FixedArray {
};
+// The cache for maps used by normalized (dictionary mode) objects.
+// Such maps do not have property descriptors, so a typical program
+// needs only a very limited number of distinct normalized maps.
+class NormalizedMapCache: public FixedArray {
+ public:
+ static const int kEntries = 64;
+
+ static bool IsCacheable(JSObject* object);
+
+ Object* Get(JSObject* object, PropertyNormalizationMode mode);
+
+ bool Contains(Map* map);
+
+ void Clear();
+
+ // Casting
+ static inline NormalizedMapCache* cast(Object* obj);
+
+#ifdef DEBUG
+ void NormalizedMapCacheVerify();
+#endif
+
+ private:
+ static int Hash(Map* fast);
+
+ static bool CheckHit(Map* slow, Map* fast, PropertyNormalizationMode mode);
+};
+
+
// ByteArray represents fixed sized byte arrays. Used by the outside world,
// such as PCRE, and also by the memory allocator and garbage collector to
// fill in free blocks in the heap.
@@ -2854,8 +2896,8 @@ class Code: public HeapObject {
inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
// [major_key]: For kind STUB or BINARY_OP_IC, the major key.
- inline CodeStub::Major major_key();
- inline void set_major_key(CodeStub::Major major);
+ inline int major_key();
+ inline void set_major_key(int major);
// Flags operations.
static inline Flags ComputeFlags(Kind kind,
@@ -3121,11 +3163,13 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
- Object* CopyDropDescriptors();
+ MUST_USE_RESULT Object* CopyDropDescriptors();
+
+ MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode);
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
- Object* CopyDropTransitions();
+ MUST_USE_RESULT Object* CopyDropTransitions();
// Returns this map if it has the fast elements bit set, otherwise
// returns a copy of the map, with all transitions dropped from the
@@ -3158,7 +3202,7 @@ class Map: public HeapObject {
inline void ClearCodeCache();
// Update code cache.
- Object* UpdateCodeCache(String* name, Code* code);
+ MUST_USE_RESULT Object* UpdateCodeCache(String* name, Code* code);
// Returns the found code or undefined if absent.
Object* FindInCodeCache(String* name, Code::Flags flags);
@@ -3185,6 +3229,7 @@ class Map: public HeapObject {
#ifdef DEBUG
void MapPrint();
void MapVerify();
+ void NormalizedMapVerify();
#endif
inline int visitor_id();
@@ -3219,6 +3264,8 @@ class Map: public HeapObject {
static const int kPreAllocatedPropertyFieldsOffset =
kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte;
// The byte at position 3 is not in use at the moment.
+ static const int kUnusedByte = 3;
+ static const int kUnusedOffset = kInstanceSizesOffset + kUnusedByte;
// Byte offsets within kInstanceAttributesOffset attributes.
static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
@@ -3684,7 +3731,7 @@ class JSFunction: public JSObject {
inline Object* prototype();
inline Object* instance_prototype();
Object* SetInstancePrototype(Object* value);
- Object* SetPrototype(Object* value);
+ MUST_USE_RESULT Object* SetPrototype(Object* value);
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
@@ -4025,7 +4072,7 @@ class CompilationCacheShape {
return key->HashForObject(object);
}
- static Object* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static Object* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -4058,7 +4105,7 @@ class CodeCache: public Struct {
DECL_ACCESSORS(normal_type_cache, Object)
// Add the code object to the cache.
- Object* Update(String* name, Code* code);
+ MUST_USE_RESULT Object* Update(String* name, Code* code);
// Lookup code object in the cache. Returns code object if found and undefined
// if not.
@@ -4086,8 +4133,8 @@ class CodeCache: public Struct {
static const int kSize = kNormalTypeCacheOffset + kPointerSize;
private:
- Object* UpdateDefaultCache(String* name, Code* code);
- Object* UpdateNormalTypeCache(String* name, Code* code);
+ MUST_USE_RESULT Object* UpdateDefaultCache(String* name, Code* code);
+ MUST_USE_RESULT Object* UpdateNormalTypeCache(String* name, Code* code);
Object* LookupDefaultCache(String* name, Code::Flags flags);
Object* LookupNormalTypeCache(String* name, Code::Flags flags);
@@ -4115,7 +4162,7 @@ class CodeCacheHashTableShape {
return key->HashForObject(object);
}
- static Object* AsObject(HashTableKey* key) {
+ MUST_USE_RESULT static Object* AsObject(HashTableKey* key) {
return key->AsObject();
}
@@ -4128,7 +4175,7 @@ class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape,
HashTableKey*> {
public:
Object* Lookup(String* name, Code::Flags flags);
- Object* Put(String* name, Code* code);
+ MUST_USE_RESULT Object* Put(String* name, Code* code);
int GetIndex(String* name, Code::Flags flags);
void RemoveByIndex(int index);
@@ -4175,6 +4222,11 @@ class StringHasher {
void invalidate() { is_valid_ = false; }
+ // Calculates the hash value for a string consisting of 1 to
+ // String::kMaxArrayIndexSize digits with no leading zeros (except "0").
+ // 'value' is the decimal value represented by the string.
+ static uint32_t MakeCachedArrayIndex(uint32_t value, int length);
+
private:
uint32_t array_index() {
@@ -4990,12 +5042,13 @@ class JSArray: public JSObject {
// is set to a smi. This matches the set function on FixedArray.
inline void set_length(Smi* length);
- Object* JSArrayUpdateLengthFromIndex(uint32_t index, Object* value);
+ MUST_USE_RESULT Object* JSArrayUpdateLengthFromIndex(uint32_t index,
+ Object* value);
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
// capacity is non-zero.
- Object* Initialize(int capacity);
+ MUST_USE_RESULT Object* Initialize(int capacity);
// Set the content of the array to the content of storage.
inline void SetContent(FixedArray* storage);
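[Editor's aside, not part of the diff: the NormalizedMapCache added above is in essence a small, fixed-size, direct-mapped cache -- hash the fast map, index one of kEntries slots, verify the hit with CheckHit(), and overwrite on collision. A minimal self-contained sketch of that shape follows; the class, member names, and std::hash keying are illustrative assumptions, not V8 code.]

// Illustrative sketch only: a direct-mapped cache with kEntries slots.
#include <cstddef>
#include <functional>

template <typename Key, typename Value, int kEntries = 64>
class DirectMappedCache {
 public:
  DirectMappedCache() : used_() {}

  // Returns a pointer to the cached value for 'key', or NULL on a miss.
  Value* Get(const Key& key) {
    size_t slot = std::hash<Key>()(key) % kEntries;        // pick a slot
    if (!used_[slot] || !(keys_[slot] == key)) return NULL;  // verify the hit
    return &values_[slot];
  }

  // Stores 'value' under 'key', evicting whatever occupied the slot before.
  void Put(const Key& key, const Value& value) {
    size_t slot = std::hash<Key>()(key) % kEntries;
    keys_[slot] = key;
    values_[slot] = value;
    used_[slot] = true;
  }

 private:
  Key keys_[kEntries];
  Value values_[kEntries];
  bool used_[kEntries];
};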
diff --git a/src/parser.cc b/src/parser.cc
index 0fef2e2b..b689eb8c 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -32,6 +32,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "compiler.h"
+#include "func-name-inferrer.h"
#include "messages.h"
#include "parser.h"
#include "platform.h"
@@ -153,7 +154,7 @@ class Parser {
ParserLog* log_;
bool is_pre_parsing_;
ScriptDataImpl* pre_data_;
- bool seen_loop_stmt_; // Used for inner loop detection.
+ FuncNameInferrer* fni_;
bool inside_with() const { return with_nesting_level_ > 0; }
ParserFactory* factory() const { return factory_; }
@@ -213,6 +214,11 @@ class Parser {
ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
+ Expression* NewCompareNode(Token::Value op,
+ Expression* x,
+ Expression* y,
+ int position);
+
// Populate the constant properties fixed array for a materialized object
// literal.
void BuildObjectLiteralConstantProperties(
@@ -338,9 +344,7 @@ class Parser {
template <typename T, int initial_size>
class BufferedZoneList {
public:
-
- BufferedZoneList() :
- list_(NULL), last_(NULL) {}
+ BufferedZoneList() : list_(NULL), last_(NULL) {}
// Adds element at end of list. This element is buffered and can
// be read using last() or removed using RemoveLast until a new Add or until
@@ -411,6 +415,7 @@ class BufferedZoneList {
T* last_;
};
+
// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
class RegExpBuilder: public ZoneObject {
public:
@@ -649,6 +654,7 @@ class RegExpParser {
static const int kMaxCaptures = 1 << 16;
static const uc32 kEndMarker = (1 << 21);
+
private:
enum SubexpressionType {
INITIAL,
@@ -744,6 +750,10 @@ class TemporaryScope BASE_EMBEDDED {
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
+
+ void AddLoop() { loop_count_++; }
+ bool ContainsLoops() const { return loop_count_ > 0; }
+
private:
// Captures the number of literals that need materialization in the
// function. Includes regexp literals, and boilerplate for object
@@ -753,9 +763,14 @@ class TemporaryScope BASE_EMBEDDED {
// Properties count estimation.
int expected_property_count_;
+ // Keeps track of assignments to properties of 'this'. Used for
+ // optimizing constructors.
bool only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
+ // Captures the number of loops inside the scope.
+ int loop_count_;
+
// Bookkeeping
Parser* parser_;
TemporaryScope* parent_;
@@ -769,6 +784,7 @@ TemporaryScope::TemporaryScope(Parser* parser)
expected_property_count_(0),
only_simple_this_property_assignments_(false),
this_property_assignments_(Factory::empty_fixed_array()),
+ loop_count_(0),
parser_(parser),
parent_(parser->temp_scope_) {
parser->temp_scope_ = this;
@@ -851,11 +867,10 @@ class ParserLog BASE_EMBEDDED {
public:
virtual ~ParserLog() { }
- // Records the occurrence of a function. The returned object is
- // only guaranteed to be valid until the next function has been
- // logged.
+ // Records the occurrence of a function.
virtual FunctionEntry LogFunction(int start) { return FunctionEntry(); }
-
+ // Returns the current position in the function entry log.
+ virtual int position() { return 0; }
virtual void LogError() { }
};
@@ -896,76 +911,88 @@ class ParserRecorder: public ParserLog {
virtual void LogMessage(Scanner::Location loc,
const char* message,
Vector<const char*> args);
- void WriteString(Vector<const char> str);
- static const char* ReadString(unsigned* start, int* chars);
- List<unsigned>* store() { return &store_; }
+ Vector<unsigned> ExtractData() {
+ int total_size = ScriptDataImpl::kHeaderSize + store_.size();
+ Vector<unsigned> data = Vector<unsigned>::New(total_size);
+ memcpy(data.start(), preamble_, sizeof(preamble_));
+ if (ScriptDataImpl::kHeaderSize < total_size) {
+ store_.WriteTo(data.SubVector(ScriptDataImpl::kHeaderSize, total_size));
+ }
+ return data;
+ }
+ virtual int position() { return store_.size(); }
private:
- bool has_error_;
- List<unsigned> store_;
+ Collector<unsigned> store_;
+ unsigned preamble_[ScriptDataImpl::kHeaderSize];
+#ifdef DEBUG
+ int prev_start;
+#endif
+
+ bool has_error() {
+ return static_cast<bool>(preamble_[ScriptDataImpl::kHasErrorOffset]);
+ }
+ void WriteString(Vector<const char> str);
};
-FunctionEntry ScriptDataImpl::GetFunctionEnd(int start) {
- if (nth(last_entry_).start_pos() > start) {
- // If the last entry we looked up is higher than what we're
- // looking for then it's useless and we reset it.
- last_entry_ = 0;
- }
- for (int i = last_entry_; i < EntryCount(); i++) {
- FunctionEntry entry = nth(i);
- if (entry.start_pos() == start) {
- last_entry_ = i;
- return entry;
- }
+void ScriptDataImpl::SkipFunctionEntry(int start) {
+ ASSERT(index_ + FunctionEntry::kSize <= store_.length());
+ ASSERT(static_cast<int>(store_[index_]) == start);
+ index_ += FunctionEntry::kSize;
+}
+
+
+FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
+ // The current pre-data entry must be a FunctionEntry with the given
+ // start position.
+ if ((index_ + FunctionEntry::kSize <= store_.length())
+ && (static_cast<int>(store_[index_]) == start)) {
+ int index = index_;
+ index_ += FunctionEntry::kSize;
+ return FunctionEntry(store_.SubVector(index,
+ index + FunctionEntry::kSize));
}
return FunctionEntry();
}
bool ScriptDataImpl::SanityCheck() {
- if (store_.length() < static_cast<int>(ScriptDataImpl::kHeaderSize))
- return false;
- if (magic() != ScriptDataImpl::kMagicNumber)
- return false;
- if (version() != ScriptDataImpl::kCurrentVersion)
+ if (store_.length() < static_cast<int>(ScriptDataImpl::kHeaderSize)) {
return false;
+ }
+ if (magic() != ScriptDataImpl::kMagicNumber) return false;
+ if (version() != ScriptDataImpl::kCurrentVersion) return false;
return true;
}
-int ScriptDataImpl::EntryCount() {
- return (store_.length() - kHeaderSize) / FunctionEntry::kSize;
-}
-
-
-FunctionEntry ScriptDataImpl::nth(int n) {
- int offset = kHeaderSize + n * FunctionEntry::kSize;
- return FunctionEntry(Vector<unsigned>(store_.start() + offset,
- FunctionEntry::kSize));
-}
-
-
ParserRecorder::ParserRecorder()
- : has_error_(false), store_(4) {
- Vector<unsigned> preamble = store()->AddBlock(0, ScriptDataImpl::kHeaderSize);
- preamble[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
- preamble[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
- preamble[ScriptDataImpl::kHasErrorOffset] = false;
+ : store_(0) {
+#ifdef DEBUG
+ prev_start = -1;
+#endif
+ preamble_[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
+ preamble_[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
+ preamble_[ScriptDataImpl::kHasErrorOffset] = false;
+ preamble_[ScriptDataImpl::kSizeOffset] = 0;
+ ASSERT_EQ(4, ScriptDataImpl::kHeaderSize);
}
void ParserRecorder::WriteString(Vector<const char> str) {
- store()->Add(str.length());
- for (int i = 0; i < str.length(); i++)
- store()->Add(str[i]);
+ store_.Add(str.length());
+ for (int i = 0; i < str.length(); i++) {
+ store_.Add(str[i]);
+ }
}
-const char* ParserRecorder::ReadString(unsigned* start, int* chars) {
+const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
int length = start[0];
char* result = NewArray<char>(length + 1);
- for (int i = 0; i < length; i++)
+ for (int i = 0; i < length; i++) {
result[i] = start[i + 1];
+ }
result[length] = '\0';
if (chars != NULL) *chars = length;
return result;
@@ -974,15 +1001,16 @@ const char* ParserRecorder::ReadString(unsigned* start, int* chars) {
void ParserRecorder::LogMessage(Scanner::Location loc, const char* message,
Vector<const char*> args) {
- if (has_error_) return;
- store()->Rewind(ScriptDataImpl::kHeaderSize);
- store()->at(ScriptDataImpl::kHasErrorOffset) = true;
- store()->Add(loc.beg_pos);
- store()->Add(loc.end_pos);
- store()->Add(args.length());
+ if (has_error()) return;
+ preamble_[ScriptDataImpl::kHasErrorOffset] = true;
+ store_.Reset();
+ store_.Add(loc.beg_pos);
+ store_.Add(loc.end_pos);
+ store_.Add(args.length());
WriteString(CStrVector(message));
- for (int i = 0; i < args.length(); i++)
+ for (int i = 0; i < args.length(); i++) {
WriteString(CStrVector(args[i]));
+ }
}
@@ -995,7 +1023,7 @@ Scanner::Location ScriptDataImpl::MessageLocation() {
const char* ScriptDataImpl::BuildMessage() {
unsigned* start = ReadAddress(3);
- return ParserRecorder::ReadString(start, NULL);
+ return ReadString(start, NULL);
}
@@ -1005,7 +1033,7 @@ Vector<const char*> ScriptDataImpl::BuildArgs() {
int pos = ScriptDataImpl::kHeaderSize + Read(3);
for (int i = 0; i < arg_count; i++) {
int count = 0;
- array[i] = ParserRecorder::ReadString(ReadAddress(pos), &count);
+ array[i] = ReadString(ReadAddress(pos), &count);
pos += count + 1;
}
return Vector<const char*>(array, arg_count);
@@ -1021,10 +1049,22 @@ unsigned* ScriptDataImpl::ReadAddress(int position) {
return &store_[ScriptDataImpl::kHeaderSize + position];
}
+void ScriptDataImpl::FindStart(int position) {
+ // Only search forwards, and linearly for now.
+ while ((index_ < store_.length())
+ && (static_cast<int>(store_[index_])) < position) {
+ index_ += FunctionEntry::kSize;
+ }
+}
+
FunctionEntry ParserRecorder::LogFunction(int start) {
- if (has_error_) return FunctionEntry();
- FunctionEntry result(store()->AddBlock(0, FunctionEntry::kSize));
+#ifdef DEBUG
+ ASSERT(start > prev_start);
+ prev_start = start;
+#endif
+ if (has_error()) return FunctionEntry();
+ FunctionEntry result(store_.AddBlock(FunctionEntry::kSize, 0));
result.set_start_pos(start);
return result;
}
@@ -1214,7 +1254,7 @@ Parser::Parser(Handle<Script> script,
log_(log),
is_pre_parsing_(is_pre_parsing == PREPARSE),
pre_data_(pre_data),
- seen_loop_stmt_(false) {
+ fni_(NULL) {
}
@@ -1243,6 +1283,7 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
HistogramTimerScope timer(&Counters::parse);
Counters::total_parse_size.Increment(source->length());
+ fni_ = new FuncNameInferrer();
// Initialize parser state.
source->TryFlatten();
@@ -1278,7 +1319,8 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
0,
0,
source->length(),
- false));
+ false,
+ temp_scope.ContainsLoops()));
} else if (scanner().stack_overflow()) {
Top::StackOverflow();
}
@@ -1303,6 +1345,9 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
HistogramTimerScope timer(&Counters::parse_lazy);
Counters::total_parse_size.Increment(source->length());
+ fni_ = new FuncNameInferrer();
+ fni_->PushEnclosingName(name);
+
// Initialize parser state.
source->TryFlatten();
scanner_.Initialize(source, start_position, end_position, JAVASCRIPT);
@@ -1375,7 +1420,8 @@ FunctionLiteral* Parser::ParseJson(Handle<String> source) {
0,
0,
source->length(),
- false));
+ false,
+ temp_scope.ContainsLoops()));
} else if (scanner().stack_overflow()) {
Top::StackOverflow();
}
@@ -2080,9 +2126,12 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
VariableProxy* last_var = NULL; // the last variable declared
int nvars = 0; // the number of variables declared
do {
+ if (fni_ != NULL) fni_->Enter();
+
// Parse variable name.
if (nvars > 0) Consume(Token::COMMA);
Handle<String> name = ParseIdentifier(CHECK_OK);
+ if (fni_ != NULL) fni_->PushVariableName(name);
// Declare variable.
// Note that we *always* must treat the initial value via a separate init
@@ -2134,6 +2183,8 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
Expect(Token::ASSIGN, CHECK_OK);
position = scanner().location().beg_pos;
value = ParseAssignmentExpression(accept_IN, CHECK_OK);
+ // Don't infer if it is an "a = function(){...}();"-like expression.
+ if (fni_ != NULL && value->AsCall() == NULL) fni_->Infer();
}
// Make sure that 'const c' actually initializes 'c' to undefined
@@ -2210,6 +2261,8 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
Assignment* assignment = NEW(Assignment(op, last_var, value, position));
if (block) block->AddStatement(NEW(ExpressionStatement(assignment)));
}
+
+ if (fni_ != NULL) fni_->Leave();
} while (peek() == Token::COMMA);
if (!is_const && nvars == 1) {
@@ -2639,6 +2692,7 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
+ temp_scope_->AddLoop();
DoWhileStatement* loop = NEW(DoWhileStatement(labels));
Target target(this, loop);
@@ -2663,9 +2717,6 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
if (loop != NULL) loop->Initialize(cond, body);
-
- seen_loop_stmt_ = true;
-
return loop;
}
@@ -2674,6 +2725,7 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
+ temp_scope_->AddLoop();
WhileStatement* loop = NEW(WhileStatement(labels));
Target target(this, loop);
@@ -2685,9 +2737,6 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
if (loop != NULL) loop->Initialize(cond, body);
-
- seen_loop_stmt_ = true;
-
return loop;
}
@@ -2696,6 +2745,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
+ temp_scope_->AddLoop();
Statement* init = NULL;
Expect(Token::FOR, CHECK_OK);
@@ -2721,9 +2771,6 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Block* result = NEW(Block(NULL, 2, false));
result->AddStatement(variable_statement);
result->AddStatement(loop);
-
- seen_loop_stmt_ = true;
-
// Parsed for-in loop w/ variable/const declaration.
return result;
}
@@ -2752,9 +2799,6 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Statement* body = ParseStatement(NULL, CHECK_OK);
if (loop) loop->Initialize(expression, enumerable, body);
-
- seen_loop_stmt_ = true;
-
// Parsed for-in loop.
return loop;
@@ -2785,17 +2829,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
}
Expect(Token::RPAREN, CHECK_OK);
- seen_loop_stmt_ = false;
-
Statement* body = ParseStatement(NULL, CHECK_OK);
-
- // Mark this loop if it is an inner loop.
- if (loop && !seen_loop_stmt_) loop->set_peel_this_loop(true);
-
if (loop) loop->Initialize(init, cond, next, body);
-
- seen_loop_stmt_ = true;
-
return loop;
}
@@ -2809,8 +2844,9 @@ Expression* Parser::ParseExpression(bool accept_IN, bool* ok) {
Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK);
while (peek() == Token::COMMA) {
Expect(Token::COMMA, CHECK_OK);
+ int position = scanner().location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = NEW(BinaryOperation(Token::COMMA, result, right));
+ result = NEW(BinaryOperation(Token::COMMA, result, right, position));
}
return result;
}
@@ -2822,9 +2858,11 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// ConditionalExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
+ if (fni_ != NULL) fni_->Enter();
Expression* expression = ParseConditionalExpression(accept_IN, CHECK_OK);
if (!Token::IsAssignmentOp(peek())) {
+ if (fni_ != NULL) fni_->Leave();
// Parsed conditional expression only (no assignment).
return expression;
}
@@ -2855,6 +2893,19 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
temp_scope_->AddProperty();
}
+ if (fni_ != NULL) {
+ // Check if the right-hand side is a call to avoid inferring a
+ // name if we're dealing with an "a = function(){...}();"-like
+ // expression.
+ if ((op == Token::INIT_VAR
+ || op == Token::INIT_CONST
+ || op == Token::ASSIGN)
+ && (right->AsCall() == NULL)) {
+ fni_->Infer();
+ }
+ fni_->Leave();
+ }
+
return NEW(Assignment(op, expression, right, pos));
}
@@ -2898,6 +2949,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
Token::Value op = Next();
+ int position = scanner().location().beg_pos;
Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
// Compute some expressions involving only number literals.
@@ -2972,7 +3024,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
// For now we distinguish between comparisons and other binary
// operations. (We could combine the two and get rid of this
- // code an AST node eventually.)
+ // code and AST node eventually.)
if (Token::IsCompareOp(op)) {
// We have a comparison.
Token::Value cmp = op;
@@ -2981,7 +3033,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
- x = NEW(CompareOperation(cmp, x, y));
+ x = NewCompareNode(cmp, x, y, position);
if (cmp != op) {
// The comparison was negated - add a NOT.
x = NEW(UnaryOperation(Token::NOT, x));
@@ -2989,7 +3041,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
} else {
// We have a "normal" binary operation.
- x = NEW(BinaryOperation(op, x, y));
+ x = NEW(BinaryOperation(op, x, y, position));
}
}
}
@@ -2997,6 +3049,27 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
}
+Expression* Parser::NewCompareNode(Token::Value op,
+ Expression* x,
+ Expression* y,
+ int position) {
+ ASSERT(op != Token::NE && op != Token::NE_STRICT);
+ if (!is_pre_parsing_ && (op == Token::EQ || op == Token::EQ_STRICT)) {
+ bool is_strict = (op == Token::EQ_STRICT);
+ Literal* x_literal = x->AsLiteral();
+ if (x_literal != NULL && x_literal->IsNull()) {
+ return NEW(CompareToNull(is_strict, y));
+ }
+
+ Literal* y_literal = y->AsLiteral();
+ if (y_literal != NULL && y_literal->IsNull()) {
+ return NEW(CompareToNull(is_strict, x));
+ }
+ }
+ return NEW(CompareOperation(op, x, y, position));
+}
+
+
Expression* Parser::ParseUnaryExpression(bool* ok) {
// UnaryExpression ::
// PostfixExpression
@@ -3043,7 +3116,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
expression = NewThrowReferenceError(type);
}
- return NEW(CountOperation(true /* prefix */, op, expression));
+ int position = scanner().location().beg_pos;
+ IncrementOperation* increment = NEW(IncrementOperation(op, expression));
+ return NEW(CountOperation(true /* prefix */, increment, position));
} else {
return ParsePostfixExpression(ok);
@@ -3066,7 +3141,9 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
expression = NewThrowReferenceError(type);
}
Token::Value next = Next();
- expression = NEW(CountOperation(false /* postfix */, next, expression));
+ int position = scanner().location().beg_pos;
+ IncrementOperation* increment = NEW(IncrementOperation(next, expression));
+ expression = NEW(CountOperation(false /* postfix */, increment, position));
}
return expression;
}
@@ -3125,6 +3202,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
int pos = scanner().location().beg_pos;
Handle<String> name = ParseIdentifierName(CHECK_OK);
result = factory()->NewProperty(result, NEW(Literal(name)), pos);
+ if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -3211,6 +3289,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
int pos = scanner().location().beg_pos;
Handle<String> name = ParseIdentifierName(CHECK_OK);
result = factory()->NewProperty(result, NEW(Literal(name)), pos);
+ if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
case Token::LPAREN: {
@@ -3321,6 +3400,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::IDENTIFIER: {
Handle<String> name = ParseIdentifier(CHECK_OK);
+ if (fni_ != NULL) fni_->PushVariableName(name);
if (is_pre_parsing_) {
result = VariableProxySentinel::identifier_proxy();
} else {
@@ -3332,7 +3412,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::NUMBER: {
Consume(Token::NUMBER);
double value =
- StringToDouble(scanner_.literal_string(), ALLOW_HEX | ALLOW_OCTALS);
+ StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
result = NewNumberLiteral(value);
break;
}
@@ -3343,6 +3423,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
factory()->LookupSymbol(scanner_.literal_string(),
scanner_.literal_length());
result = NEW(Literal(symbol));
+ if (fni_ != NULL) fni_->PushLiteralName(symbol);
break;
}
@@ -3640,6 +3721,8 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
+ if (fni_ != NULL) fni_->Enter();
+
Literal* key = NULL;
Token::Value next = peek();
switch (next) {
@@ -3648,6 +3731,8 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
bool is_setter = false;
Handle<String> id =
ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+ if (fni_ != NULL) fni_->PushLiteralName(id);
+
if ((is_getter || is_setter) && peek() != Token::COLON) {
ObjectLiteral::Property* property =
ParseObjectLiteralGetSet(is_getter, CHECK_OK);
@@ -3656,6 +3741,11 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
properties.Add(property);
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
continue; // restart the while
}
// Failed to parse as get/set property, so it's just a property
@@ -3668,6 +3758,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Handle<String> string =
factory()->LookupSymbol(scanner_.literal_string(),
scanner_.literal_length());
+ if (fni_ != NULL) fni_->PushLiteralName(string);
uint32_t index;
if (!string.is_null() && string->AsArrayIndex(&index)) {
key = NewNumberLiteral(index);
@@ -3679,7 +3770,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
case Token::NUMBER: {
Consume(Token::NUMBER);
double value =
- StringToDouble(scanner_.literal_string(), ALLOW_HEX | ALLOW_OCTALS);
+ StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
key = NewNumberLiteral(value);
break;
}
@@ -3711,6 +3802,11 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// TODO(1240767): Consider allowing trailing comma.
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
+
+ if (fni_ != NULL) {
+ fni_->Infer();
+ fni_->Leave();
+ }
}
Expect(Token::RBRACE, CHECK_OK);
// Computation of literal_index must happen before pre parse bailout.
@@ -3795,10 +3891,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
-
- // Reset flag used for inner loop detection.
- seen_loop_stmt_ = false;
-
bool is_named = !var_name.is_null();
// The name associated with this function. If it's a function expression,
@@ -3862,43 +3954,52 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
bool is_lazily_compiled =
mode() == PARSE_LAZILY && top_scope_->HasTrivialOuterContext();
+ int function_block_pos = scanner_.location().beg_pos;
int materialized_literal_count;
int expected_property_count;
+ int end_pos;
bool only_simple_this_property_assignments;
Handle<FixedArray> this_property_assignments;
if (is_lazily_compiled && pre_data() != NULL) {
- FunctionEntry entry = pre_data()->GetFunctionEnd(start_pos);
+ FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
if (!entry.is_valid()) {
ReportInvalidPreparseData(name, CHECK_OK);
}
- int end_pos = entry.end_pos();
- if (end_pos <= start_pos) {
+ end_pos = entry.end_pos();
+ if (end_pos <= function_block_pos) {
// End position greater than end of stream is safe, and hard to check.
ReportInvalidPreparseData(name, CHECK_OK);
}
- Counters::total_preparse_skipped.Increment(end_pos - start_pos);
+ Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
scanner_.SeekForward(end_pos);
+ pre_data()->Skip(entry.predata_skip());
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
only_simple_this_property_assignments = false;
this_property_assignments = Factory::empty_fixed_array();
+ Expect(Token::RBRACE, CHECK_OK);
} else {
+ if (pre_data() != NULL) {
+ // Skip pre-data entry for non-lazily compiled function.
+ pre_data()->SkipFunctionEntry(function_block_pos);
+ }
+ FunctionEntry entry = log()->LogFunction(function_block_pos);
+ int predata_position_before = log()->position();
ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
materialized_literal_count = temp_scope.materialized_literal_count();
expected_property_count = temp_scope.expected_property_count();
only_simple_this_property_assignments =
temp_scope.only_simple_this_property_assignments();
this_property_assignments = temp_scope.this_property_assignments();
- }
-
- Expect(Token::RBRACE, CHECK_OK);
- int end_pos = scanner_.location().end_pos;
- FunctionEntry entry = log()->LogFunction(start_pos);
- if (entry.is_valid()) {
- entry.set_end_pos(end_pos);
- entry.set_literal_count(materialized_literal_count);
- entry.set_property_count(expected_property_count);
+ Expect(Token::RBRACE, CHECK_OK);
+ end_pos = scanner_.location().end_pos;
+ if (entry.is_valid()) {
+ entry.set_end_pos(end_pos);
+ entry.set_literal_count(materialized_literal_count);
+ entry.set_property_count(expected_property_count);
+ entry.set_predata_skip(log()->position() - predata_position_before);
+ }
}
FunctionLiteral* function_literal =
@@ -3912,16 +4013,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
num_parameters,
start_pos,
end_pos,
- function_name->length() > 0));
+ function_name->length() > 0,
+ temp_scope.ContainsLoops()));
if (!is_pre_parsing_) {
function_literal->set_function_token_position(function_token_position);
}
- // Set flag for inner loop detection. We treat loops that contain a function
- // literal not as inner loops because we avoid duplicating function literals
- // when peeling or unrolling such a loop.
- seen_loop_stmt_ = true;
-
+ if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
return function_literal;
}
}
@@ -3934,7 +4032,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
Expect(Token::MOD, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
Runtime::Function* function =
- Runtime::FunctionForName(scanner_.literal_string());
+ Runtime::FunctionForName(scanner_.literal());
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
if (function == NULL && extension_ != NULL) {
// The extension structures are only accessible while parsing the
@@ -4184,7 +4282,11 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
for (int i = 0; i < argc; i++) {
Handle<Object> element = arguments[i];
if (!element.is_null()) {
- array->SetFastElement(i, *element);
+ Object* ok = array->SetFastElement(i, *element);
+ USE(ok); // Don't get an unused variable warning.
+ // We know this doesn't cause a GC here because we allocated the JSArray
+ // large enough.
+ ASSERT(!ok->IsFailure());
}
}
ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
@@ -4221,7 +4323,7 @@ Expression* Parser::ParseJsonValue(bool* ok) {
case Token::NUMBER: {
Consume(Token::NUMBER);
ASSERT(scanner_.literal_length() > 0);
- double value = StringToDouble(scanner_.literal_string(),
+ double value = StringToDouble(scanner_.literal(),
NO_FLAGS, // Hex, octal or trailing junk.
OS::nan_value());
return NewNumberLiteral(value);
@@ -5178,10 +5280,9 @@ ScriptDataImpl* PreParse(Handle<String> source,
Bootstrapper::IsActive();
PreParser parser(no_script, allow_natives_syntax, extension);
if (!parser.PreParseProgram(source, stream)) return NULL;
- // The list owns the backing store so we need to clone the vector.
- // That way, the result will be exactly the right size rather than
- // the expected 50% too large.
- Vector<unsigned> store = parser.recorder()->store()->ToVector().Clone();
+ // Extract the accumulated data from the recorder as a single
+ // contiguous vector that we are responsible for disposing.
+ Vector<unsigned> store = parser.recorder()->ExtractData();
return new ScriptDataImpl(store);
}
diff --git a/src/parser.h b/src/parser.h
index f918a3a3..2952581a 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -68,11 +68,18 @@ class FunctionEntry BASE_EMBEDDED {
void set_literal_count(int value) { backing_[kLiteralCountOffset] = value; }
int property_count() { return backing_[kPropertyCountOffset]; }
- void set_property_count(int value) { backing_[kPropertyCountOffset] = value; }
+ void set_property_count(int value) {
+ backing_[kPropertyCountOffset] = value;
+ }
+
+ int predata_skip() { return backing_[kPredataSkipOffset]; }
+ void set_predata_skip(int value) {
+ backing_[kPredataSkipOffset] = value;
+ }
bool is_valid() { return backing_.length() > 0; }
- static const int kSize = 4;
+ static const int kSize = 5;
private:
Vector<unsigned> backing_;
@@ -80,6 +87,7 @@ class FunctionEntry BASE_EMBEDDED {
static const int kEndPosOffset = 1;
static const int kLiteralCountOffset = 2;
static const int kPropertyCountOffset = 3;
+ static const int kPredataSkipOffset = 4;
};
@@ -87,12 +95,13 @@ class ScriptDataImpl : public ScriptData {
public:
explicit ScriptDataImpl(Vector<unsigned> store)
: store_(store),
- last_entry_(0) { }
+ index_(kHeaderSize) { }
virtual ~ScriptDataImpl();
virtual int Length();
virtual const char* Data();
virtual bool HasError();
- FunctionEntry GetFunctionEnd(int start);
+ FunctionEntry GetFunctionEntry(int start);
+ void SkipFunctionEntry(int start);
bool SanityCheck();
Scanner::Location MessageLocation();
@@ -102,28 +111,33 @@ class ScriptDataImpl : public ScriptData {
bool has_error() { return store_[kHasErrorOffset]; }
unsigned magic() { return store_[kMagicOffset]; }
unsigned version() { return store_[kVersionOffset]; }
+ // Skip forward in the preparser data by the given number
+ // of unsigned ints.
+ virtual void Skip(int entries) {
+ ASSERT(entries >= 0);
+ ASSERT(entries <= store_.length() - index_);
+ index_ += entries;
+ }
static const unsigned kMagicNumber = 0xBadDead;
static const unsigned kCurrentVersion = 1;
- static const unsigned kMagicOffset = 0;
- static const unsigned kVersionOffset = 1;
- static const unsigned kHasErrorOffset = 2;
- static const unsigned kSizeOffset = 3;
- static const unsigned kHeaderSize = 4;
+ static const int kMagicOffset = 0;
+ static const int kVersionOffset = 1;
+ static const int kHasErrorOffset = 2;
+ static const int kSizeOffset = 3;
+ static const int kHeaderSize = 4;
private:
+ Vector<unsigned> store_;
+ int index_;
+
unsigned Read(int position);
unsigned* ReadAddress(int position);
- int EntryCount();
- FunctionEntry nth(int n);
-
- Vector<unsigned> store_;
- // The last entry returned. This is used to make lookup faster:
- // the next entry to return is typically the next entry so lookup
- // will usually be much faster if we start from the last entry.
- int last_entry_;
+ void FindStart(int position);
+ // Read strings written by ParserRecorder::WriteString.
+ static const char* ReadString(unsigned* start, int* chars);
};
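[Editor's aside, not part of the diff: the preparse data these classes describe is a flat array of unsigned ints -- a 4-word header (magic, version, has-error flag, size) followed by 5-word FunctionEntry records (start, end, literal count, property count, predata skip) that the parser only walks forward. Below is a minimal reader sketch under those assumptions; the names and types are invented for illustration and are not V8 code.]

#include <vector>

// Illustrative sketch only: a forward-only cursor over preparse records.
struct PreparseReader {
  static const int kHeaderSize = 4;   // magic, version, has-error, size
  static const int kEntrySize = 5;    // start, end, literals, properties, skip

  explicit PreparseReader(const std::vector<unsigned>& store)
      : store_(store), index_(kHeaderSize) {}

  // Fills 'entry' and advances if the next record starts at 'start';
  // otherwise leaves the cursor alone and reports a miss.
  bool Next(int start, unsigned (&entry)[kEntrySize]) {
    if (index_ + kEntrySize > static_cast<int>(store_.size())) return false;
    if (static_cast<int>(store_[index_]) != start) return false;
    for (int i = 0; i < kEntrySize; ++i) entry[i] = store_[index_ + i];
    index_ += kEntrySize;             // move past the consumed record
    return true;
  }

  // Skips 'entries' words, mirroring ScriptDataImpl::Skip above.
  void Skip(int entries) { index_ += entries; }

 private:
  const std::vector<unsigned>& store_;
  int index_;
};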
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 75f6fc3c..211f3f63 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -376,6 +376,11 @@ void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
}
+void PrettyPrinter::VisitIncrementOperation(IncrementOperation* node) {
+ UNREACHABLE();
+}
+
+
void PrettyPrinter::VisitCountOperation(CountOperation* node) {
Print("(");
if (node->is_prefix()) Print("%s", Token::String(node->op()));
@@ -403,6 +408,13 @@ void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
}
+void PrettyPrinter::VisitCompareToNull(CompareToNull* node) {
+ Print("(");
+ Visit(node->expression());
+ Print("%s null)", Token::String(node->op()));
+}
+
+
void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
Print("<this-function>");
}
@@ -604,11 +616,6 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->Print(StaticType::Type2String(expr->type()));
printed_first = true;
}
- if (expr->num() != AstNode::kNoNumber) {
- ast_printer_->Print(printed_first ? ", num = " : " (num = ");
- ast_printer_->Print("%d", expr->num());
- printed_first = true;
- }
if (printed_first) ast_printer_->Print(")");
}
ast_printer_->Print("\n");
@@ -667,9 +674,7 @@ void AstPrinter::PrintLiteralIndented(const char* info,
void AstPrinter::PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type,
- int num,
- bool is_primitive) {
+ StaticType* type) {
if (var == NULL) {
PrintLiteralIndented(info, value, true);
} else {
@@ -680,11 +685,6 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
pos += OS::SNPrintF(buf + pos, ", type = %s",
StaticType::Type2String(type));
}
- if (num != AstNode::kNoNumber) {
- pos += OS::SNPrintF(buf + pos, ", num = %d", num);
- }
- pos += OS::SNPrintF(buf + pos,
- is_primitive ? ", primitive" : ", non-primitive");
OS::SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
@@ -742,9 +742,7 @@ void AstPrinter::PrintParameters(Scope* scope) {
for (int i = 0; i < scope->num_parameters(); i++) {
PrintLiteralWithModeIndented("VAR", scope->parameter(i),
scope->parameter(i)->name(),
- scope->parameter(i)->type(),
- AstNode::kNoNumber,
- false);
+ scope->parameter(i)->type());
}
}
}
@@ -789,9 +787,7 @@ void AstPrinter::VisitDeclaration(Declaration* node) {
PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
node->proxy()->AsVariable(),
node->proxy()->name(),
- node->proxy()->AsVariable()->type(),
- AstNode::kNoNumber,
- node->proxy()->IsPrimitive());
+ node->proxy()->AsVariable()->type());
} else {
// function declarations
PrintIndented("FUNCTION ");
@@ -1027,7 +1023,7 @@ void AstPrinter::VisitSlot(Slot* node) {
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
- node->type(), node->num(), node->IsPrimitive());
+ node->type());
Variable* var = node->var();
if (var != NULL && var->rewrite() != NULL) {
IndentedScope indent;
@@ -1086,6 +1082,11 @@ void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
}
+void AstPrinter::VisitIncrementOperation(IncrementOperation* node) {
+ UNREACHABLE();
+}
+
+
void AstPrinter::VisitCountOperation(CountOperation* node) {
EmbeddedVector<char, 128> buf;
if (node->type()->IsKnown()) {
@@ -1115,6 +1116,15 @@ void AstPrinter::VisitCompareOperation(CompareOperation* node) {
}
+void AstPrinter::VisitCompareToNull(CompareToNull* node) {
+ const char* name = node->is_strict()
+ ? "COMPARE-TO-NULL-STRICT"
+ : "COMPARE-TO-NULL";
+ IndentedScope indent(name, node);
+ Visit(node->expression());
+}
+
+
void AstPrinter::VisitThisFunction(ThisFunction* node) {
IndentedScope indent("THIS-FUNCTION");
}
@@ -1477,6 +1487,11 @@ void JsonAstBuilder::VisitUnaryOperation(UnaryOperation* expr) {
}
+void JsonAstBuilder::VisitIncrementOperation(IncrementOperation* expr) {
+ UNREACHABLE();
+}
+
+
void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
TagScope tag(this, "CountOperation");
{
@@ -1510,6 +1525,16 @@ void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) {
}
+void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) {
+ TagScope tag(this, "CompareToNull");
+ {
+ AttributesScope attributes(this);
+ AddAttribute("is_strict", expr->is_strict());
+ }
+ Visit(expr->expression());
+}
+
+
void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
TagScope tag(this, "ThisFunction");
}
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 93ba0d95..dfff49a4 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -102,9 +102,7 @@ class AstPrinter: public PrettyPrinter {
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- StaticType* type,
- int num,
- bool is_primitive);
+ StaticType* type);
void PrintLabelsIndented(const char* info, ZoneStringList* labels);
void inc_indent() { indent_++; }
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 1c6c902a..2de7a2fb 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -492,6 +492,10 @@ CpuProfilesCollection::~CpuProfilesCollection() {
bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
ASSERT(uid > 0);
current_profiles_semaphore_->Wait();
+ if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
+ current_profiles_semaphore_->Signal();
+ return false;
+ }
for (int i = 0; i < current_profiles_.length(); ++i) {
if (strcmp(current_profiles_[i]->title(), title) == 0) {
// Ignore attempts to start profile with the same title.
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 5611b6fa..c6d6f4cb 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -299,6 +299,9 @@ class CpuProfilesCollection {
// Called from profile generator thread.
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
+ // Limits the number of profiles that can be simultaneously collected.
+ static const int kMaxSimultaneousProfiles = 100;
+
private:
const char* GetName(int args_count);
const char* GetFunctionName(String* name) {
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 9f8e2c5c..652b690d 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -28,6 +28,8 @@
#ifndef V8_REGEXP_MACRO_ASSEMBLER_H_
#define V8_REGEXP_MACRO_ASSEMBLER_H_
+#include "ast.h"
+
namespace v8 {
namespace internal {
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 73301b91..4ddf1bf6 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -28,21 +28,15 @@
#include "v8.h"
#include "ast.h"
-#include "func-name-inferrer.h"
#include "scopes.h"
#include "rewriter.h"
namespace v8 {
namespace internal {
-
class AstOptimizer: public AstVisitor {
public:
explicit AstOptimizer() : has_function_literal_(false) {}
- explicit AstOptimizer(Handle<String> enclosing_name)
- : has_function_literal_(false) {
- func_name_inferrer_.PushEnclosingName(enclosing_name);
- }
void Optimize(ZoneList<Statement*>* statements);
@@ -50,8 +44,6 @@ class AstOptimizer: public AstVisitor {
// Used for loop condition analysis. Cleared before visiting a loop
// condition, set when a function literal is visited.
bool has_function_literal_;
- // Helper object for function name inferring.
- FuncNameInferrer func_name_inferrer_;
// Helpers
void OptimizeArguments(ZoneList<Expression*>* arguments);
@@ -113,7 +105,7 @@ void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
has_function_literal_ = false;
node->cond()->set_no_negative_zero(true);
Visit(node->cond());
- node->may_have_function_literal_ = has_function_literal_;
+ node->set_may_have_function_literal(has_function_literal_);
Visit(node->body());
}
@@ -126,7 +118,7 @@ void AstOptimizer::VisitForStatement(ForStatement* node) {
has_function_literal_ = false;
node->cond()->set_no_negative_zero(true);
Visit(node->cond());
- node->may_have_function_literal_ = has_function_literal_;
+ node->set_may_have_function_literal(has_function_literal_);
}
Visit(node->body());
if (node->next() != NULL) {
@@ -211,11 +203,6 @@ void AstOptimizer::VisitDebuggerStatement(DebuggerStatement* node) {
void AstOptimizer::VisitFunctionLiteral(FunctionLiteral* node) {
has_function_literal_ = true;
-
- if (node->name()->length() == 0) {
- // Anonymous function.
- func_name_inferrer_.AddFunction(node);
- }
}
@@ -247,11 +234,6 @@ void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
var->type()->SetAsLikelySmi();
}
- if (!var->is_this() &&
- !Heap::result_symbol()->Equals(*var->name())) {
- func_name_inferrer_.PushName(var->name());
- }
-
if (FLAG_safe_int32_compiler) {
if (var->IsStackAllocated() &&
!var->is_arguments() &&
@@ -268,11 +250,6 @@ void AstOptimizer::VisitLiteral(Literal* node) {
if (literal->IsSmi()) {
node->type()->SetAsLikelySmi();
node->set_side_effect_free(true);
- } else if (literal->IsString()) {
- Handle<String> lit_str(Handle<String>::cast(literal));
- if (!Heap::prototype_symbol()->Equals(*lit_str)) {
- func_name_inferrer_.PushName(lit_str);
- }
} else if (literal->IsHeapNumber()) {
if (node->to_int32()) {
// Any HeapNumber has an int32 value if it is the input to a bit op.
@@ -299,8 +276,6 @@ void AstOptimizer::VisitArrayLiteral(ArrayLiteral* node) {
void AstOptimizer::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
- ScopedFuncNameInferrer scoped_fni(&func_name_inferrer_);
- scoped_fni.Enter();
Visit(node->properties()->at(i)->key());
Visit(node->properties()->at(i)->value());
}
@@ -314,17 +289,11 @@ void AstOptimizer::VisitCatchExtensionObject(CatchExtensionObject* node) {
void AstOptimizer::VisitAssignment(Assignment* node) {
- ScopedFuncNameInferrer scoped_fni(&func_name_inferrer_);
switch (node->op()) {
case Token::INIT_VAR:
case Token::INIT_CONST:
case Token::ASSIGN:
// No type can be infered from the general assignment.
-
- // Don't infer if it is "a = function(){...}();"-like expression.
- if (node->value()->AsCall() == NULL) {
- scoped_fni.Enter();
- }
break;
case Token::ASSIGN_BIT_OR:
case Token::ASSIGN_BIT_XOR:
@@ -430,12 +399,6 @@ void AstOptimizer::VisitCallNew(CallNew* node) {
void AstOptimizer::VisitCallRuntime(CallRuntime* node) {
- ScopedFuncNameInferrer scoped_fni(&func_name_inferrer_);
- if (Factory::InitializeVarGlobal_symbol()->Equals(*node->name()) &&
- node->arguments()->length() >= 2 &&
- node->arguments()->at(1)->AsFunctionLiteral() != NULL) {
- scoped_fni.Enter();
- }
OptimizeArguments(node->arguments());
}
@@ -472,6 +435,11 @@ void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
}
+void AstOptimizer::VisitIncrementOperation(IncrementOperation* node) {
+ UNREACHABLE();
+}
+
+
void AstOptimizer::VisitCountOperation(CountOperation* node) {
// Count operations assume that they work on Smis.
node->expression()->set_no_negative_zero(node->is_prefix() ?
@@ -704,6 +672,11 @@ void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
}
+void AstOptimizer::VisitCompareToNull(CompareToNull* node) {
+ Visit(node->expression());
+}
+
+
void AstOptimizer::VisitThisFunction(ThisFunction* node) {
USE(node);
}
@@ -978,6 +951,11 @@ void Processor::VisitUnaryOperation(UnaryOperation* node) {
}
+void Processor::VisitIncrementOperation(IncrementOperation* node) {
+ UNREACHABLE();
+}
+
+
void Processor::VisitCountOperation(CountOperation* node) {
USE(node);
UNREACHABLE();
@@ -996,6 +974,12 @@ void Processor::VisitCompareOperation(CompareOperation* node) {
}
+void Processor::VisitCompareToNull(CompareToNull* node) {
+ USE(node);
+ UNREACHABLE();
+}
+
+
void Processor::VisitThisFunction(ThisFunction* node) {
USE(node);
UNREACHABLE();
@@ -1025,7 +1009,7 @@ bool Rewriter::Optimize(FunctionLiteral* function) {
if (FLAG_optimize_ast && !body->is_empty()) {
HistogramTimerScope timer(&Counters::ast_optimization);
- AstOptimizer optimizer(function->name());
+ AstOptimizer optimizer;
optimizer.Optimize(body);
if (optimizer.HasStackOverflow()) {
return false;
diff --git a/src/runtime.cc b/src/runtime.cc
index afb0df0f..c7ec6bfb 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -98,7 +98,7 @@ namespace internal {
static StaticResource<StringInputBuffer> runtime_string_input_buffer;
-static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
+MUST_USE_RESULT static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
StackLimitCheck check;
if (check.HasOverflowed()) return Top::StackOverflow();
@@ -980,7 +980,9 @@ static Object* Runtime_DeclareContextSlot(Arguments args) {
context->set(index, *initial_value);
}
} else {
- Handle<JSObject>::cast(holder)->SetElement(index, *initial_value);
+ // The holder is an arguments object.
+ Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
+ SetElement(arguments, index, initial_value);
}
} else {
// Slow case: The property is not in the FixedArray part of the context.
@@ -1238,7 +1240,8 @@ static Object* Runtime_InitializeConstContextSlot(Arguments args) {
} else {
// The holder is an arguments object.
ASSERT((attributes & READ_ONLY) == 0);
- Handle<JSObject>::cast(holder)->SetElement(index, *value);
+ Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
+ SetElement(arguments, index, value);
}
return *value;
}
@@ -1403,8 +1406,6 @@ static Object* Runtime_RegExpCloneResult(Arguments args) {
// Copy JSObject elements as copy-on-write.
FixedArray* elements = FixedArray::cast(result->elements());
if (elements != Heap::empty_fixed_array()) {
- ASSERT(!Heap::InNewSpace(Heap::fixed_cow_array_map()));
- // No write barrier is necessary when writing old-space pointer.
elements->set_map(Heap::fixed_cow_array_map());
}
new_array->set_elements(elements);
@@ -1703,7 +1704,6 @@ static Object* Runtime_SetCode(Arguments args) {
RUNTIME_ASSERT(code->IsJSFunction());
Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
Handle<SharedFunctionInfo> shared(fun->shared());
- SetExpectedNofProperties(target, shared->expected_nof_properties());
if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
return Failure::Exception();
@@ -1748,6 +1748,17 @@ static Object* Runtime_SetCode(Arguments args) {
}
+static Object* Runtime_SetExpectedNumberOfProperties(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ CONVERT_SMI_CHECKED(num, args[1]);
+ RUNTIME_ASSERT(num >= 0);
+ SetExpectedNofProperties(function, num);
+ return Heap::undefined_value();
+}
+
+
static Object* CharFromCode(Object* char_code) {
uint32_t code;
if (char_code->ToArrayIndex(&code)) {
@@ -2807,40 +2818,6 @@ static int BoyerMooreIndexOf(Vector<const schar> subject,
}
-template <typename schar>
-static inline int SingleCharIndexOf(Vector<const schar> string,
- schar pattern_char,
- int start_index) {
- if (sizeof(schar) == 1) {
- const schar* pos = reinterpret_cast<const schar*>(
- memchr(string.start() + start_index,
- pattern_char,
- string.length() - start_index));
- if (pos == NULL) return -1;
- return static_cast<int>(pos - string.start());
- }
- for (int i = start_index, n = string.length(); i < n; i++) {
- if (pattern_char == string[i]) {
- return i;
- }
- }
- return -1;
-}
-
-
-template <typename schar>
-static int SingleCharLastIndexOf(Vector<const schar> string,
- schar pattern_char,
- int start_index) {
- for (int i = start_index; i >= 0; i--) {
- if (pattern_char == string[i]) {
- return i;
- }
- }
- return -1;
-}
-
-
// Trivial string search for shorter strings.
// On return, if "complete" is set to true, the return value is the
// final result of searching for the pattern in the subject.
@@ -2852,6 +2829,7 @@ static int SimpleIndexOf(Vector<const schar> subject,
Vector<const pchar> pattern,
int idx,
bool* complete) {
+ ASSERT(pattern.length() > 1);
// Badness is a count of how much work we have done. When we have
// done enough work we decide it's probably worth switching to a better
// algorithm.
@@ -2914,12 +2892,12 @@ static int SimpleIndexOf(Vector<const schar> subject,
if (subject[i] != pattern_first_char) continue;
}
int j = 1;
- do {
+ while (j < pattern.length()) {
if (pattern[j] != subject[i+j]) {
break;
}
j++;
- } while (j < pattern.length());
+ }
if (j == pattern.length()) {
return i;
}
@@ -2935,7 +2913,6 @@ enum StringSearchStrategy { SEARCH_FAIL, SEARCH_SHORT, SEARCH_LONG };
template <typename pchar>
static inline StringSearchStrategy InitializeStringSearch(
Vector<const pchar> pat, bool ascii_subject) {
- ASSERT(pat.length() > 1);
// We have an ASCII haystack and a non-ASCII needle. Check if there
// really is a non-ASCII character in the needle and bail out if there
// is.
@@ -3021,54 +2998,15 @@ int Runtime::StringMatch(Handle<String> sub,
int subject_length = sub->length();
if (start_index + pattern_length > subject_length) return -1;
- if (!sub->IsFlat()) {
- FlattenString(sub);
- }
-
- // Searching for one specific character is common. For one
- // character patterns linear search is necessary, so any smart
- // algorithm is unnecessary overhead.
- if (pattern_length == 1) {
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- String* seq_sub = *sub;
- if (seq_sub->IsConsString()) {
- seq_sub = ConsString::cast(seq_sub)->first();
- }
- if (seq_sub->IsAsciiRepresentation()) {
- uc16 pchar = pat->Get(0);
- if (pchar > String::kMaxAsciiCharCode) {
- return -1;
- }
- Vector<const char> ascii_vector =
- seq_sub->ToAsciiVector().SubVector(start_index, subject_length);
- const void* pos = memchr(ascii_vector.start(),
- static_cast<const char>(pchar),
- static_cast<size_t>(ascii_vector.length()));
- if (pos == NULL) {
- return -1;
- }
- return static_cast<int>(reinterpret_cast<const char*>(pos)
- - ascii_vector.start() + start_index);
- }
- return SingleCharIndexOf(seq_sub->ToUC16Vector(),
- pat->Get(0),
- start_index);
- }
-
- if (!pat->IsFlat()) {
- FlattenString(pat);
- }
+ if (!sub->IsFlat()) FlattenString(sub);
+ if (!pat->IsFlat()) FlattenString(pat);
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
// Extract flattened substrings of cons strings before determining asciiness.
String* seq_sub = *sub;
- if (seq_sub->IsConsString()) {
- seq_sub = ConsString::cast(seq_sub)->first();
- }
+ if (seq_sub->IsConsString()) seq_sub = ConsString::cast(seq_sub)->first();
String* seq_pat = *pat;
- if (seq_pat->IsConsString()) {
- seq_pat = ConsString::cast(seq_pat)->first();
- }
+ if (seq_pat->IsConsString()) seq_pat = ConsString::cast(seq_pat)->first();
// dispatch on type of strings
if (seq_pat->IsAsciiRepresentation()) {
@@ -3158,30 +3096,8 @@ static Object* Runtime_StringLastIndexOf(Arguments args) {
return Smi::FromInt(start_index);
}
- if (!sub->IsFlat()) {
- FlattenString(sub);
- }
-
- if (pat_length == 1) {
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- if (sub->IsAsciiRepresentation()) {
- uc16 pchar = pat->Get(0);
- if (pchar > String::kMaxAsciiCharCode) {
- return Smi::FromInt(-1);
- }
- return Smi::FromInt(SingleCharLastIndexOf(sub->ToAsciiVector(),
- static_cast<char>(pat->Get(0)),
- start_index));
- } else {
- return Smi::FromInt(SingleCharLastIndexOf(sub->ToUC16Vector(),
- pat->Get(0),
- start_index));
- }
- }
-
- if (!pat->IsFlat()) {
- FlattenString(pat);
- }
+ if (!sub->IsFlat()) FlattenString(sub);
+ if (!pat->IsFlat()) FlattenString(pat);
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
@@ -3359,88 +3275,6 @@ static void SetLastMatchInfoNoCaptures(Handle<String> subject,
}
-template <typename schar>
-static bool SearchCharMultiple(Vector<schar> subject,
- String* pattern,
- schar pattern_char,
- FixedArrayBuilder* builder,
- int* match_pos) {
- // Position of last match.
- int pos = *match_pos;
- int subject_length = subject.length();
- while (pos < subject_length) {
- int match_end = pos + 1;
- if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
- *match_pos = pos;
- return false;
- }
- int new_pos = SingleCharIndexOf(subject, pattern_char, match_end);
- if (new_pos >= 0) {
- // Match has been found.
- if (new_pos > match_end) {
- ReplacementStringBuilder::AddSubjectSlice(builder, match_end, new_pos);
- }
- pos = new_pos;
- builder->Add(pattern);
- } else {
- break;
- }
- }
- if (pos + 1 < subject_length) {
- ReplacementStringBuilder::AddSubjectSlice(builder, pos + 1, subject_length);
- }
- *match_pos = pos;
- return true;
-}
-
-
-static bool SearchCharMultiple(Handle<String> subject,
- Handle<String> pattern,
- Handle<JSArray> last_match_info,
- FixedArrayBuilder* builder) {
- ASSERT(subject->IsFlat());
- ASSERT_EQ(1, pattern->length());
- uc16 pattern_char = pattern->Get(0);
- // Treating position before first as initial "previous match position".
- int match_pos = -1;
-
- for (;;) { // Break when search complete.
- builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
- AssertNoAllocation no_gc;
- if (subject->IsAsciiRepresentation()) {
- if (pattern_char > String::kMaxAsciiCharCode) {
- break;
- }
- Vector<const char> subject_vector = subject->ToAsciiVector();
- char pattern_ascii_char = static_cast<char>(pattern_char);
- bool complete = SearchCharMultiple<const char>(subject_vector,
- *pattern,
- pattern_ascii_char,
- builder,
- &match_pos);
- if (complete) break;
- } else {
- Vector<const uc16> subject_vector = subject->ToUC16Vector();
- bool complete = SearchCharMultiple<const uc16>(subject_vector,
- *pattern,
- pattern_char,
- builder,
- &match_pos);
- if (complete) break;
- }
- }
-
- if (match_pos >= 0) {
- SetLastMatchInfoNoCaptures(subject,
- last_match_info,
- match_pos,
- match_pos + 1);
- return true;
- }
- return false; // No matches at all.
-}
-
-
template <typename schar, typename pchar>
static bool SearchStringMultiple(Vector<schar> subject,
String* pattern,
@@ -3518,7 +3352,6 @@ static bool SearchStringMultiple(Handle<String> subject,
FixedArrayBuilder* builder) {
ASSERT(subject->IsFlat());
ASSERT(pattern->IsFlat());
- ASSERT(pattern->length() > 1);
// Treating as if a previous match was before first character.
int match_pos = -pattern->length();
@@ -3776,14 +3609,6 @@ static Object* Runtime_RegExpExecMultiple(Arguments args) {
if (regexp->TypeTag() == JSRegExp::ATOM) {
Handle<String> pattern(
String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
- int pattern_length = pattern->length();
- if (pattern_length == 1) {
- if (SearchCharMultiple(subject, pattern, last_match_info, &builder)) {
- return *builder.ToJSArray(result_array);
- }
- return Heap::null_value();
- }
-
if (!pattern->IsFlat()) FlattenString(pattern);
if (SearchStringMultiple(subject, pattern, last_match_info, &builder)) {
return *builder.ToJSArray(result_array);
@@ -4097,7 +3922,8 @@ static Object* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
if (result.IsProperty() &&
(result.type() == FIELD || result.type() == NORMAL
|| result.type() == CONSTANT_FUNCTION)) {
- obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
+ Object* ok = obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
+ if (ok->IsFailure()) return ok;
}
return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
}
@@ -4846,6 +4672,19 @@ static Object* Runtime_StringToNumber(Arguments args) {
if (minus) {
if (d == 0) return Heap::minus_zero_value();
d = -d;
+ } else if (!subject->HasHashCode() &&
+ len <= String::kMaxArrayIndexSize &&
+ (len == 1 || data[0] != '0')) {
+ // String hash is not calculated yet but all the data are present.
+ // Update the hash field to speed up sequential conversions.
+ uint32_t hash = StringHasher::MakeCachedArrayIndex(d, len);
+#ifdef DEBUG
+ ASSERT((hash & String::kContainsCachedArrayIndexMask) == 0);
+ subject->Hash(); // Force hash calculation.
+ ASSERT_EQ(static_cast<int>(subject->hash_field()),
+ static_cast<int>(hash));
+#endif
+ subject->set_hash_field(hash);
}
return Smi::FromInt(d);
}
@@ -5371,23 +5210,6 @@ void FindStringIndices(Vector<const schar> subject,
}
}
-template <typename schar>
-inline void FindCharIndices(Vector<const schar> subject,
- const schar pattern_char,
- ZoneList<int>* indices,
- unsigned int limit) {
- // Collect indices of pattern_char in subject, and the end-of-string index.
- // Stop after finding at most limit values.
- int index = 0;
- while (limit > 0) {
- index = SingleCharIndexOf(subject, pattern_char, index);
- if (index < 0) return;
- indices->Add(index);
- index++;
- limit--;
- }
-}
-
static Object* Runtime_StringSplit(Arguments args) {
ASSERT(args.length() == 3);
@@ -5413,22 +5235,10 @@ static Object* Runtime_StringSplit(Arguments args) {
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
ZoneList<int> indices(initial_capacity);
- if (pattern_length == 1) {
- // Special case, go directly to fast single-character split.
- AssertNoAllocation nogc;
- uc16 pattern_char = pattern->Get(0);
- if (subject->IsTwoByteRepresentation()) {
- FindCharIndices(subject->ToUC16Vector(), pattern_char,
- &indices,
- limit);
- } else if (pattern_char <= String::kMaxAsciiCharCode) {
- FindCharIndices(subject->ToAsciiVector(),
- static_cast<char>(pattern_char),
- &indices,
- limit);
- }
- } else {
- if (!pattern->IsFlat()) FlattenString(pattern);
+ if (!pattern->IsFlat()) FlattenString(pattern);
+
+ // No allocation block.
+ {
AssertNoAllocation nogc;
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
@@ -5458,11 +5268,12 @@ static Object* Runtime_StringSplit(Arguments args) {
}
}
}
+
if (static_cast<uint32_t>(indices.length()) < limit) {
indices.Add(subject_length);
}
- // The list indices now contains the end of each part to create.
+ // The list indices now contains the end of each part to create.
// Create JSArray of substrings separated by separator.
int part_count = indices.length();
@@ -5567,6 +5378,14 @@ static Object* Runtime_StringToArray(Arguments args) {
}
+static Object* Runtime_NewStringWrapper(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(String, value, args[0]);
+ return value->ToObject();
+}
+
+
bool Runtime::IsUpperCaseChar(uint16_t ch) {
unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
int char_length = to_upper_mapping.get(ch, 0, chars);
@@ -10690,9 +10509,10 @@ Runtime::Function* Runtime::FunctionForId(FunctionId fid) {
}
-Runtime::Function* Runtime::FunctionForName(const char* name) {
+Runtime::Function* Runtime::FunctionForName(Vector<const char> name) {
for (Function* f = Runtime_functions; f->name != NULL; f++) {
- if (strcmp(f->name, name) == 0) {
+ if (strncmp(f->name, name.start(), name.length()) == 0
+ && f->name[name.length()] == 0) {
return f;
}
}
diff --git a/src/runtime.h b/src/runtime.h
index 001e05fc..312907ad 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -174,6 +174,7 @@ namespace internal {
F(StringMatch, 3, 1) \
F(StringTrim, 3, 1) \
F(StringToArray, 1, 1) \
+ F(NewStringWrapper, 1, 1) \
\
/* Numbers */ \
F(NumberToRadixString, 2, 1) \
@@ -201,6 +202,7 @@ namespace internal {
\
F(ClassOf, 1, 1) \
F(SetCode, 2, 1) \
+ F(SetExpectedNumberOfProperties, 2, 1) \
\
F(CreateApiFunction, 1, 1) \
F(IsTemplate, 1, 1) \
@@ -419,7 +421,7 @@ class Runtime : public AllStatic {
static Function* FunctionForId(FunctionId fid);
// Get the runtime function with the given name.
- static Function* FunctionForName(const char* name);
+ static Function* FunctionForName(Vector<const char> name);
static int StringMatch(Handle<String> sub, Handle<String> pat, int index);
diff --git a/src/runtime.js b/src/runtime.js
index 42968104..f2c8d6b8 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -502,7 +502,10 @@ function ToBoolean(x) {
// ECMA-262, section 9.3, page 31.
function ToNumber(x) {
if (IS_NUMBER(x)) return x;
- if (IS_STRING(x)) return %StringToNumber(x);
+ if (IS_STRING(x)) {
+ return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
+ : %StringToNumber(x);
+ }
if (IS_BOOLEAN(x)) return x ? 1 : 0;
if (IS_UNDEFINED(x)) return $NaN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
diff --git a/src/scanner.cc b/src/scanner.cc
index ca0e2d86..1a8d721c 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -50,35 +50,22 @@ StaticResource<Scanner::Utf8Decoder> Scanner::utf8_decoder_;
// ----------------------------------------------------------------------------
// UTF8Buffer
-UTF8Buffer::UTF8Buffer() : data_(NULL), limit_(NULL) { }
+UTF8Buffer::UTF8Buffer() : buffer_(kInitialCapacity) { }
-UTF8Buffer::~UTF8Buffer() {
- if (data_ != NULL) DeleteArray(data_);
-}
+UTF8Buffer::~UTF8Buffer() {}
void UTF8Buffer::AddCharSlow(uc32 c) {
- static const int kCapacityGrowthLimit = 1 * MB;
- if (cursor_ > limit_) {
- int old_capacity = Capacity();
- int old_position = pos();
- int new_capacity =
- Min(old_capacity * 3, old_capacity + kCapacityGrowthLimit);
- char* new_data = NewArray<char>(new_capacity);
- memcpy(new_data, data_, old_position);
- DeleteArray(data_);
- data_ = new_data;
- cursor_ = new_data + old_position;
- limit_ = ComputeLimit(new_data, new_capacity);
- ASSERT(Capacity() == new_capacity && pos() == old_position);
- }
- if (static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
- *cursor_++ = c; // Common case: 7-bit ASCII.
- } else {
- cursor_ += unibrow::Utf8::Encode(cursor_, c);
- }
- ASSERT(pos() <= Capacity());
+ ASSERT(static_cast<unsigned>(c) > unibrow::Utf8::kMaxOneByteChar);
+ int length = unibrow::Utf8::Length(c);
+ Vector<char> block = buffer_.AddBlock(length, '\0');
+#ifdef DEBUG
+ int written_length = unibrow::Utf8::Encode(block.start(), c);
+ CHECK_EQ(length, written_length);
+#else
+ unibrow::Utf8::Encode(block.start(), c);
+#endif
}
@@ -332,6 +319,26 @@ void KeywordMatcher::Step(uc32 input) {
}
+
+// ----------------------------------------------------------------------------
+// Scanner::LiteralScope
+
+Scanner::LiteralScope::LiteralScope(Scanner* self)
+ : scanner_(self), complete_(false) {
+ self->StartLiteral();
+}
+
+
+Scanner::LiteralScope::~LiteralScope() {
+ if (!complete_) scanner_->DropLiteral();
+}
+
+
+void Scanner::LiteralScope::Complete() {
+ scanner_->TerminateLiteral();
+ complete_ = true;
+}
+
// ----------------------------------------------------------------------------
// Scanner
@@ -399,8 +406,10 @@ void Scanner::Init(Handle<String> source,
// Set c0_ (one character ahead)
ASSERT(kCharacterLookaheadBufferSize == 1);
Advance();
- // Initializer current_ to not refer to a literal buffer.
- current_.literal_buffer = NULL;
+ // Initialize current_ to not refer to a literal.
+ current_.literal_chars = Vector<const char>();
+ // Reset literal buffer.
+ literal_buffer_.Reset();
// Skip initial whitespace allowing HTML comment ends just like
// after a newline and scan first token.
@@ -428,24 +437,22 @@ Token::Value Scanner::Next() {
void Scanner::StartLiteral() {
- // Use the first buffer unless it's currently in use by the current_ token.
- // In most cases we won't have two literals/identifiers in a row, so
- // the second buffer won't be used very often and is unlikely to grow much.
- UTF8Buffer* free_buffer =
- (current_.literal_buffer != &literal_buffer_1_) ? &literal_buffer_1_
- : &literal_buffer_2_;
- next_.literal_buffer = free_buffer;
- free_buffer->Reset();
+ literal_buffer_.StartLiteral();
}
void Scanner::AddChar(uc32 c) {
- next_.literal_buffer->AddChar(c);
+ literal_buffer_.AddChar(c);
}
void Scanner::TerminateLiteral() {
- AddChar(0);
+ next_.literal_chars = literal_buffer_.EndLiteral();
+}
+
+
+void Scanner::DropLiteral() {
+ literal_buffer_.DropLiteral();
}
@@ -575,7 +582,7 @@ Token::Value Scanner::ScanHtmlComment() {
void Scanner::ScanJson() {
- next_.literal_buffer = NULL;
+ next_.literal_chars = Vector<const char>();
Token::Value token;
has_line_terminator_before_next_ = false;
do {
@@ -657,7 +664,7 @@ void Scanner::ScanJson() {
Token::Value Scanner::ScanJsonString() {
ASSERT_EQ('"', c0_);
Advance();
- StartLiteral();
+ LiteralScope literal(this);
while (c0_ != '"' && c0_ > 0) {
// Check for control character (0x00-0x1f) or unterminated string (<0).
if (c0_ < 0x20) return Token::ILLEGAL;
@@ -691,7 +698,9 @@ Token::Value Scanner::ScanJsonString() {
for (int i = 0; i < 4; i++) {
Advance();
int digit = HexValue(c0_);
- if (digit < 0) return Token::ILLEGAL;
+ if (digit < 0) {
+ return Token::ILLEGAL;
+ }
value = value * 16 + digit;
}
AddChar(value);
@@ -706,14 +715,14 @@ Token::Value Scanner::ScanJsonString() {
if (c0_ != '"') {
return Token::ILLEGAL;
}
- TerminateLiteral();
+ literal.Complete();
Advance();
return Token::STRING;
}
Token::Value Scanner::ScanJsonNumber() {
- StartLiteral();
+ LiteralScope literal(this);
if (c0_ == '-') AddCharAdvance();
if (c0_ == '0') {
AddCharAdvance();
@@ -741,27 +750,27 @@ Token::Value Scanner::ScanJsonNumber() {
AddCharAdvance();
} while (c0_ >= '0' && c0_ <= '9');
}
- TerminateLiteral();
+ literal.Complete();
return Token::NUMBER;
}
Token::Value Scanner::ScanJsonIdentifier(const char* text,
Token::Value token) {
- StartLiteral();
+ LiteralScope literal(this);
while (*text != '\0') {
if (c0_ != *text) return Token::ILLEGAL;
Advance();
text++;
}
if (kIsIdentifierPart.get(c0_)) return Token::ILLEGAL;
- TerminateLiteral();
+ literal.Complete();
return token;
}
void Scanner::ScanJavaScript() {
- next_.literal_buffer = NULL;
+ next_.literal_chars = Vector<const char>();
Token::Value token;
has_line_terminator_before_next_ = false;
do {
@@ -1098,7 +1107,7 @@ Token::Value Scanner::ScanString() {
uc32 quote = c0_;
Advance(); // consume quote
- StartLiteral();
+ LiteralScope literal(this);
while (c0_ != quote && c0_ >= 0 && !kIsLineTerminator.get(c0_)) {
uc32 c = c0_;
Advance();
@@ -1109,10 +1118,8 @@ Token::Value Scanner::ScanString() {
AddChar(c);
}
}
- if (c0_ != quote) {
- return Token::ILLEGAL;
- }
- TerminateLiteral();
+ if (c0_ != quote) return Token::ILLEGAL;
+ literal.Complete();
Advance(); // consume quote
return Token::STRING;
@@ -1148,7 +1155,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
- StartLiteral();
+ LiteralScope literal(this);
if (seen_period) {
// we have already seen a decimal point of the float
AddChar('.');
@@ -1164,12 +1171,13 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// hex number
kind = HEX;
AddCharAdvance();
- if (!IsHexDigit(c0_))
+ if (!IsHexDigit(c0_)) {
// we must have at least one hex digit after 'x'/'X'
return Token::ILLEGAL;
- while (IsHexDigit(c0_))
+ }
+ while (IsHexDigit(c0_)) {
AddCharAdvance();
-
+ }
} else if ('0' <= c0_ && c0_ <= '7') {
// (possible) octal number
kind = OCTAL;
@@ -1202,12 +1210,12 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
AddCharAdvance();
if (c0_ == '+' || c0_ == '-')
AddCharAdvance();
- if (!IsDecimalDigit(c0_))
+ if (!IsDecimalDigit(c0_)) {
// we must have at least one decimal digit after 'e'/'E'
return Token::ILLEGAL;
+ }
ScanDecimalDigits();
}
- TerminateLiteral();
// The source character immediately following a numeric literal must
// not be an identifier start or a decimal digit; see ECMA-262
@@ -1216,6 +1224,8 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
if (IsDecimalDigit(c0_) || kIsIdentifierStart.get(c0_))
return Token::ILLEGAL;
+ literal.Complete();
+
return Token::NUMBER;
}
@@ -1235,7 +1245,7 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
Token::Value Scanner::ScanIdentifier() {
ASSERT(kIsIdentifierStart.get(c0_));
- StartLiteral();
+ LiteralScope literal(this);
KeywordMatcher keyword_match;
// Scan identifier start character.
@@ -1265,7 +1275,7 @@ Token::Value Scanner::ScanIdentifier() {
Advance();
}
}
- TerminateLiteral();
+ literal.Complete();
return keyword_match.token();
}
@@ -1295,36 +1305,32 @@ bool Scanner::ScanRegExpPattern(bool seen_equal) {
// Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
// the scanner should pass uninterpreted bodies to the RegExp
// constructor.
- StartLiteral();
+ LiteralScope literal(this);
if (seen_equal)
AddChar('=');
while (c0_ != '/' || in_character_class) {
- if (kIsLineTerminator.get(c0_) || c0_ < 0)
- return false;
+ if (kIsLineTerminator.get(c0_) || c0_ < 0) return false;
if (c0_ == '\\') { // escaped character
AddCharAdvance();
- if (kIsLineTerminator.get(c0_) || c0_ < 0)
- return false;
+ if (kIsLineTerminator.get(c0_) || c0_ < 0) return false;
AddCharAdvance();
} else { // unescaped character
- if (c0_ == '[')
- in_character_class = true;
- if (c0_ == ']')
- in_character_class = false;
+ if (c0_ == '[') in_character_class = true;
+ if (c0_ == ']') in_character_class = false;
AddCharAdvance();
}
}
Advance(); // consume '/'
- TerminateLiteral();
+ literal.Complete();
return true;
}
bool Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
- StartLiteral();
+ LiteralScope literal(this);
while (kIsIdentifierPart.get(c0_)) {
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
@@ -1337,7 +1343,7 @@ bool Scanner::ScanRegExpFlags() {
}
AddCharAdvance();
}
- TerminateLiteral();
+ literal.Complete();
next_.location.end_pos = source_pos() - 1;
return true;
diff --git a/src/scanner.h b/src/scanner.h
index 2dce5a18..8d618469 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -40,46 +40,45 @@ class UTF8Buffer {
UTF8Buffer();
~UTF8Buffer();
- void AddChar(uc32 c) {
- ASSERT_NOT_NULL(data_);
- if (cursor_ <= limit_ &&
- static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
- *cursor_++ = static_cast<char>(c);
+ inline void AddChar(uc32 c) {
+ if (static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
+ buffer_.Add(static_cast<char>(c));
} else {
AddCharSlow(c);
}
}
- void Reset() {
- if (data_ == NULL) {
- data_ = NewArray<char>(kInitialCapacity);
- limit_ = ComputeLimit(data_, kInitialCapacity);
- }
- cursor_ = data_;
+ void StartLiteral() {
+ buffer_.StartSequence();
}
- int pos() const {
- ASSERT_NOT_NULL(data_);
- return static_cast<int>(cursor_ - data_);
+ Vector<const char> EndLiteral() {
+ buffer_.Add(kEndMarker);
+ Vector<char> sequence = buffer_.EndSequence();
+ return Vector<const char>(sequence.start(), sequence.length());
}
- char* data() const { return data_; }
-
- private:
- static const int kInitialCapacity = 256;
- char* data_;
- char* cursor_;
- char* limit_;
-
- int Capacity() const {
- ASSERT_NOT_NULL(data_);
- return static_cast<int>(limit_ - data_) + unibrow::Utf8::kMaxEncodedSize;
+ void DropLiteral() {
+ buffer_.DropSequence();
}
- static char* ComputeLimit(char* data, int capacity) {
- return (data + capacity) - unibrow::Utf8::kMaxEncodedSize;
+ void Reset() {
+ buffer_.Reset();
}
+ // The end marker added after a parsed literal.
+ // Using zero allows the usage of strlen and similar functions on
+ // identifiers and numbers (but not strings, since they may contain zero
+ // bytes).
+ // TODO(lrn): Use '\xff' as end marker, since it cannot occur inside
+  // a UTF-8 string. This requires changes in all places that use
+ // str-functions on the literals, but allows a single pointer to represent
+ // the literal, even if it contains embedded zeros.
+ static const char kEndMarker = '\x00';
+ private:
+ static const int kInitialCapacity = 256;
+ SequenceCollector<char, 4> buffer_;
+
void AddCharSlow(uc32 c);
};
@@ -271,6 +270,17 @@ class Scanner {
public:
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+ class LiteralScope {
+ public:
+ explicit LiteralScope(Scanner* self);
+ ~LiteralScope();
+ void Complete();
+
+ private:
+ Scanner* scanner_;
+ bool complete_;
+ };
+
// Construction
explicit Scanner(ParserMode parse_mode);
@@ -314,27 +324,34 @@ class Scanner {
// These functions only give the correct result if the literal
// was scanned between calls to StartLiteral() and TerminateLiteral().
const char* literal_string() const {
- return current_.literal_buffer->data();
+ return current_.literal_chars.start();
}
+
int literal_length() const {
- // Excluding terminal '\0' added by TerminateLiteral().
- return current_.literal_buffer->pos() - 1;
+ // Excluding terminal '\x00' added by TerminateLiteral().
+ return current_.literal_chars.length() - 1;
+ }
+
+ Vector<const char> literal() const {
+ return Vector<const char>(literal_string(), literal_length());
}
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
const char* next_literal_string() const {
- return next_.literal_buffer->data();
+ return next_.literal_chars.start();
}
+
+
// Returns the length of the next token (that would be returned if
// Next() were called).
int next_literal_length() const {
- return next_.literal_buffer->pos() - 1;
+ // Excluding terminal '\x00' added by TerminateLiteral().
+ return next_.literal_chars.length() - 1;
}
Vector<const char> next_literal() const {
- return Vector<const char>(next_literal_string(),
- next_literal_length());
+ return Vector<const char>(next_literal_string(), next_literal_length());
}
// Scans the input as a regular expression pattern, previous
@@ -371,7 +388,7 @@ class Scanner {
struct TokenDesc {
Token::Value token;
Location location;
- UTF8Buffer* literal_buffer;
+ Vector<const char> literal_chars;
};
void Init(Handle<String> source,
@@ -380,10 +397,12 @@ class Scanner {
ParserLanguage language);
// Literal buffer support
- void StartLiteral();
- void AddChar(uc32 ch);
- void AddCharAdvance();
- void TerminateLiteral();
+ inline void StartLiteral();
+ inline void AddChar(uc32 ch);
+ inline void AddCharAdvance();
+ inline void TerminateLiteral();
+ // Stops scanning of a literal, e.g., due to an encountered error.
+ inline void DropLiteral();
// Low-level scanning support.
void Advance() { c0_ = source_->Advance(); }
@@ -487,9 +506,8 @@ class Scanner {
SafeStringInputBuffer safe_string_input_buffer_;
// Buffer to hold literal values (identifiers, strings, numbers)
- // using 0-terminated UTF-8 encoding.
- UTF8Buffer literal_buffer_1_;
- UTF8Buffer literal_buffer_2_;
+ // using '\x00'-terminated UTF-8 encoding. Handles allocation internally.
+ UTF8Buffer literal_buffer_;
bool stack_overflow_;
static StaticResource<Utf8Decoder> utf8_decoder_;
diff --git a/src/serialize.cc b/src/serialize.cc
index cdde07e3..cde7577c 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -241,16 +241,6 @@ void ExternalReferenceTable::PopulateTable() {
DEBUG_ADDRESS,
Debug::k_restarter_frame_function_pointer << kDebugIdShift,
"Debug::restarter_frame_function_pointer_address()");
- const char* debug_register_format = "Debug::register_address(%i)";
- int dr_format_length = StrLength(debug_register_format);
- for (int i = 0; i < kNumJSCallerSaved; ++i) {
- Vector<char> name = Vector<char>::New(dr_format_length + 1);
- OS::SNPrintF(name, debug_register_format, i);
- Add(Debug_Address(Debug::k_register_address, i).address(),
- DEBUG_ADDRESS,
- Debug::k_register_address << kDebugIdShift | i,
- name.start());
- }
#endif
// Stat counters
diff --git a/src/spaces.cc b/src/spaces.cc
index 67adafde..50afd031 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -68,6 +68,12 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
}
+HeapObjectIterator::HeapObjectIterator(Page* page,
+ HeapObjectCallback size_func) {
+ Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
+}
+
+
void HeapObjectIterator::Initialize(Address cur, Address end,
HeapObjectCallback size_f) {
cur_addr_ = cur;
@@ -1996,77 +2002,88 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
}
-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
- if (will_compact) {
- // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
- // to skip unused pages. Update flag value for all pages in space.
- PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
- Page* last_in_use = AllocationTopPage();
- bool in_use = true;
-
- while (all_pages_iterator.has_next()) {
- Page* p = all_pages_iterator.next();
- p->SetWasInUseBeforeMC(in_use);
- if (p == last_in_use) {
- // We passed a page containing allocation top. All consequent
- // pages are not used.
- in_use = false;
+void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
+ const bool add_to_freelist = true;
+
+ // Mark used and unused pages to properly fill unused pages
+ // after reordering.
+ PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
+ Page* last_in_use = AllocationTopPage();
+ bool in_use = true;
+
+ while (all_pages_iterator.has_next()) {
+ Page* p = all_pages_iterator.next();
+ p->SetWasInUseBeforeMC(in_use);
+ if (p == last_in_use) {
+      // We passed a page containing allocation top. All subsequent
+ // pages are not used.
+ in_use = false;
+ }
+ }
+
+ if (page_list_is_chunk_ordered_) return;
+
+ Page* new_last_in_use = Page::FromAddress(NULL);
+ MemoryAllocator::RelinkPageListInChunkOrder(this,
+ &first_page_,
+ &last_page_,
+ &new_last_in_use);
+ ASSERT(new_last_in_use->is_valid());
+
+ if (new_last_in_use != last_in_use) {
+ // Current allocation top points to a page which is now in the middle
+ // of page list. We should move allocation top forward to the new last
+ // used page so various object iterators will continue to work properly.
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
+ last_in_use->AllocationTop());
+
+ last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
+ if (size_in_bytes > 0) {
+ Address start = last_in_use->AllocationTop();
+ if (deallocate_blocks) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ DeallocateBlock(start, size_in_bytes, add_to_freelist);
+ } else {
+ Heap::CreateFillerObjectAt(start, size_in_bytes);
}
}
- if (!page_list_is_chunk_ordered_) {
- Page* new_last_in_use = Page::FromAddress(NULL);
- MemoryAllocator::RelinkPageListInChunkOrder(this,
- &first_page_,
- &last_page_,
- &new_last_in_use);
- ASSERT(new_last_in_use->is_valid());
-
- if (new_last_in_use != last_in_use) {
- // Current allocation top points to a page which is now in the middle
- // of page list. We should move allocation top forward to the new last
- // used page so various object iterators will continue to work properly.
- last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
-
- int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
- last_in_use->AllocationTop());
-
- if (size_in_bytes > 0) {
- // There is still some space left on this page. Create a fake
- // object which will occupy all free space on this page.
- // Otherwise iterators would not be able to scan this page
- // correctly.
-
- Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
- size_in_bytes);
- }
+ // New last in use page was in the middle of the list before
+    // sorting, so it is full.
+ SetTop(new_last_in_use->AllocationTop());
- // New last in use page was in the middle of the list before
- // sorting so it full.
- SetTop(new_last_in_use->AllocationTop());
+ ASSERT(AllocationTopPage() == new_last_in_use);
+ ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+ }
- ASSERT(AllocationTopPage() == new_last_in_use);
- ASSERT(AllocationTopPage()->WasInUseBeforeMC());
- }
+ PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+ while (pages_in_use_iterator.has_next()) {
+ Page* p = pages_in_use_iterator.next();
+ if (!p->WasInUseBeforeMC()) {
+ // Empty page is in the middle of a sequence of used pages.
+ // Allocate it as a whole and deallocate immediately.
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
+ p->ObjectAreaStart());
- PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
- while (pages_in_use_iterator.has_next()) {
- Page* p = pages_in_use_iterator.next();
- if (!p->WasInUseBeforeMC()) {
- // Empty page is in the middle of a sequence of used pages.
- // Create a fake object which will occupy all free space on this page.
- // Otherwise iterators would not be able to scan this page correctly.
- int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
- p->ObjectAreaStart());
-
- p->SetAllocationWatermark(p->ObjectAreaStart());
- Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
- }
+ p->SetAllocationWatermark(p->ObjectAreaStart());
+ Address start = p->ObjectAreaStart();
+ if (deallocate_blocks) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ DeallocateBlock(start, size_in_bytes, add_to_freelist);
+ } else {
+ Heap::CreateFillerObjectAt(start, size_in_bytes);
}
-
- page_list_is_chunk_ordered_ = true;
}
}
+
+ page_list_is_chunk_ordered_ = true;
+}
+
+
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+ if (will_compact) {
+ RelinkPageListInChunkOrder(false);
+ }
}
@@ -2201,6 +2218,13 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
}
+void OldSpace::DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) {
+ Free(start, size_in_bytes, add_to_freelist);
+}
+
+
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
@@ -2475,6 +2499,21 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
}
+void FixedSpace::DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) {
+ // Free-list elements in fixed space are assumed to have a fixed size.
+ // We break the free block into chunks and add them to the free list
+ // individually.
+ int size = object_size_in_bytes();
+ ASSERT(size_in_bytes % size == 0);
+ Address end = start + size_in_bytes;
+ for (Address a = start; a < end; a += size) {
+ Free(a, add_to_freelist);
+ }
+}
+
+
#ifdef DEBUG
void FixedSpace::ReportStatistics() {
int pct = Available() * 100 / Capacity();
@@ -2721,6 +2760,22 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Failure::Exception();
}
+
+LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
+ // TODO(853): Change this implementation to only find executable
+ // chunks and use some kind of hash-based approach to speed it up.
+ for (LargeObjectChunk* chunk = first_chunk_;
+ chunk != NULL;
+ chunk = chunk->next()) {
+ Address chunk_address = chunk->address();
+ if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
+ return chunk;
+ }
+ }
+ return NULL;
+}
+
+
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
diff --git a/src/spaces.h b/src/spaces.h
index a6b8ea48..04e0c79f 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -756,6 +756,7 @@ class HeapObjectIterator: public ObjectIterator {
HeapObjectIterator(PagedSpace* space,
Address start,
HeapObjectCallback size_func);
+ HeapObjectIterator(Page* page, HeapObjectCallback size_func);
inline HeapObject* next() {
return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
@@ -1039,6 +1040,11 @@ class PagedSpace : public Space {
// Freed pages are moved to the end of page list.
void FreePages(Page* prev, Page* last);
+ // Deallocates a block.
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) = 0;
+
// Set space allocation info.
void SetTop(Address top) {
allocation_info_.top = top;
@@ -1097,6 +1103,8 @@ class PagedSpace : public Space {
// Returns the page of the allocation pointer.
Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+ void RelinkPageListInChunkOrder(bool deallocate_blocks);
+
protected:
// Maximum capacity of this space.
int max_capacity_;
@@ -1814,6 +1822,10 @@ class OldSpace : public PagedSpace {
}
}
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
+
// Prepare for full garbage collection. Resets the relocation pointer and
// clears the free list.
virtual void PrepareForMarkCompact(bool will_compact);
@@ -1888,6 +1900,9 @@ class FixedSpace : public PagedSpace {
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
@@ -2137,6 +2152,11 @@ class LargeObjectSpace : public Space {
// space, may be slow.
Object* FindObject(Address a);
+  // Finds a large object page containing the given pc; returns NULL
+ // if such a page doesn't exist.
+ LargeObjectChunk* FindChunkContainingPc(Address pc);
+
+
// Iterates objects covered by dirty regions.
void IterateDirtyRegions(ObjectSlotCallback func);
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 54d93845..7a490d3e 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -119,7 +119,7 @@ Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
Object* result =
- receiver->map()->UpdateCodeCache(cache_name, Code::cast(code));
+ receiver->UpdateMapCodeCache(cache_name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -131,15 +131,14 @@ Object* StubCache::ComputeLoadField(String* name,
JSObject* holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadField(receiver, holder, field_index, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -152,15 +151,14 @@ Object* StubCache::ComputeLoadCallback(String* name,
AccessorInfo* callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -172,16 +170,15 @@ Object* StubCache::ComputeLoadConstant(String* name,
JSObject* holder,
Object* value) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadConstant(receiver, holder, value, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -192,15 +189,14 @@ Object* StubCache::ComputeLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -218,9 +214,8 @@ Object* StubCache::ComputeLoadGlobal(String* name,
JSGlobalPropertyCell* cell,
bool is_dont_delete) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadGlobal(receiver,
@@ -230,7 +225,7 @@ Object* StubCache::ComputeLoadGlobal(String* name,
is_dont_delete);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -242,15 +237,14 @@ Object* StubCache::ComputeKeyedLoadField(String* name,
JSObject* holder,
int field_index) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadField(name, receiver, holder, field_index);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -262,16 +256,15 @@ Object* StubCache::ComputeKeyedLoadConstant(String* name,
JSObject* holder,
Object* value) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadConstant(name, receiver, holder, value);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -282,16 +275,15 @@ Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -303,16 +295,15 @@ Object* StubCache::ComputeKeyedLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
- Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -325,14 +316,13 @@ Object* StubCache::ComputeKeyedLoadArrayLength(String* name,
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
ASSERT(receiver->IsJSObject());
- Map* map = receiver->map();
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadArrayLength(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -361,14 +351,13 @@ Object* StubCache::ComputeKeyedLoadFunctionPrototype(String* name,
JSFunction* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Map* map = receiver->map();
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadFunctionPrototype(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -387,7 +376,7 @@ Object* StubCache::ComputeStoreField(String* name,
code = compiler.CompileStoreField(receiver, field_index, transition, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -409,7 +398,7 @@ Object* StubCache::ComputeStoreGlobal(String* name,
code = compiler.CompileStoreGlobal(receiver, cell, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -427,7 +416,7 @@ Object* StubCache::ComputeStoreCallback(String* name,
code = compiler.CompileStoreCallback(receiver, callback, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -444,7 +433,7 @@ Object* StubCache::ComputeStoreInterceptor(String* name,
code = compiler.CompileStoreInterceptor(receiver, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -462,7 +451,7 @@ Object* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver,
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(
Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = receiver->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -481,7 +470,7 @@ Object* StubCache::ComputeCallConstant(int argc,
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
- Map* map = IC::GetCodeCacheMap(object, cache_holder);
+ JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
// Compute check type based on receiver/holder.
StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
@@ -499,7 +488,7 @@ Object* StubCache::ComputeCallConstant(int argc,
cache_holder,
in_loop,
argc);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
// because it may cause GC. To avoid this issue, we return an
@@ -513,7 +502,7 @@ Object* StubCache::ComputeCallConstant(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map_holder->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -530,7 +519,7 @@ Object* StubCache::ComputeCallField(int argc,
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
- Map* map = IC::GetCodeCacheMap(object, cache_holder);
+ JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -544,7 +533,7 @@ Object* StubCache::ComputeCallField(int argc,
cache_holder,
in_loop,
argc);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallField(JSObject::cast(object),
@@ -555,7 +544,7 @@ Object* StubCache::ComputeCallField(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map_holder->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -570,7 +559,7 @@ Object* StubCache::ComputeCallInterceptor(int argc,
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(object, holder);
- Map* map = IC::GetCodeCacheMap(object, cache_holder);
+ JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -585,7 +574,7 @@ Object* StubCache::ComputeCallInterceptor(int argc,
cache_holder,
NOT_IN_LOOP,
argc);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, NOT_IN_LOOP, kind, cache_holder);
code = compiler.CompileCallInterceptor(JSObject::cast(object),
@@ -595,7 +584,7 @@ Object* StubCache::ComputeCallInterceptor(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map_holder->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -623,14 +612,14 @@ Object* StubCache::ComputeCallGlobal(int argc,
JSFunction* function) {
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(receiver, holder);
- Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
+ JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
NORMAL,
cache_holder,
in_loop,
argc);
- Object* code = map->FindInCodeCache(name, flags);
+ Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
// because it may cause GC. To avoid this issue, we return an
@@ -643,7 +632,7 @@ Object* StubCache::ComputeCallGlobal(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = map->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map_holder->UpdateMapCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 19d54f8b..bf14a4fe 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -56,158 +56,167 @@ class StubCache : public AllStatic {
// Computes the right stub matching. Inserts the result in the
// cache before returning. This might compile a stub if needed.
- static Object* ComputeLoadNonexistent(String* name, JSObject* receiver);
+ MUST_USE_RESULT static Object* ComputeLoadNonexistent(String* name,
+ JSObject* receiver);
- static Object* ComputeLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ MUST_USE_RESULT static Object* ComputeLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
- static Object* ComputeLoadCallback(String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback);
+ MUST_USE_RESULT static Object* ComputeLoadCallback(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback);
- static Object* ComputeLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value);
+ MUST_USE_RESULT static Object* ComputeLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value);
- static Object* ComputeLoadInterceptor(String* name,
- JSObject* receiver,
- JSObject* holder);
+ MUST_USE_RESULT static Object* ComputeLoadInterceptor(String* name,
+ JSObject* receiver,
+ JSObject* holder);
- static Object* ComputeLoadNormal();
+ MUST_USE_RESULT static Object* ComputeLoadNormal();
- static Object* ComputeLoadGlobal(String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- bool is_dont_delete);
+ MUST_USE_RESULT static Object* ComputeLoadGlobal(String* name,
+ JSObject* receiver,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ bool is_dont_delete);
// ---
- static Object* ComputeKeyedLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int field_index);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int field_index);
- static Object* ComputeKeyedLoadCallback(String* name,
- JSObject* receiver,
- JSObject* holder,
- AccessorInfo* callback);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadCallback(
+ String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback);
- static Object* ComputeKeyedLoadConstant(String* name, JSObject* receiver,
- JSObject* holder, Object* value);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value);
- static Object* ComputeKeyedLoadInterceptor(String* name,
- JSObject* receiver,
- JSObject* holder);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadInterceptor(String* name,
+ JSObject* receiver,
+ JSObject* holder);
- static Object* ComputeKeyedLoadArrayLength(String* name, JSArray* receiver);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadArrayLength(String* name,
+ JSArray* receiver);
- static Object* ComputeKeyedLoadStringLength(String* name,
- String* receiver);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadStringLength(String* name,
+ String* receiver);
- static Object* ComputeKeyedLoadFunctionPrototype(String* name,
- JSFunction* receiver);
+ MUST_USE_RESULT static Object* ComputeKeyedLoadFunctionPrototype(
+ String* name,
+ JSFunction* receiver);
// ---
- static Object* ComputeStoreField(String* name,
- JSObject* receiver,
- int field_index,
- Map* transition = NULL);
+ MUST_USE_RESULT static Object* ComputeStoreField(String* name,
+ JSObject* receiver,
+ int field_index,
+ Map* transition = NULL);
- static Object* ComputeStoreNormal();
+ MUST_USE_RESULT static Object* ComputeStoreNormal();
- static Object* ComputeStoreGlobal(String* name,
- GlobalObject* receiver,
- JSGlobalPropertyCell* cell);
+ MUST_USE_RESULT static Object* ComputeStoreGlobal(String* name,
+ GlobalObject* receiver,
+ JSGlobalPropertyCell* cell);
- static Object* ComputeStoreCallback(String* name,
- JSObject* receiver,
- AccessorInfo* callback);
+ MUST_USE_RESULT static Object* ComputeStoreCallback(String* name,
+ JSObject* receiver,
+ AccessorInfo* callback);
- static Object* ComputeStoreInterceptor(String* name, JSObject* receiver);
+ MUST_USE_RESULT static Object* ComputeStoreInterceptor(String* name,
+ JSObject* receiver);
// ---
- static Object* ComputeKeyedStoreField(String* name,
- JSObject* receiver,
- int field_index,
- Map* transition = NULL);
+ MUST_USE_RESULT static Object* ComputeKeyedStoreField(String* name,
+ JSObject* receiver,
+ int field_index,
+ Map* transition = NULL);
// ---
- static Object* ComputeCallField(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder,
- int index);
-
- static Object* ComputeCallConstant(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder,
- JSFunction* function);
-
- static Object* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- JSObject* receiver);
-
- static Object* ComputeCallInterceptor(int argc,
- Code::Kind,
- String* name,
- Object* object,
- JSObject* holder);
-
- static Object* ComputeCallGlobal(int argc,
- InLoopFlag in_loop,
- Code::Kind,
- String* name,
- JSObject* receiver,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function);
+ MUST_USE_RESULT static Object* ComputeCallField(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ int index);
+
+ MUST_USE_RESULT static Object* ComputeCallConstant(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ Object* object,
+ JSObject* holder,
+ JSFunction* function);
+
+ MUST_USE_RESULT static Object* ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ JSObject* receiver);
+
+ MUST_USE_RESULT static Object* ComputeCallInterceptor(int argc,
+ Code::Kind,
+ String* name,
+ Object* object,
+ JSObject* holder);
+
+ MUST_USE_RESULT static Object* ComputeCallGlobal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind,
+ String* name,
+ JSObject* receiver,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function);
// ---
- static Object* ComputeCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- static Object* ComputeCallPreMonomorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallPreMonomorphic(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- static Object* ComputeCallNormal(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallNormal(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- static Object* ComputeCallMegamorphic(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallMegamorphic(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
- static Object* ComputeCallMiss(int argc, Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallMiss(int argc, Code::Kind kind);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
- static Code* FindCallInitialize(int argc,
- InLoopFlag in_loop,
- Code::Kind kind);
+ MUST_USE_RESULT static Code* FindCallInitialize(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Object* ComputeCallDebugBreak(int argc, Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallDebugBreak(int argc,
+ Code::Kind kind);
- static Object* ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
+ MUST_USE_RESULT static Object* ComputeCallDebugPrepareStepIn(int argc,
+ Code::Kind kind);
#endif
// Update cache for entry hash(name, map).
diff --git a/src/token.h b/src/token.h
index 0d8960b8..ebc7fea1 100644
--- a/src/token.h
+++ b/src/token.h
@@ -248,6 +248,10 @@ class Token {
return op == INC || op == DEC;
}
+ static bool IsShiftOp(Value op) {
+ return (SHL <= op) && (op <= SHR);
+ }
+
// Returns a string corresponding to the JS token string
  // (i.e., "<" for the token LT) or NULL if the token doesn't
// have a (unique) string (e.g. an IDENTIFIER).
diff --git a/src/top.cc b/src/top.cc
index 82960270..e172cb86 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -69,7 +69,6 @@ void ThreadLocalTop::Initialize() {
#ifdef ENABLE_LOGGING_AND_PROFILING
js_entry_sp_ = 0;
#endif
- stack_is_cooked_ = false;
try_catch_handler_address_ = NULL;
context_ = NULL;
int id = ThreadManager::CurrentId();
@@ -303,39 +302,6 @@ void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
}
-void Top::MarkCompactPrologue(bool is_compacting) {
- MarkCompactPrologue(is_compacting, &thread_local_);
-}
-
-
-void Top::MarkCompactPrologue(bool is_compacting, char* data) {
- MarkCompactPrologue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
-}
-
-
-void Top::MarkCompactPrologue(bool is_compacting, ThreadLocalTop* thread) {
- if (is_compacting) {
- StackFrame::CookFramesForThread(thread);
- }
-}
-
-
-void Top::MarkCompactEpilogue(bool is_compacting, char* data) {
- MarkCompactEpilogue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
-}
-
-
-void Top::MarkCompactEpilogue(bool is_compacting) {
- MarkCompactEpilogue(is_compacting, &thread_local_);
-}
-
-
-void Top::MarkCompactEpilogue(bool is_compacting, ThreadLocalTop* thread) {
- if (is_compacting) {
- StackFrame::UncookFramesForThread(thread);
- }
-}
-
static int stack_trace_nesting_level = 0;
static StringStream* incomplete_message = NULL;
diff --git a/src/top.h b/src/top.h
index 87333931..776c43e3 100644
--- a/src/top.h
+++ b/src/top.h
@@ -104,9 +104,6 @@ class ThreadLocalTop BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
Address js_entry_sp_; // the stack pointer of the bottom js entry frame
#endif
- bool stack_is_cooked_;
- inline bool stack_is_cooked() { return stack_is_cooked_; }
- inline void set_stack_is_cooked(bool value) { stack_is_cooked_ = value; }
// Generated code scratch locations.
int32_t formal_count_;
@@ -260,12 +257,6 @@ class Top {
// Generated code scratch locations.
static void* formal_count_address() { return &thread_local_.formal_count_; }
- static void MarkCompactPrologue(bool is_compacting);
- static void MarkCompactEpilogue(bool is_compacting);
- static void MarkCompactPrologue(bool is_compacting,
- char* archived_thread_data);
- static void MarkCompactEpilogue(bool is_compacting,
- char* archived_thread_data);
static void PrintCurrentStackTrace(FILE* out);
static void PrintStackTrace(FILE* out, char* thread_data);
static void PrintStack(StringStream* accumulator);
diff --git a/src/utils.h b/src/utils.h
index 2885c520..d605891e 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -326,9 +326,9 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
- ASSERT(from < length_);
ASSERT(to <= length_);
ASSERT(from < to);
+ ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}
@@ -476,6 +476,213 @@ inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
}
+/*
+ * A class that collects values into a backing store.
+ * Specialized versions of the class can allow access to the backing store
+ * in different ways.
+ * There is no guarantee that the backing store is contiguous (and, as a
+ * consequence, no guarantees that consecutively added elements are adjacent
+ * in memory). The collector may move elements unless it has guaranteed not
+ * to.
+ */
+template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
+class Collector {
+ public:
+ explicit Collector(int initial_capacity = kMinCapacity)
+ : index_(0), size_(0) {
+ if (initial_capacity < kMinCapacity) {
+ initial_capacity = kMinCapacity;
+ }
+ current_chunk_ = Vector<T>::New(initial_capacity);
+ }
+
+ virtual ~Collector() {
+ // Free backing store (in reverse allocation order).
+ current_chunk_.Dispose();
+ for (int i = chunks_.length() - 1; i >= 0; i--) {
+ chunks_.at(i).Dispose();
+ }
+ }
+
+ // Add a single element.
+ inline void Add(T value) {
+ if (index_ >= current_chunk_.length()) {
+ Grow(1);
+ }
+ current_chunk_[index_] = value;
+ index_++;
+ size_++;
+ }
+
+ // Add a block of contiguous elements and return a Vector backed by the
+ // memory area.
+ // A basic Collector will keep this vector valid as long as the Collector
+ // is alive.
+ inline Vector<T> AddBlock(int size, T initial_value) {
+ ASSERT(size > 0);
+ if (size > current_chunk_.length() - index_) {
+ Grow(size);
+ }
+ T* position = current_chunk_.start() + index_;
+ index_ += size;
+ size_ += size;
+ for (int i = 0; i < size; i++) {
+ position[i] = initial_value;
+ }
+ return Vector<T>(position, size);
+ }
+
+
+ // Write the contents of the collector into the provided vector.
+ void WriteTo(Vector<T> destination) {
+ ASSERT(size_ <= destination.length());
+ int position = 0;
+ for (int i = 0; i < chunks_.length(); i++) {
+ Vector<T> chunk = chunks_.at(i);
+ for (int j = 0; j < chunk.length(); j++) {
+ destination[position] = chunk[j];
+ position++;
+ }
+ }
+ for (int i = 0; i < index_; i++) {
+ destination[position] = current_chunk_[i];
+ position++;
+ }
+ }
+
+ // Allocate a single contiguous vector, copy all the collected
+ // elements to the vector, and return it.
+ // The caller is responsible for freeing the memory of the returned
+ // vector (e.g., using Vector::Dispose).
+ Vector<T> ToVector() {
+ Vector<T> new_store = Vector<T>::New(size_);
+ WriteTo(new_store);
+ return new_store;
+ }
+
+ // Resets the collector to be empty.
+ virtual void Reset() {
+ for (int i = chunks_.length() - 1; i >= 0; i--) {
+ chunks_.at(i).Dispose();
+ }
+ chunks_.Rewind(0);
+ index_ = 0;
+ size_ = 0;
+ }
+
+ // Total number of elements added to collector so far.
+ inline int size() { return size_; }
+
+ protected:
+ static const int kMinCapacity = 16;
+ List<Vector<T> > chunks_;
+ Vector<T> current_chunk_; // Block of memory currently being written into.
+ int index_; // Current index in current chunk.
+ int size_; // Total number of elements in collector.
+
+ // Creates a new current chunk, and stores the old chunk in the chunks_ list.
+ void Grow(int min_capacity) {
+ ASSERT(growth_factor > 1);
+ int growth = current_chunk_.length() * (growth_factor - 1);
+ if (growth > max_growth) {
+ growth = max_growth;
+ }
+ int new_capacity = current_chunk_.length() + growth;
+ if (new_capacity < min_capacity) {
+ new_capacity = min_capacity + growth;
+ }
+ Vector<T> new_chunk = Vector<T>::New(new_capacity);
+ int new_index = PrepareGrow(new_chunk);
+ if (index_ > 0) {
+ chunks_.Add(current_chunk_.SubVector(0, index_));
+ } else {
+ // Can happen if the call to PrepareGrow moves everything into
+ // the new chunk.
+ current_chunk_.Dispose();
+ }
+ current_chunk_ = new_chunk;
+ index_ = new_index;
+ ASSERT(index_ + min_capacity <= current_chunk_.length());
+ }
+
+ // Before replacing the current chunk, give a subclass the option to move
+ // some of the current data into the new chunk. The function may update
+ // the current index_ value to represent data no longer in the current chunk.
+ // Returns the initial index of the new chunk (after copied data).
+ virtual int PrepareGrow(Vector<T> new_chunk) {
+ return 0;
+ }
+};
+
+
+/*
+ * A collector that allows sequences of values to be guaranteed to
+ * stay consecutive.
+ * If the backing store grows while a sequence is active, the current
+ * sequence might be moved, but after the sequence is ended, it will
+ * not move again.
+ * NOTICE: Blocks allocated using Collector::AddBlock(int) can also move
+ * if they lie inside an active sequence when another element is added.
+ */
+template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
+class SequenceCollector : public Collector<T, growth_factor, max_growth> {
+ public:
+ explicit SequenceCollector(int initial_capacity)
+ : Collector<T, growth_factor, max_growth>(initial_capacity),
+ sequence_start_(kNoSequence) { }
+
+ virtual ~SequenceCollector() {}
+
+ void StartSequence() {
+ ASSERT(sequence_start_ == kNoSequence);
+ sequence_start_ = this->index_;
+ }
+
+ Vector<T> EndSequence() {
+ ASSERT(sequence_start_ != kNoSequence);
+ int sequence_start = sequence_start_;
+ sequence_start_ = kNoSequence;
+ if (sequence_start == this->index_) return Vector<T>();
+ return this->current_chunk_.SubVector(sequence_start, this->index_);
+ }
+
+ // Drops the currently added sequence, and all collected elements in it.
+ void DropSequence() {
+ ASSERT(sequence_start_ != kNoSequence);
+ int sequence_length = this->index_ - sequence_start_;
+ this->index_ = sequence_start_;
+ this->size_ -= sequence_length;
+ sequence_start_ = kNoSequence;
+ }
+
+ virtual void Reset() {
+ sequence_start_ = kNoSequence;
+ this->Collector<T, growth_factor, max_growth>::Reset();
+ }
+
+ private:
+ static const int kNoSequence = -1;
+ int sequence_start_;
+
+ // Move the currently active sequence to the new chunk.
+ virtual int PrepareGrow(Vector<T> new_chunk) {
+ if (sequence_start_ != kNoSequence) {
+ int sequence_length = this->index_ - sequence_start_;
+ // The new chunk is always larger than the current chunk, so there
+ // is room for the copy.
+ ASSERT(sequence_length < new_chunk.length());
+ for (int i = 0; i < sequence_length; i++) {
+ new_chunk[i] = this->current_chunk_[sequence_start_ + i];
+ }
+ this->index_ = sequence_start_;
+ sequence_start_ = 0;
+ return sequence_length;
+ }
+ return 0;
+ }
+};
+
+
// Simple support to read a file into a 0-terminated C-string.
// The returned buffer must be freed by the caller.
// On return, *exists tells whether the file existed.
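The Collector and SequenceCollector templates added above are chunked accumulation buffers: Add/AddBlock append into backing chunks that grow by growth_factor (capped at max_growth), and ToVector copies everything into one contiguous vector. A minimal usage sketch, based only on the interface shown in this hunk (the function name and element types below are illustrative, not part of this change):

// Sketch: assumes the v8::internal namespace and the existing Vector/List support.
void CollectorExample() {
  Collector<int> ints;                      // starts at kMinCapacity (16) elements
  for (int i = 0; i < 100; i++) {
    ints.Add(i);                            // elements may land in different chunks
  }
  Vector<int> copy = ints.ToVector();       // one contiguous copy of all 100 values
  // ... use copy ...
  copy.Dispose();                           // caller owns the copied storage

  SequenceCollector<char> chars(16);
  chars.StartSequence();
  chars.Add('f');
  chars.Add('o');
  chars.Add('o');
  Vector<char> word = chars.EndSequence();  // contiguous; stays put once the sequence ends
}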
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 2f2edfec..b8b3d9f6 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -67,6 +67,7 @@ namespace internal {
SC(pcre_mallocs, V8.PcreMallocCount) \
/* OS Memory allocated */ \
SC(memory_allocated, V8.OsMemoryAllocated) \
+ SC(normalized_maps, V8.NormalizedMaps) \
SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
SC(alive_after_last_gc, V8.AliveAfterLastGC) \
@@ -84,6 +85,11 @@ namespace internal {
SC(compilation_cache_misses, V8.CompilationCacheMisses) \
SC(regexp_cache_hits, V8.RegExpCacheHits) \
SC(regexp_cache_misses, V8.RegExpCacheMisses) \
+ SC(string_ctor_calls, V8.StringConstructorCalls) \
+ SC(string_ctor_conversions, V8.StringConstructorConversions) \
+ SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \
+ SC(string_ctor_string_value, V8.StringConstructorStringValue) \
+ SC(string_ctor_gc_required, V8.StringConstructorGCRequired) \
/* Amount of evaled source code. */ \
SC(total_eval_size, V8.TotalEvalSize) \
/* Amount of loaded source code. */ \
@@ -101,7 +107,10 @@ namespace internal {
/* Number of contexts created from scratch. */ \
SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
/* Number of contexts created by partial snapshot. */ \
- SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)
+ SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
+ /* Number of code objects found from pc. */ \
+ SC(pc_to_code, V8.PcToCode) \
+ SC(pc_to_code_cached, V8.PcToCodeCached)
#define STATS_COUNTER_LIST_2(SC) \
diff --git a/src/v8.h b/src/v8.h
index f761d381..9dbdf4c2 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -60,9 +60,6 @@
#include "flags.h"
// Objects & heap
-#include "objects.h"
-#include "spaces.h"
-#include "heap.h"
#include "objects-inl.h"
#include "spaces-inl.h"
#include "heap-inl.h"
diff --git a/src/v8natives.js b/src/v8natives.js
index 85540e85..ca1c99d4 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -111,13 +111,20 @@ function GlobalParseInt(string, radix) {
if (!(radix == 0 || (2 <= radix && radix <= 36)))
return $NaN;
}
- return %StringParseInt(ToString(string), radix);
+ string = TO_STRING_INLINE(string);
+ if (%_HasCachedArrayIndex(string) &&
+ (radix == 0 || radix == 10)) {
+ return %_GetCachedArrayIndex(string);
+ }
+ return %StringParseInt(string, radix);
}
// ECMA-262 - 15.1.2.3
function GlobalParseFloat(string) {
- return %StringParseFloat(ToString(string));
+ string = TO_STRING_INLINE(string);
+ if (%_HasCachedArrayIndex(string)) return %_GetCachedArrayIndex(string);
+ return %StringParseFloat(string);
}
@@ -743,8 +750,8 @@ function ObjectSeal(obj) {
throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
}
var names = ObjectGetOwnPropertyNames(obj);
- for (var key in names) {
- var name = names[key];
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
var desc = GetOwnProperty(obj, name);
if (desc.isConfigurable()) desc.setConfigurable(false);
DefineOwnProperty(obj, name, desc, true);
@@ -759,8 +766,8 @@ function ObjectFreeze(obj) {
throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
}
var names = ObjectGetOwnPropertyNames(obj);
- for (var key in names) {
- var name = names[key];
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
var desc = GetOwnProperty(obj, name);
if (IsDataDescriptor(desc)) desc.setWritable(false);
if (desc.isConfigurable()) desc.setConfigurable(false);
@@ -786,8 +793,8 @@ function ObjectIsSealed(obj) {
throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
}
var names = ObjectGetOwnPropertyNames(obj);
- for (var key in names) {
- var name = names[key];
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
var desc = GetOwnProperty(obj, name);
if (desc.isConfigurable()) return false;
}
@@ -804,8 +811,8 @@ function ObjectIsFrozen(obj) {
throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
}
var names = ObjectGetOwnPropertyNames(obj);
- for (var key in names) {
- var name = names[key];
+ for (var i = 0; i < names.length; i++) {
+ var name = names[i];
var desc = GetOwnProperty(obj, name);
if (IsDataDescriptor(desc) && desc.isWritable()) return false;
if (desc.isConfigurable()) return false;
@@ -836,6 +843,7 @@ function ObjectIsExtensible(obj) {
}
});
+%SetExpectedNumberOfProperties($Object, 4);
// ----------------------------------------------------------------------------
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 1e5e82e6..b6e656d2 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -342,28 +342,6 @@ void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
}
-void ThreadManager::MarkCompactPrologue(bool is_compacting) {
- for (ThreadState* state = ThreadState::FirstInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data += HandleScopeImplementer::ArchiveSpacePerThread();
- Top::MarkCompactPrologue(is_compacting, data);
- }
-}
-
-
-void ThreadManager::MarkCompactEpilogue(bool is_compacting) {
- for (ThreadState* state = ThreadState::FirstInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data += HandleScopeImplementer::ArchiveSpacePerThread();
- Top::MarkCompactEpilogue(is_compacting, data);
- }
-}
-
-
int ThreadManager::CurrentId() {
return Thread::GetThreadLocalInt(thread_id_key);
}
diff --git a/src/v8threads.h b/src/v8threads.h
index ca42354c..da56d052 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -105,8 +105,6 @@ class ThreadManager : public AllStatic {
static void Iterate(ObjectVisitor* v);
static void IterateArchivedThreads(ThreadVisitor* v);
- static void MarkCompactPrologue(bool is_compacting);
- static void MarkCompactEpilogue(bool is_compacting);
static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
static int CurrentId();
diff --git a/src/version.cc b/src/version.cc
index ecee6eb9..1afcaf27 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,10 +33,10 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
-#define MINOR_VERSION 3
-#define BUILD_NUMBER 10
+#define MINOR_VERSION 4
+#define BUILD_NUMBER 1
#define PATCH_LEVEL 0
-#define CANDIDATE_VERSION false
+#define CANDIDATE_VERSION true
// Define SONAME to have the SCons build put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8 version
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 7cc493e5..85ad6371 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -875,6 +875,13 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // TODO(849): implement custom construct stub.
+ // Generate a copy of the generic stub for now.
+ Generate_JSConstructStubGeneric(masm);
+}
+
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
@@ -897,10 +904,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// rdi: called object
// rax: number of arguments
__ bind(&non_function_call);
- // CALL_NON_FUNCTION expects the non-function constructor as receiver
- // (instead of the original receiver from the call site). The receiver is
- // stack element argc+1.
- __ movq(Operand(rsp, rax, times_pointer_size, kPointerSize), rdi);
// Set expected number of arguments to zero (not changing rax).
__ movq(rbx, Immediate(0));
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
new file mode 100644
index 00000000..c75b9455
--- /dev/null
+++ b/src/x64/code-stubs-x64.cc
@@ -0,0 +1,4015 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in rsi.
+ Label gc;
+ __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the function info from the stack.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
+ __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+ __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
+ __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+ __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
+ __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
+ __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ pop(rcx); // Temporarily remove return address.
+ __ pop(rdx);
+ __ push(rsi);
+ __ push(rdx);
+ __ push(rcx); // Restore return address.
+ __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+ __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+
+ // Setup the fixed slots.
+ __ xor_(rbx, rbx); // Set to NULL.
+ __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
+ __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
+
+ // Copy the global object from the surrounding context.
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ movq(rsi, rax);
+ __ ret(1 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [rsp + kPointerSize]: constant elements.
+ // [rsp + (2 * kPointerSize)]: literal index.
+ // [rsp + (3 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into rcx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ movq(rcx, Operand(rsp, 3 * kPointerSize));
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rcx,
+ FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(rcx);
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ expected_map_index);
+ __ Assert(equal, message);
+ __ pop(rcx);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+ __ lea(rdx, Operand(rax, JSArray::kSize));
+ __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rdx, i), rbx);
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ // We don't use CmpObjectType because we manipulate the type field.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
+ __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+ __ j(above_equal, &not_string);
+ __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiTest(rdx);
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &true_result);
+ // HeapNumber => false iff +0, -0, or NaN.
+ // These three cases set the zero flag when compared to zero using ucomisd.
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ j(zero, &false_result);
+ // Fall through to |true_result|.
+
+ // Return 1/0 for true/false in rax.
+ __ bind(&true_result);
+ __ movq(rax, Immediate(1));
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ xor_(rax, rax);
+ __ ret(1 * kPointerSize);
+}
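// The stub above is V8's hand-written ToBoolean conversion. The following
// stand-alone C++ model (not part of this commit; the type names and fields
// are hypothetical stand-ins for the map/type checks the assembly performs)
// mirrors the same decision sequence:
#include <cmath>
#include <string>

enum class Kind { kNull, kUndetectable, kJSObject, kString, kHeapNumber };

struct ModelValue {
  Kind kind;
  std::string string_value;  // meaningful when kind == kString
  double number_value;       // meaningful when kind == kHeapNumber
};

bool ModelToBoolean(const ModelValue& v) {
  switch (v.kind) {
    case Kind::kNull:         return false;                    // 'null' => false
    case Kind::kUndetectable: return false;                    // undetectable => false
    case Kind::kJSObject:     return true;                     // JavaScript object => true
    case Kind::kString:       return !v.string_value.empty();  // string => false iff empty
    case Kind::kHeapNumber:                                    // +0, -0 and NaN => false
      return !(v.number_value == 0.0 || std::isnan(v.number_value));
  }
  return true;  // everything else takes the true result
}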
+
+
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "",
+ static_operands_type_.ToString(),
+ BinaryOpIC::GetName(runtime_operands_type_));
+ return name_;
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(left_arg)) {
+ __ movq(right_arg, right);
+ } else if (right.is(right_arg)) {
+ __ movq(left_arg, left);
+ } else if (left.is(right_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(left_arg, right);
+ SetArgsReversed();
+ } else {
+        // Order of moves is important to avoid destroying the left argument.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ } else if (right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(right_arg, left);
+ SetArgsReversed();
+ } else {
+        // Order of moves is important to avoid destroying the right argument.
+ __ movq(right_arg, right);
+ __ movq(left_arg, left);
+ }
+ } else {
+ // Order of moves is not important.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ Push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (left.is(left_arg)) {
+ __ Move(right_arg, right);
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ Move(left_arg, right);
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, left and right_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite left before moving
+ // it to left_arg.
+ __ movq(left_arg, left);
+ __ Move(right_arg, right);
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ Push(left);
+ __ push(right);
+ } else {
+ // The calling convention with registers is left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (right.is(right_arg)) {
+ __ Move(left_arg, left);
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ Move(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // For non-commutative operations, right and left_arg might be
+ // the same register. Therefore, the order of the moves is
+ // important here in order to not overwrite right before moving
+ // it to right_arg.
+ __ movq(right_arg, right);
+ __ Move(left_arg, left);
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+ // If the operands are not both numbers, jump to not_numbers.
+ // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+ // NumberOperands assumes both are smis or heap numbers.
+ static void LoadSSE2SmiOperands(MacroAssembler* masm);
+ static void LoadSSE2NumberOperands(MacroAssembler* masm);
+ static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers);
+
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ Label* operand_conversion_failure,
+ Register heap_number_map);
+ // As above, but we know the operands to be numbers. In that case,
+ // conversion can't fail.
+ static void LoadNumbersAsIntegers(MacroAssembler* masm);
+};
+
+
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
+ // dividend in rax and rdx free for the division. Use rax, rbx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = rdx;
+ Register right = rax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = rax;
+ right = rbx;
+ if (HasArgsInRegisters()) {
+ __ movq(rbx, rax);
+ __ movq(rax, rdx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ movq(right, Operand(rsp, 1 * kPointerSize));
+ __ movq(left, Operand(rsp, 2 * kPointerSize));
+ }
+
+ Label not_smis;
+ // 2. Smi check both operands.
+ if (static_operands_type_.IsSmi()) {
+ // Skip smi check if we know that both arguments are smis.
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+ if (op_ == Token::BIT_OR) {
+ // Handle OR here, since we do extra smi-checking in the or code below.
+ __ SmiOr(right, right, left);
+ GenerateReturn(masm);
+ return;
+ }
+ } else {
+ if (op_ != Token::BIT_OR) {
+ // Skip the check for OR as it is better combined with the
+ // actual operation.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ }
+ }
+
+ // 3. Operands are both smis (except for OR), perform the operation leaving
+ // the result in rax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
+ switch (op_) {
+ case Token::ADD: {
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
+ break;
+ }
+
+ case Token::SUB: {
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
+ break;
+ }
+
+ case Token::MUL:
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+ break;
+
+ case Token::DIV:
+ ASSERT(left.is(rax));
+ __ SmiDiv(left, left, right, &use_fp_on_smis);
+ break;
+
+ case Token::MOD:
+ ASSERT(left.is(rax));
+ __ SmiMod(left, left, right, slow);
+ break;
+
+ case Token::BIT_OR:
+ ASSERT(right.is(rax));
+ __ movq(rcx, right); // Save the right operand.
+ __ SmiOr(right, right, left); // BIT_OR is commutative.
+ __ testb(right, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis);
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
+ break;
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ switch (op_) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(left, left, right);
+ break;
+ case Token::SHR:
+ __ SmiShiftLogicalRight(left, left, right, slow);
+ break;
+ case Token::SHL:
+ __ SmiShiftLeft(left, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ movq(rax, left);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // 4. Emit return of result in rax.
+ GenerateReturn(masm);
+
+ // 5. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ ASSERT(use_fp_on_smis.is_linked());
+ __ bind(&use_fp_on_smis);
+ if (op_ == Token::DIV) {
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ }
+ // left is rdx, right is rax.
+ __ AllocateHeapNumber(rbx, rcx, slow);
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rbx);
+ GenerateReturn(masm);
+ }
+ default:
+ break;
+ }
+
+ // 6. Non-smi operands, fall out to the non-smi code with the operands in
+ // rdx and rax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+
+ switch (op_) {
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in rax, rbx at this point.
+ __ movq(rdx, rax);
+ __ movq(rax, rbx);
+ break;
+
+ case Token::BIT_OR:
+ // Right operand is saved in rcx and rax was destroyed by the smi
+ // operation.
+ __ movq(rax, rcx);
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (ShouldGenerateSmiCode()) {
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) {
+ if (!HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+ }
+ // Floating point case.
+ if (ShouldGenerateFPCode()) {
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-smi argument occurs
+ // (and only if smi code is generated). This is the right moment to
+ // patch to HEAP_NUMBERS state. The transition is attempted only for
+ // the four basic operations. The stub stays in the DEFAULT state
+ // forever for all other operations (also if smi code is skipped).
+ GenerateTypeTransition(masm);
+ break;
+ }
+
+ Label not_floats;
+ // rax: y
+ // rdx: x
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadSSE2NumberOperands(masm);
+ } else {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
+ }
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
+ case OVERWRITE_LEFT:
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ __ movq(rax, rdx);
+ break;
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rax, rbx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ __ bind(&not_floats);
+ if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+ !HasSmiCodeInStub()) {
+ // Execution reaches this point when the first non-number argument
+ // occurs (and only if smi code is skipped from the stub, otherwise
+ // the patching has already been done earlier in this case branch).
+ // A perfect moment to try patching to STRINGS for ADD operation.
+ if (op_ == Token::ADD) {
+ GenerateTypeTransition(masm);
+ }
+ }
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label skip_allocation, non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadNumbersAsIntegers(masm);
+ } else {
+ FloatingPointHelper::LoadAsIntegers(masm,
+ &call_runtime,
+ heap_number_map);
+ }
+ switch (op_) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
+ __ Integer32ToSmi(rax, rax);
+ GenerateReturn(masm);
+
+ // All bit-ops except SHR return a signed int32 that can be
+ // returned immediately as a smi.
+ // We might need to allocate a HeapNumber if we shift a negative
+ // number right by zero (i.e., convert to UInt32).
+ if (op_ == Token::SHR) {
+ ASSERT(non_smi_shr_result.is_linked());
+ __ bind(&non_smi_shr_result);
+ // Allocate a heap number if needed.
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &call_runtime,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ GenerateReturn(masm);
+ }
+
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ }
+
+ // If all else fails, use the runtime system to get the correct
+  // result. If arguments were passed in registers, place them on the
+ // stack in the correct order below the return address.
+ __ bind(&call_runtime);
+
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ switch (op_) {
+ case Token::ADD: {
+ // Registers containing left and right operands respectively.
+ Register lhs, rhs;
+
+ if (HasArgsReversed()) {
+ lhs = rax;
+ rhs = rdx;
+ } else {
+ lhs = rdx;
+ rhs = rax;
+ }
+
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1, string1_smi2;
+
+ // If this stub has already generated FP-specific code then the arguments
+ // are already in rdx and rax.
+ if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+ GenerateLoadArguments(masm);
+ }
+
+ Condition is_smi;
+ is_smi = masm->CheckSmi(lhs);
+ __ j(is_smi, &not_string1);
+ __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
+ __ j(above_equal, &not_string1);
+
+      // First argument is a string, test second.
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &string1_smi2);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&string1_smi2);
+ // First argument is a string, second is a smi. Try to lookup the number
+ // string for the smi in the number string cache.
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm, rhs, rbx, rcx, r8, true, &string1);
+
+ // Replace second argument on stack and tailcall string add stub to make
+ // the result.
+ __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+ __ TailCallStub(&string_add_stub);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ is_smi = masm->CheckSmi(rhs);
+ __ j(is_smi, &not_strings);
+ __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ }
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ ASSERT(!HasArgsInRegisters());
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+ // If arguments are not passed in registers remove them from the stack before
+ // returning.
+ if (!HasArgsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ ASSERT(HasArgsInRegisters());
+ __ pop(rcx);
+ if (HasArgsReversed()) {
+ __ push(rax);
+ __ push(rdx);
+ } else {
+ __ push(rdx);
+ __ push(rax);
+ }
+ __ push(rcx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ // Ensure the operands are on the stack.
+ if (HasArgsInRegisters()) {
+ GenerateRegisterArgsPush(masm);
+ }
+
+ // Left and right arguments are already on stack.
+ __ pop(rcx); // Save the return address.
+
+ // Push this stub's key.
+ __ Push(Smi::FromInt(MinorKey()));
+
+ // Although the operation and the type info are encoded into the key,
+ // the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(op_));
+
+ __ Push(Smi::FromInt(runtime_operands_type_));
+
+ __ push(rcx); // The return address.
+
+ // Perform patching to an appropriate fast case and return the result.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Input on stack:
+ // rsp[8]: argument (should be number).
+ // rsp[0]: return address.
+ Label runtime_call;
+ Label runtime_call_clear_stack;
+ Label input_not_smi;
+ Label loaded;
+ // Test that rax is a number.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ JumpIfNotSmi(rax, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the bits of the double into rbx.
+ __ SmiToInteger32(rax, rax);
+ __ subq(rsp, Immediate(kPointerSize));
+ __ cvtlsi2sd(xmm1, rax);
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ Move(rbx, Factory::heap_number_map());
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // bits into rbx.
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rdx, rbx);
+ __ bind(&loaded);
+ // ST[0] == double value
+ // rbx = bits of double value.
+ // rdx = also bits of double value.
+ // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
+ // h = h0 = bits ^ (bits >> 32);
+ // h ^= h >> 16;
+ // h ^= h >> 8;
+ // h = h & (cacheSize - 1);
+ // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
+ __ sar(rdx, Immediate(32));
+ __ xorl(rdx, rbx);
+ __ movl(rcx, rdx);
+ __ movl(rax, rdx);
+ __ movl(rdi, rdx);
+ __ sarl(rdx, Immediate(8));
+ __ sarl(rcx, Immediate(16));
+ __ sarl(rax, Immediate(24));
+ __ xorl(rcx, rdx);
+ __ xorl(rax, rdi);
+ __ xorl(rcx, rax);
+ ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+ __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+
+ // ST[0] == double value.
+ // rbx = bits of double value.
+ // rcx = TranscendentalCache::hash(double value).
+ __ movq(rax, ExternalReference::transcendental_cache_array_address());
+ // rax points to cache array.
+ __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // rax points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ testq(rax, rax);
+ __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+  // Check that the layout of cache elements matches expectations.
+ { // NOLINT - doesn't like a single brace on a line.
+ TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ // Two uint_32's and a pointer per element.
+ CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
+ CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
+ CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
+ CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
+ }
+#endif
+ // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
+ __ addl(rcx, rcx);
+ __ lea(rcx, Operand(rax, rcx, times_8, 0));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ Label cache_miss;
+ __ cmpq(rbx, Operand(rcx, 0));
+ __ j(not_equal, &cache_miss);
+ // Cache hit!
+ __ movq(rax, Operand(rcx, 2 * kIntSize));
+ __ fstp(0); // Clear FPU stack.
+ __ ret(kPointerSize);
+
+ __ bind(&cache_miss);
+ // Update cache with new value.
+ Label nan_result;
+ GenerateOperation(masm, &nan_result);
+ __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ ret(kPointerSize);
+
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+
+ __ bind(&nan_result);
+ __ fstp(0); // Remove argument from FPU stack.
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ movq(Operand(rcx, 0), rbx);
+ __ movq(Operand(rcx, 2 * kIntSize), rax);
+ __ ret(kPointerSize);
+}
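// The cache lookup above hashes the double's bit pattern. A hypothetical
// stand-alone sketch of the same mixing (kModelCacheSize stands in for
// TranscendentalCache::kCacheSize, which only needs to be a power of two;
// the arithmetic-vs-logical shift difference in the assembly is irrelevant
// because the final mask keeps only the low bits):
#include <cstdint>
#include <cstring>

constexpr uint32_t kModelCacheSize = 512;  // assumed power-of-two cache size

uint32_t ModelCacheHash(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));           // bits of the double value
  uint32_t h0 = static_cast<uint32_t>(bits) ^
                static_cast<uint32_t>(bits >> 32);     // h0 = low 32 bits ^ high 32 bits
  uint32_t h = h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24);
  return h & (kModelCacheSize - 1);                    // index into the per-type cache array
}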
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
+ Label* on_nan_result) {
+ // Registers:
+ // rbx: Bits of input double. Must be preserved.
+ // rcx: Pointer to cache entry. Must be preserved.
+ // st(0): Input double
+ Label done;
+ ASSERT(type_ == TranscendentalCache::SIN ||
+ type_ == TranscendentalCache::COS);
+ // More transcendental types can be added later.
+
+ // Both fsin and fcos require arguments in the range +/-2^63 and
+ // return NaN for infinities and NaN. They can share all code except
+ // the actual fsin/fcos operation.
+ Label in_range;
+ // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+ // work. We must reduce it to the appropriate range.
+ __ movq(rdi, rbx);
+ // Move exponent and sign bits to low bits.
+ __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
+ // Remove sign bit.
+ __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
+ int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
+ __ cmpl(rdi, Immediate(supported_exponent_limit));
+ __ j(below, &in_range);
+ // Check for infinity and NaN. Both return NaN for sin.
+ __ cmpl(rdi, Immediate(0x7ff));
+ __ j(equal, on_nan_result);
+
+  // Use fprem1 to restrict the argument to the range +/-2*PI.
+ __ fldpi();
+ __ fadd(0);
+ __ fld(1);
+ // FPU Stack: input, 2*pi, input.
+ {
+ Label no_exceptions;
+ __ fwait();
+ __ fnstsw_ax();
+ // Clear if Illegal Operand or Zero Division exceptions are set.
+ __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
+ __ j(zero, &no_exceptions);
+ __ fnclex();
+ __ bind(&no_exceptions);
+ }
+
+ // Compute st(0) % st(1)
+ {
+ Label partial_remainder_loop;
+ __ bind(&partial_remainder_loop);
+ __ fprem1();
+ __ fwait();
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
+ // If C2 is set, computation only has partial result. Loop to
+ // continue computation.
+ __ j(not_zero, &partial_remainder_loop);
+ }
+ // FPU Stack: input, 2*pi, input % 2*pi
+ __ fstp(2);
+ // FPU Stack: input % 2*pi, 2*pi,
+ __ fstp(0);
+ // FPU Stack: input % 2*pi
+ __ bind(&in_range);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ fsin();
+ break;
+ case TranscendentalCache::COS:
+ __ fcos();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&done);
+}
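// A hypothetical C++ model of the argument handling above (not part of this
// commit): values already inside +/-2^63 go straight to sin/cos, infinities
// and NaNs yield NaN, and larger finite values are first reduced modulo 2*pi,
// which is what the fprem1 loop accomplishes.
#include <cmath>
#include <limits>

double ModelTranscendental(double input, bool is_sin) {
  if (std::isnan(input) || std::isinf(input)) {
    return std::numeric_limits<double>::quiet_NaN();   // NaN result path
  }
  if (std::abs(input) >= 9223372036854775808.0) {      // 2^63: outside the fsin/fcos range
    input = std::remainder(input, 6.283185307179586);  // IEEE remainder, like fprem1
  }
  return is_sin ? std::sin(input) : std::cos(input);
}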
+
+
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
+void IntegerConvert(MacroAssembler* masm,
+ Register result,
+ Register source) {
+ // Result may be rcx. If result and source are the same register, source will
+ // be overwritten.
+ ASSERT(!result.is(rdi) && !result.is(rbx));
+ // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+ // cvttsd2si (32-bit version) directly.
+ Register double_exponent = rbx;
+ Register double_value = rdi;
+ Label done, exponent_63_plus;
+ // Get double and extract exponent.
+ __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+ // Clear result preemptively, in case we need to return zero.
+ __ xorl(result, result);
+ __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
+  // Double the value to remove the sign bit, shift the exponent down to the
+  // least significant bits, and subtract the bias to get the unbiased exponent.
+ __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+ __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+ __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+ // Check whether the exponent is too big for a 63 bit unsigned integer.
+ __ cmpl(double_exponent, Immediate(63));
+ __ j(above_equal, &exponent_63_plus);
+ // Handle exponent range 0..62.
+ __ cvttsd2siq(result, xmm0);
+ __ jmp(&done);
+
+ __ bind(&exponent_63_plus);
+ // Exponent negative or 63+.
+ __ cmpl(double_exponent, Immediate(83));
+ // If exponent negative or above 83, number contains no significant bits in
+ // the range 0..2^31, so result is zero, and rcx already holds zero.
+ __ j(above, &done);
+
+  // Exponent in range 63..83.
+ // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+ // the least significant exponent-52 bits.
+
+ // Negate low bits of mantissa if value is negative.
+ __ addq(double_value, double_value); // Move sign bit to carry.
+ __ sbbl(result, result); // And convert carry to -1 in result register.
+  // If the value is negative, compute (bits - 1) ^ -1; otherwise (bits - 0) ^ 0.
+ __ addl(double_value, result);
+ // Do xor in opposite directions depending on where we want the result
+ // (depending on whether result is rcx or not).
+
+ if (result.is(rcx)) {
+ __ xorl(double_value, result);
+ // Left shift mantissa by (exponent - mantissabits - 1) to save the
+ // bits that have positional values below 2^32 (the extra -1 comes from the
+ // doubling done above to move the sign bit into the carry flag).
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(double_value);
+ __ movl(result, double_value);
+ } else {
+ // As the then-branch, but move double-value to result before shifting.
+ __ xorl(result, double_value);
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(result);
+ }
+
+ __ bind(&done);
+}
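// A hypothetical stand-alone model of what IntegerConvert produces (not part
// of this commit): the low 32 bits of the heap number's value truncated to an
// integer, or zero when no bits of the value fall in that range. Names are
// illustrative only.
#include <cstdint>
#include <cstring>

int32_t ModelIntegerConvert(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;  // unbiased exponent
  if (exponent < 0) return 0;                    // |value| < 1 truncates to zero
  if (exponent <= 62) {                          // fits in 63 bits: the cvttsd2siq path
    return static_cast<int32_t>(static_cast<int64_t>(value));
  }
  if (exponent > 83) return 0;                   // no significant bits below 2^32
  // Exponent 63..83: line up the mantissa bits that fall below 2^32 and negate
  // the low word if the sign bit was set, as the assembly does via the carry.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t low = static_cast<uint32_t>(mantissa << (exponent - 52));
  return static_cast<int32_t>((bits >> 63) ? (0u - low) : low);
}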
+
+
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+ // Check float operands.
+ Label done;
+ Label rax_is_smi;
+ Label rax_is_object;
+ Label rdx_is_object;
+
+ __ JumpIfNotSmi(rdx, &rdx_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ JumpIfSmi(rax, &rax_is_smi);
+
+ __ bind(&rax_is_object);
+ IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
+ __ jmp(&done);
+
+ __ bind(&rdx_is_object);
+ IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
+ __ JumpIfNotSmi(rax, &rax_is_object);
+ __ bind(&rax_is_smi);
+ __ SmiToInteger32(rcx, rax);
+
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
+
+
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ Label* conversion_failure,
+ Register heap_number_map) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ JumpIfNotSmi(rdx, &arg1_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rdx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg1);
+  // Get the untagged integer version of the rdx heap number in rdx.
+ IntegerConvert(masm, rdx, rdx);
+
+ // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ JumpIfNotSmi(rax, &arg2_is_object);
+ __ SmiToInteger32(rax, rax);
+ __ movl(rcx, rax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rcx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the rax heap number in rcx.
+ IntegerConvert(masm, rcx, rax);
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
+
+
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+}
+
+
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+ // Load operand in rdx into xmm0.
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1.
+ __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&load_nonsmi_rax);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+
+ __ bind(&done);
+}
+
+
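+// Load the values in rdx and rax as doubles into xmm0 and xmm1, jumping to
+// not_numbers if either value is neither a smi nor a heap number.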
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
+ // Load operand in rdx into xmm0, or branch to not_numbers.
+ __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers); // Argument in rdx is not a number.
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1, or branch to not_numbers.
+ __ JumpIfSmi(rax, &load_smi_rax);
+
+ __ bind(&load_nonsmi_rax);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ bind(&done);
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ JumpIfNotSmi(rax, &try_float);
+
+ if (negative_zero_ == kIgnoreNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(equal, &done);
+ }
+
+ // Enter runtime system if the value of the smi is zero
+ // to make sure that we switch between 0 and -0.
+ // Also enter it if the value of the smi is Smi::kMinValue.
+ __ SmiNeg(rax, rax, &done);
+
+    // Either zero or Smi::kMinValue, neither of which becomes a smi when
+    // negated.
+ if (negative_zero_ == kStrictNegativeZero) {
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
+ } else {
+ __ jmp(&slow);
+ }
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(kScratchRegister, Immediate(0x01));
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (overwrite_ == UNARY_OVERWRITE) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rax, rax);
+
+ // Do the bitwise operation and smi tag the result.
+ __ notl(rax);
+ __ Integer32ToSmi(rax, rax);
+ }
+
+ // Return from the stub.
+ __ bind(&done);
+ __ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(rcx); // pop return address
+ __ push(rax);
+ __ push(rcx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The key is in rdx and the parameter count is in rax.
+
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(rdx, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+ // through register rax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmpq(rdx, rax);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ Ret();
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmpq(rdx, rcx);
+ __ j(above_equal, &slow);
+
+ // Read the argument from the stack and return it.
+ index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
+ __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
+ index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+ __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ pop(rbx); // Return address.
+ __ push(rdx);
+ __ push(rbx);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ SmiToInteger32(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Space on stack must already hold a smi.
+ __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
+  // Do not clobber the length index for the indexing operation since
+  // it is used to compute the size for allocation later.
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ // Try the new space allocation. Start out with computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ testl(rcx, rcx);
+ __ j(zero, &add_arguments_object);
+ __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
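+  // rcx now holds the total allocation size: the arguments object plus, when
+  // there are arguments, the elements FixedArray.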
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ movq(rdi, Operand(rdi, offset));
+
+ // Copy the JS object part.
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+ __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+ __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+ __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+ __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+ __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
+
+ // Setup the callee in-object property.
+ ASSERT(Heap::arguments_callee_index == 0);
+ __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ ASSERT(Heap::arguments_length_index == 1);
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ SmiTest(rcx);
+ __ j(zero, &done);
+
+ // Get the parameters pointer from the stack and untag the length.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
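+  // rax is tagged (TAG_OBJECT above), so rdi is a tagged pointer to the
+  // elements array placed directly after the arguments object.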
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+ __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
+ __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+ __ addq(rdi, Immediate(kPointerSize));
+ __ subq(rdx, Immediate(kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+  // Jump straight to the runtime system if native RegExp support is not
+  // selected at compile time (V8_INTERPRETED_REGEXP) or if entry into
+  // generated RegExp code is turned off (FLAG_regexp_entry_native).
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+  // Stack frame on entry.
+  //  rsp[0]: return address
+  //  rsp[8]: last_match_info (expected JSArray)
+  //  rsp[16]: previous index
+  //  rsp[24]: subject string
+  //  rsp[32]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ __ testq(kScratchRegister, kScratchRegister);
+ __ j(zero, &runtime);
+
+
+ // Check that the first argument is a JSRegExp object.
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ Condition is_smi = masm->CheckSmi(rcx);
+ __ Check(NegateCondition(is_smi),
+ "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+ }
+
+ // rcx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
+ __ j(not_equal, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ leal(rdx, Operand(rdx, rdx, times_1, 2));
+ // Check that the static offsets vector buffer is large enough.
+ __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ j(above, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the second argument is a string.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ JumpIfSmi(rax, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // rax: Subject string.
+ // rcx: RegExp data (FixedArray).
+ // rdx: Number of capture registers.
+  // Check that the third argument is a non-negative smi less than the string
+  // length. A negative value will be greater (unsigned comparison).
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(rbx, &runtime);
+ __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // rdx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ JumpIfSmi(rax, &runtime);
+ __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+ __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ Cmp(rax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information. Ensure no overflow in add.
+ STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+ __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmpl(rdx, rax);
+ __ j(greater, &runtime);
+
+ // rcx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_ascii_string, seq_two_byte_string, check_code;
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ // First check for flat two byte string.
+ __ andb(rbx, Immediate(
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be a flat ascii string.
+ __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_ascii_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+  STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
+ __ j(not_zero, &runtime);
+ // String is a cons string.
+ __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
+ __ Cmp(rdx, Factory::empty_string());
+ __ j(not_equal, &runtime);
+ __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // String is a cons string with empty second part.
+ // rax: first part of cons string.
+ // rbx: map of first part of cons string.
+ // Is first part a flat two byte string?
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask | kStringEncodingMask));
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
+ __ j(zero, &seq_two_byte_string);
+ // Any other flat string must be ascii.
+ __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+ Immediate(kStringRepresentationMask));
+ __ j(not_zero, &runtime);
+
+ __ bind(&seq_ascii_string);
+ // rax: subject string (sequential ascii)
+ // rcx: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(rdi, 1); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // rax: subject string (flat two-byte)
+ // rcx: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rdi, 0); // Type is two byte.
+
+ __ bind(&check_code);
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains the hole.
+ __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+
+ // rax: subject string
+ // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // r11: code
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
+
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+ // r11: code
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+ // rsi is caller save on Windows and used to pass parameter on Linux.
+ __ push(rsi);
+
+ static const int kRegExpExecuteArguments = 7;
+ __ PrepareCallCFunction(kRegExpExecuteArguments);
+ int argument_slots_on_stack =
+ masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ Immediate(1));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
+ __ movq(r9, Operand(kScratchRegister, 0));
+ __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
+ __ addq(r9, Operand(kScratchRegister, 0));
+ // Argument 6 passed in r9 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
+#endif
+
+ // Argument 5: static offsets vector buffer.
+ __ movq(r8, ExternalReference::address_of_static_offsets_vector());
+ // Argument 5 passed in r8 on Linux and on the stack on Windows.
+#ifdef _WIN64
+ __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
+#endif
+
+ // First four arguments are passed in registers on both Linux and Windows.
+#ifdef _WIN64
+ Register arg4 = r9;
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg4 = rcx;
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+
+  // Keep track of aliasing between argX defined above and the registers used.
+ // rax: subject string
+ // rbx: previous index
+ // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+ // r11: code
+
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label setup_two_byte, setup_rest;
+ __ testb(rdi, rdi);
+ __ j(zero, &setup_two_byte);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ jmp(&setup_rest);
+ __ bind(&setup_two_byte);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
+
+ __ bind(&setup_rest);
+ // Argument 2: Previous index.
+ __ movq(arg2, rbx);
+
+ // Argument 1: Subject string.
+ __ movq(arg1, rax);
+
+ // Locate the code entry and call it.
+ __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r11, kRegExpExecuteArguments);
+
+ // rsi is caller save, as it is used to pass parameter.
+ __ pop(rsi);
+
+ // Check the result.
+ Label success;
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ j(equal, &success);
+ Label failure;
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ __ j(equal, &failure);
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
+  // If it is not an exception, it can only be retry.
+  // Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+  // Result must now be exception. If there is no pending exception already, a
+  // stack overflow (on the backtrack stack) was detected in RegExp code, but
+  // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ __ Cmp(kScratchRegister, Factory::the_hole_value());
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ Move(rax, Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ movq(rax, Operand(rsp, kJSRegExpOffset));
+ __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
+ __ SmiToInteger32(rax,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ leal(rdx, Operand(rax, rax, times_1, 2));
+
+ // rdx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rdx: number of capture registers
+ // Store the capture count.
+ __ Integer32ToSmi(kScratchRegister, rdx);
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ kScratchRegister);
+ // Store last subject and last input.
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+ __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
+ __ movq(rcx, rbx);
+ __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
+
+ // rbx: last_match_info backing store (FixedArray)
+ // rcx: offsets vector
+ // rdx: number of capture registers
+ Label next_capture, done;
+  // Capture register counter starts from number of capture registers and
+  // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ subq(rdx, Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer and make it a smi.
+ __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
+ __ Integer32ToSmi(rdi, rdi, &runtime);
+ // Store the smi value in the last match info.
+ __ movq(FieldOperand(rbx,
+ rdx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ rdi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shrl(mask, Immediate(1));
+ __ subq(mask, Immediate(1)); // Make mask.
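+  // mask now holds (number of cache entries - 1), used below to wrap hash
+  // values into valid entry indices.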
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ if (!object_is_smi) {
+ __ JumpIfSmi(object, &is_smi);
+ __ CheckMap(object, Factory::heap_number_map(), not_found, true);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+ Register index = scratch;
+ Register probe = mask;
+ __ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope fscope(SSE2);
+ __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache);
+ }
+
+ __ bind(&is_smi);
+ __ SmiToInteger32(scratch, object);
+ GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+ Register index = scratch;
+ // Check if the entry is the smi we are looking for.
+ __ cmpq(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ __ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ movq(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
+
+
+void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+ Register hash,
+ Register mask) {
+ __ and_(hash, mask);
+  // Each entry in the string cache consists of two pointer-sized fields,
+  // but the times_twice_pointer_size (multiply by 16) scale factor is not
+  // supported by the addressing mode on x64, so the entry index has to be
+  // premultiplied before the lookup.
+ __ shl(hash, Immediate(kPointerSizeLog2 + 1));
+}
+
+
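+// Convert the number argument on the stack to a string, returning a cached
+// string from the number string cache when possible and falling back to the
+// runtime otherwise.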
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ movq(rbx, Operand(rsp, kPointerSize));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
+ __ ret(1 * kPointerSize);
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+ ASSERT(cc != equal);
+ ASSERT((cc == less) || (cc == less_equal)
+ || (cc == greater) || (cc == greater_equal));
+ return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ Label check_unequal_objects, done;
+ // The compare stub returns a positive, negative, or zero 64-bit integer
+  // value in rax, corresponding to the result of comparing the two inputs.
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Two identical objects are equal unless they are both NaN or undefined.
+ {
+ Label not_identical;
+ __ cmpq(rax, rdx);
+ __ j(not_equal, &not_identical);
+
+ if (cc_ != equal) {
+ // Check for undefined. undefined OP undefined is false even though
+ // undefined == undefined.
+ Label check_for_nan;
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &check_for_nan);
+ __ Set(rax, NegativeComparisonResult(cc_));
+ __ ret(0);
+ __ bind(&check_for_nan);
+ }
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ // We cannot set rax to EQUAL until just before return because
+ // rax must be unchanged on jump to not_identical.
+
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(rax, EQUAL);
+ __ ret(0);
+ } else {
+ Label heap_number;
+ // If it's not a heap number, then return equal for (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc_ == greater_equal || cc_ == greater) {
+ __ neg(rax);
+ }
+ __ ret(0);
+ }
+
+ __ bind(&not_identical);
+ }
+
+ if (cc_ == equal) { // Both strict and non-strict.
+ Label slow; // Fallthrough label.
+
+ // If we're doing a strict equality comparison, we don't have to do
+ // type conversion, so we generate code to do fast comparison for objects
+ // and oddballs. Non-smi numbers and strings still go through the usual
+ // slow-case code.
+ if (strict_) {
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ {
+ Label not_smis;
+ __ SelectNonSmi(rbx, rax, rdx, &not_smis);
+
+ // Check if the non-smi operand is a heap number.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal. ebx (the lower half of rbx) is not zero.
+ __ movq(rax, rbx);
+ __ ret(0);
+
+ __ bind(&not_smis);
+ }
+
+      // If either operand is a JSObject or an oddball value, then they are not
+      // equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+
+ // If the first object is a JS object, we have done pointer comparison.
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &first_non_object);
+ // Return non-zero (eax (not rax) is not zero)
+ Label return_not_equal;
+ STATIC_ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(rcx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
+
+ // Fall through to the general case.
+ }
+ __ bind(&slow);
+ }
+
+ // Generate the number comparison code.
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
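+    // Clear rax and rcx; the setcc instructions below only write the low byte.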
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(0);
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
+ }
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
+
+ // Fast negative check for symbol-to-symbol equality.
+ Label check_for_strings;
+ if (cc_ == equal) {
+ BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
+ BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
+
+ // We've already checked for object identity, so if both operands
+ // are symbols they aren't equal. Register eax (not rax) already holds a
+ // non-zero value, which indicates not equal, so just return.
+ __ ret(0);
+ }
+
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(
+ rdx, rax, rcx, rbx, &check_unequal_objects);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rdi,
+ r8);
+
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Not strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects, return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+ __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(below, &not_both_objects);
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &not_both_objects);
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(rax, EQUAL);
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(0);
+ __ bind(&not_both_objects);
+ }
+
+ // Push arguments below the return address to prepare jump to builtin.
+ __ pop(rcx);
+ __ push(rdx);
+ __ push(rax);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ }
+
+ // Restore return address on the stack.
+ __ push(rcx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address on the stack so we
+ // temporarily store that in a register.
+ __ pop(rax);
+ __ Push(Smi::FromInt(0));
+ __ push(rax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean) check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ JumpIfSmi(rax, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
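+    // Store the boxed object returned in rax back into the receiver slot.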
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ JumpIfSmi(rdi, &slow);
+ // Goto slow case if we do not have a function.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &slow);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __ Set(rax, argc_);
+ __ Set(rbx, 0);
+ __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // Check that stack should contain next handler, frame pointer, state and
+ // return address in that order.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
+ StackHandlerConstants::kStateOffset);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
+ StackHandlerConstants::kPCOffset);
+
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
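+  // rsp now points at the topmost try-handler on the stack.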
+ // get next in chain
+ __ pop(rcx);
+ __ movq(Operand(kScratchRegister, 0), rcx);
+ __ pop(rbp); // pop frame pointer
+ __ pop(rdx); // remove state
+
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ xor_(rsi, rsi); // tentatively set context pointer to NULL
+ Label skip;
+ __ cmpq(rbp, Immediate(0));
+ __ j(equal, &skip);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+ __ ret(0);
+}
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ Label empty_result;
+ Label prologue;
+ Label promote_scheduled_exception;
+ __ EnterApiExitFrame(kStackSpace, 0);
+ ASSERT_EQ(kArgc, 4);
+#ifdef _WIN64
+ // All the parameters should be set up by a caller.
+#else
+ // Set 1st parameter register with property name.
+ __ movq(rsi, rdx);
+ // Second parameter register rdi should be set with pointer to AccessorInfo
+ // by a caller.
+#endif
+ // Call the api function!
+ __ movq(rax,
+ reinterpret_cast<int64_t>(fun()->address()),
+ RelocInfo::RUNTIME_ENTRY);
+ __ call(rax);
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address();
+ __ movq(rsi, scheduled_exception_address);
+ __ Cmp(Operand(rsi, 0), Factory::the_hole_value());
+ __ j(not_equal, &promote_scheduled_exception);
+#ifdef _WIN64
+ // rax keeps a pointer to v8::Handle, unpack it.
+ __ movq(rax, Operand(rax, 0));
+#endif
+ // Check if the result handle holds 0.
+ __ testq(rax, rax);
+ __ j(zero, &empty_result);
+ // It was non-zero. Dereference to get the result value.
+ __ movq(rax, Operand(rax, 0));
+ __ bind(&prologue);
+ __ LeaveExitFrame();
+ __ ret(0);
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ bind(&empty_result);
+ // It was zero; the result is undefined.
+ __ Move(rax, Factory::undefined_value());
+ __ jmp(&prologue);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate_scope,
+ int /* alignment_skew */) {
+ // rax: result parameter for PerformGC, if any.
+ // rbx: pointer to C function (C callee-saved).
+ // rbp: frame pointer (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
+ // r12: pointer to the first argument (C callee-saved).
+ // This pointer is reused in LeaveExitFrame(), so it is stored in a
+ // callee-saved register.
+
+ // Simple results returned in rax (both AMD64 and Win64 calling conventions).
+ // Complex results must be written to address passed as first argument.
+ // AMD64 calling convention: a struct of two pointers in rax+rdx
+
+ // Check stack alignment.
+ if (FLAG_debug_code) {
+ __ CheckStackAlignment();
+ }
+
+ if (do_gc) {
+ // Pass failure code returned from last attempt as first argument to
+ // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
+ // stack is known to be aligned. This function takes one argument which is
+ // passed in register.
+#ifdef _WIN64
+ __ movq(rcx, rax);
+#else // _WIN64
+ __ movq(rdi, rax);
+#endif
+ __ movq(kScratchRegister,
+ FUNCTION_ADDR(Runtime::PerformGC),
+ RelocInfo::RUNTIME_ENTRY);
+ __ call(kScratchRegister);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ incl(Operand(kScratchRegister, 0));
+ }
+
+ // Call C function.
+#ifdef _WIN64
+ // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
+ // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
+ __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
+ __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
+ if (result_size_ < 2) {
+ // Pass a pointer to the Arguments object as the first argument.
+ // Return result in single register (rax).
+ __ lea(rcx, Operand(rsp, 4 * kPointerSize));
+ } else {
+ ASSERT_EQ(2, result_size_);
+ // Pass a pointer to the result location as the first argument.
+ __ lea(rcx, Operand(rsp, 6 * kPointerSize));
+ // Pass a pointer to the Arguments object as the second argument.
+ __ lea(rdx, Operand(rsp, 4 * kPointerSize));
+ }
+
+#else // _WIN64
+ // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
+ __ movq(rdi, r14); // argc.
+ __ movq(rsi, r12); // argv.
+#endif
+ __ call(rbx);
+ // Result is in rax - do not destroy this register!
+
+ if (always_allocate_scope) {
+ __ movq(kScratchRegister, scope_depth);
+ __ decl(Operand(kScratchRegister, 0));
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+#ifdef _WIN64
+ // If return value is on the stack, pop it to registers.
+ if (result_size_ > 1) {
+ ASSERT_EQ(2, result_size_);
+ // Read result values stored on stack. Result is stored
+ // above the four argument mirror slots and the two
+ // Arguments object slots.
+ __ movq(rax, Operand(rsp, 6 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 7 * kPointerSize));
+ }
+#endif
+ __ lea(rcx, Operand(rax, 1));
+ // Lower 2 bits of rcx are 0 iff rax has failure tag.
+ __ testl(rcx, Immediate(kFailureTagMask));
+ __ j(zero, &failure_returned);
+
+ // Exit the JavaScript to C++ exit frame.
+ __ LeaveExitFrame(result_size_);
+ __ ret(0);
+
+ // Handling of failure.
+ __ bind(&failure_returned);
+
+ Label retry;
+ // If the returned exception is RETRY_AFTER_GC continue at retry label
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ j(zero, &retry);
+
+ // Special handling of out of memory exceptions.
+ __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ cmpq(rax, kScratchRegister);
+ __ j(equal, throw_out_of_memory_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(kScratchRegister, pending_exception_address);
+ __ movq(rax, Operand(kScratchRegister, 0));
+ __ movq(rdx, ExternalReference::the_hole_value_location());
+ __ movq(rdx, Operand(rdx, 0));
+ __ movq(Operand(kScratchRegister, 0), rdx);
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
+ __ j(equal, throw_termination_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ // Retry.
+ __ bind(&retry);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ // Fetch top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ __ movq(kScratchRegister, handler_address);
+ __ movq(rsp, Operand(kScratchRegister, 0));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
+ __ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ __ movq(rsp, Operand(rsp, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to next handler past the current ENTRY handler.
+ __ movq(kScratchRegister, handler_address);
+ __ pop(Operand(kScratchRegister, 0));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ movq(rax, Immediate(false));
+ __ store_rax(external_caught);
+
+ // Set pending exception and rax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ store_rax(pending_exception);
+ }
+
+ // Clear the context pointer.
+ __ xor_(rsi, rsi);
+
+ // Restore registers from handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
+ StackHandlerConstants::kFPOffset);
+ __ pop(rbp); // FP
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
+ StackHandlerConstants::kStateOffset);
+ __ pop(rdx); // State
+
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
+ StackHandlerConstants::kPCOffset);
+ __ ret(0);
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // rax: number of arguments including receiver
+ // rbx: pointer to C function (C callee-saved)
+ // rbp: frame pointer of calling JS frame (restored after C call)
+ // rsp: stack pointer (restored after C call)
+ // rsi: current context (restored)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(result_size_);
+
+ // rax: Holds the context at this point, but should not be used.
+ // On entry to code generated by GenerateCore, it must hold
+ // a failure result if the collect_garbage argument to GenerateCore
+ // is true. This failure result can be the result of code
+ // generated by a previous call to GenerateCore. The value
+ // of rax is then passed to Runtime::PerformGC.
+ // rbx: pointer to builtin function (C callee-saved).
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
+ // r12: argv pointer (C callee-saved).
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ movq(rax, failure, RelocInfo::NONE);
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
+
+ // Setup frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Push the stack frame type marker twice.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ // Scratch register is neither callee-save, nor an argument register on any
+ // platform. It's free to use at this point.
+ // Cannot use smi-register for loading yet.
+ __ movq(kScratchRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+ __ push(r15);
+#ifdef _WIN64
+ __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+#endif
+ __ push(rbx);
+  // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+ // callee save as well.
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ load_rax(c_entry_fp);
+ __ push(rax);
+
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(kRootRegister, roots_address);
+ __ InitializeSmiConstantRegister();
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ load_rax(js_entry_sp);
+ __ testq(rax, rax);
+ __ j(not_zero, &not_outermost_js);
+ __ movq(rax, rbp);
+ __ store_rax(js_entry_sp);
+ __ bind(&not_outermost_js);
+#endif
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ store_rax(pending_exception);
+ __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+ // Clear any pending exceptions.
+ __ load_rax(ExternalReference::the_hole_value_location());
+ __ store_rax(pending_exception);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline
+ // builtin and pop the faked function when we return. We load the address
+ // from an external reference instead of inlining the call target address
+ // directly in the code, because the builtin stubs may not have been
+ // generated yet at the time this code is generated.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ load_rax(construct_entry);
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ load_rax(entry);
+ }
+ __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ call(kScratchRegister);
+
+ // Unlink this frame from the handler chain.
+ __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ __ pop(Operand(kScratchRegister, 0));
+ // Pop next_sp.
+ __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If the current rbp value is the same as the js_entry_sp value, it means
+  // that the current function is the outermost.
+ __ movq(kScratchRegister, js_entry_sp);
+ __ cmpq(rbp, Operand(kScratchRegister, 0));
+ __ j(not_equal, &not_outermost_js_2);
+ __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+ __ pop(Operand(kScratchRegister, 0));
+
+ // Restore callee-saved registers (X64 conventions).
+ __ pop(rbx);
+#ifdef _WIN64
+  // Callee save in Win64 ABI, arguments/volatile in AMD64 ABI.
+ __ pop(rsi);
+ __ pop(rdi);
+#endif
+ __ pop(r15);
+ __ pop(r14);
+ __ pop(r13);
+ __ pop(r12);
+ __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(rbp);
+ __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Implements "value instanceof function" operator.
+ // Expected input state:
+ // rsp[0] : return address
+ // rsp[1] : function pointer
+ // rsp[2] : value
+ // Returns a bitwise zero to indicate that the value
+  // is an instance of the function and anything else to
+ // indicate that the value is not an instance.
+
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ JumpIfSmi(rax, &slow);
+
+ // Check that the left hand is a JS object. Leave its map in rax.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ j(below, &slow);
+ __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Get the prototype of the function.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ // rdx is function, rax is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &miss);
+ __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &miss);
+ __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
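+  // Cache hit: the cached answer is zero when the value is an instance and
+  // non-zero otherwise, matching the stub's return convention.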
+ __ ret(2 * kPointerSize);
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(rdx, rbx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(rbx, &slow);
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ j(below, &slow);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Register mapping:
+ // rax is object map.
+ // rdx is function.
+ // rbx is function prototype.
+ __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+
+ __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ cmpq(rcx, rbx);
+ __ j(equal, &is_instance);
+ __ cmpq(rcx, kScratchRegister);
+ // The code at is_not_instance assumes that kScratchRegister contains a
+ // non-zero GCable value (the null object in this case).
+ __ j(equal, &is_not_instance);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ xorl(rax, rax);
+ // Store bitwise zero in the cache. This is a Smi in GC terms.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ // We have to store a non-zero value in the cache.
+ __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the parameters in a unique 16 bit value. To avoid duplicate stubs,
+ // the never-NaN-NaN condition is only taken into account if the condition
+ // is equal.
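+ // Packed fields (bit positions are given by the corresponding BitField
+ // declarations): the condition (asserted below to fit in 12 bits), whether
+ // the lhs_/rhs_ registers are used (always false here), strict_,
+ // never_nan_nan_ (only when cc_ == equal), and include_number_compare_.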
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case less: cc_name = "LT"; break;
+ case greater: cc_name = "GT"; break;
+ case less_equal: cc_name = "LE"; break;
+ case greater_equal: cc_name = "GE"; break;
+ case equal: cc_name = "EQ"; break;
+ case not_equal: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s",
+ cc_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name);
+ return name_;
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ testb(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ movq(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle non-flat strings.
+ __ testb(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ testb(result_, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ __ SmiToInteger32(scratch_, scratch_);
+ __ movzxwl(result_, FieldOperand(object_,
+ scratch_, times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ __ SmiToInteger32(scratch_, scratch_);
+ __ movzxbl(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&got_char_code);
+ __ Integer32ToSmi(result_, result_);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(rax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ movq(scratch_, rax);
+ }
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(rax)) {
+ __ movq(result_, rax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
+ __ j(above, &slow_case_);
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
+ __ movq(result_, FieldOperand(result_, index.reg, index.scale,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case_);
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(rax)) {
+ __ movq(result_, rax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+
+ // Load the two arguments.
+ __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ Condition is_smi;
+ is_smi = masm->CheckSmi(rax);
+ __ j(is_smi, &string_add_runtime);
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
+ __ j(above_equal, &string_add_runtime);
+
+ // First argument is a string, test the second.
+ is_smi = masm->CheckSmi(rdx);
+ __ j(is_smi, &string_add_runtime);
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // rax: first string
+ // rdx: second string
+ // Check if either of the strings is empty. In that case return the other.
+ Label second_not_zero_length, both_not_zero_length;
+ __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
+ __ SmiTest(rcx);
+ __ j(not_zero, &second_not_zero_length);
+ // Second string is empty, result is first string which is already in rax.
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&second_not_zero_length);
+ __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiTest(rbx);
+ __ j(not_zero, &both_not_zero_length);
+ // First string is empty, result is second string which is in rdx.
+ __ movq(rax, rdx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Both strings are non-empty.
+ // rax: first string
+ // rbx: length of first string
+ // rcx: length of second string
+ // rdx: second string
+ // r8: map of first string if string check was performed above
+ // r9: map of second string if string check was performed above
+ Label string_add_flat_result, longer_than_two;
+ __ bind(&both_not_zero_length);
+
+ // If the arguments were known to be strings, their maps were not loaded
+ // into r8 and r9 by the code above.
+ if (!string_check_) {
+ __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ }
+ // Get the instance types of the two strings as they will be needed soon.
+ __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
+ __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
+
+ // Look at the length of the result of adding the two strings.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
+ __ SmiAdd(rbx, rbx, rcx, NULL);
+ // Handle adding two one-character strings specially: look for the result in
+ // the symbol table first, since a matching two-character symbol may already
+ // exist.
+ __ SmiCompare(rbx, Smi::FromInt(2));
+ __ j(not_equal, &longer_than_two);
+
+ // Check that both strings are non-external ascii strings.
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
+ &string_add_runtime);
+
+ // Get the two characters forming the new string.
+ __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+
+ // Try to lookup two character string in symbol table. If it is not found
+ // just allocate a new one.
+ Label make_two_character_string, make_flat_ascii_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ __ Set(rbx, 2);
+ __ jmp(&make_flat_ascii_string);
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
+ __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
+ __ j(below, &string_add_flat_result);
+ // Handle exceptionally long strings in the runtime system.
+ STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+ __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
+ __ j(above, &string_add_runtime);
+
+ // If result is not supposed to be flat, allocate a cons string object. If
+ // both strings are ascii the result is an ascii cons string.
+ // rax: first string
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ Label non_ascii, allocated, ascii_data;
+ __ movl(rcx, r8);
+ __ and_(rcx, r9);
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ testl(rcx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii);
+ __ bind(&ascii_data);
+ // Allocate an ascii cons string.
+ __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
+ __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+ __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+ __ movq(rax, rcx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // rcx: first instance type AND second instance type.
+ // r8: first instance type.
+ // r9: second instance type.
+ __ testb(rcx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
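+ // At least one of the strings lacks the ascii data hint. Check whether the
+ // two instance types differ in exactly the ascii tag and the ascii data
+ // hint bit; if so, one string is ascii and the other is a two-byte string
+ // whose data is known to be ascii, so an ascii cons string is still safe.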
+ __ xor_(r8, r9);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ j(equal, &ascii_data);
+ // Allocate a two byte cons string.
+ __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are not
+ // external strings.
+ // rax: first string
+ // rbx: length of resulting flat string as smi
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ __ bind(&string_add_flat_result);
+ __ SmiToInteger32(rbx, rbx);
+ __ movl(rcx, r8);
+ __ and_(rcx, Immediate(kStringRepresentationMask));
+ __ cmpl(rcx, Immediate(kExternalStringTag));
+ __ j(equal, &string_add_runtime);
+ __ movl(rcx, r9);
+ __ and_(rcx, Immediate(kStringRepresentationMask));
+ __ cmpl(rcx, Immediate(kExternalStringTag));
+ __ j(equal, &string_add_runtime);
+ // Now check if both strings are ascii strings.
+ // rax: first string
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ Label non_ascii_string_add_flat_result;
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ testl(r8, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii_string_add_flat_result);
+ __ testl(r9, Immediate(kAsciiStringTag));
+ __ j(zero, &string_add_runtime);
+
+ __ bind(&make_flat_ascii_string);
+ // Both strings are ascii strings. As they are short they are both flat.
+ __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
+ // rcx: result string
+ __ movq(rbx, rcx);
+ // Locate first character of result.
+ __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // rax: first char of first argument
+ // rbx: result string
+ // rcx: first character of result
+ // rdx: second string
+ // rdi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
+ // Locate first character of second argument.
+ __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // rbx: result string
+ // rcx: next character of result
+ // rdx: first char of second argument
+ // rdi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
+ __ movq(rax, rbx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Handle creating a flat two byte result.
+ // rax: first string - known to be two byte
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ __ bind(&non_ascii_string_add_flat_result);
+ __ and_(r9, Immediate(kAsciiStringTag));
+ __ j(not_zero, &string_add_runtime);
+ // Both strings are two byte strings. As they are short they are both
+ // flat.
+ __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
+ // rcx: result string
+ __ movq(rbx, rcx);
+ // Locate first character of result.
+ __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // rax: first char of first argument
+ // rbx: result string
+ // rcx: first character of result
+ // rdx: second argument
+ // rdi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
+ // Locate first character of second argument.
+ __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // rbx: result string
+ // rcx: next character of result
+ // rdx: first char of second argument
+ // rdi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
+ __ movq(rax, rbx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+}
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
+ Label loop;
+ __ bind(&loop);
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (ascii) {
+ __ movb(kScratchRegister, Operand(src, 0));
+ __ movb(Operand(dest, 0), kScratchRegister);
+ __ incq(src);
+ __ incq(dest);
+ } else {
+ __ movzxwl(kScratchRegister, Operand(src, 0));
+ __ movw(Operand(dest, 0), kScratchRegister);
+ __ addq(src, Immediate(2));
+ __ addq(dest, Immediate(2));
+ }
+ __ decl(count);
+ __ j(not_zero, &loop);
+}
+
+
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
+ // Copy characters using rep movs of quadwords. Copy any remaining bytes one
+ // at a time after running rep movs.
+ // Count is a positive int32, dest and src are character pointers.
+ ASSERT(dest.is(rdi)); // rep movs destination
+ ASSERT(src.is(rsi)); // rep movs source
+ ASSERT(count.is(rcx)); // rep movs count
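+
+ // In C terms the copy below is roughly (illustrative sketch only):
+ //   bytes = ascii ? count : count * 2;
+ //   copy bytes / 8 quadwords with rep movsq, then copy the remaining
+ //   bytes & 7 bytes one at a time.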
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ testl(count, count);
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ STATIC_ASSERT(2 == sizeof(uc16));
+ __ addl(count, count);
+ }
+
+ // Don't enter the rep movs if there are fewer than 8 bytes to copy.
+ Label last_bytes;
+ __ testl(count, Immediate(~7));
+ __ j(zero, &last_bytes);
+
+ // Copy from rsi to rdi using the rep movs instruction.
+ __ movl(kScratchRegister, count);
+ __ shr(count, Immediate(3)); // Number of quadwords to copy.
+ __ repmovsq();
+
+ // Find number of bytes left.
+ __ movl(count, kScratchRegister);
+ __ and_(count, Immediate(7));
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ testl(count, count);
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ movb(kScratchRegister, Operand(src, 0));
+ __ movb(Operand(dest, 0), kScratchRegister);
+ __ incq(src);
+ __ incq(dest);
+ __ decl(count);
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that the two characters are not both digits, as such strings
+ // have a different hash algorithm. Don't try to look for these in the
+ // symbol table.
+ Label not_array_index;
+ __ leal(scratch, Operand(c1, -'0'));
+ __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ j(above, &not_array_index);
+ __ leal(scratch, Operand(c2, -'0'));
+ __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ j(below_equal, not_found);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ GenerateHashInit(masm, hash, c1, scratch);
+ GenerateHashAddCharacter(masm, hash, c2, scratch);
+ GenerateHashGetHash(masm, hash, scratch);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ shl(c2, Immediate(kBitsPerByte));
+ __ orl(chars, c2);
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load the symbol table.
+ Register symbol_table = c2;
+ __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ SmiToInteger32(mask,
+ FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ decl(mask);
+
+ Register undefined = scratch4;
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ // Registers
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string (32-bit int)
+ // symbol_table: symbol table
+ // mask: capacity mask (32-bit int)
+ // undefined: undefined value
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes];
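+ // Each probe inspects entry ((hash + offset) & mask) of the symbol table,
+ // where the offset is 0 for the first probe and
+ // SymbolTable::GetProbeOffset(i) afterwards, so at most kProbes slots are
+ // examined before giving up and jumping to not_found.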
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
+ __ movl(scratch, hash);
+ if (i > 0) {
+ __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+ }
+ __ andl(scratch, mask);
+
+ // Load the entry from the symbol table.
+ Register candidate = scratch; // Scratch register contains candidate.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ movq(candidate,
+ FieldOperand(symbol_table,
+ scratch,
+ times_pointer_size,
+ SymbolTable::kElementsStartOffset));
+
+ // If entry is undefined no string with this hash can be found.
+ __ cmpq(candidate, undefined);
+ __ j(equal, not_found);
+
+ // If length is not 2 the string is not a candidate.
+ __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
+ Smi::FromInt(2));
+ __ j(not_equal, &next_probe[i]);
+
+ // We use kScratchRegister as a temporary register, on the assumption that
+ // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
+ Register temp = kScratchRegister;
+
+ // Check that the candidate is a non-external ascii string.
+ __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+ __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(
+ temp, temp, &next_probe[i]);
+
+ // Check if the two characters match.
+ __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ andl(temp, Immediate(0x0000ffff));
+ __ cmpl(chars, temp);
+ __ j(equal, &found_in_symbol_table);
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = scratch;
+ __ bind(&found_in_symbol_table);
+ if (!result.is(rax)) {
+ __ movq(rax, result);
+ }
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash = character + (character << 10);
+ __ movl(hash, character);
+ __ shll(hash, Immediate(10));
+ __ addl(hash, character);
+ // hash ^= hash >> 6;
+ __ movl(scratch, hash);
+ __ sarl(scratch, Immediate(6));
+ __ xorl(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch) {
+ // hash += character;
+ __ addl(hash, character);
+ // hash += hash << 10;
+ __ movl(scratch, hash);
+ __ shll(scratch, Immediate(10));
+ __ addl(hash, scratch);
+ // hash ^= hash >> 6;
+ __ movl(scratch, hash);
+ __ sarl(scratch, Immediate(6));
+ __ xorl(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch) {
+ // hash += hash << 3;
+ __ leal(hash, Operand(hash, hash, times_8, 0));
+ // hash ^= hash >> 11;
+ __ movl(scratch, hash);
+ __ sarl(scratch, Immediate(11));
+ __ xorl(hash, scratch);
+ // hash += hash << 15;
+ __ movl(scratch, hash);
+ __ shll(scratch, Immediate(15));
+ __ addl(hash, scratch);
+
+ // if (hash == 0) hash = 27;
+ Label hash_not_zero;
+ __ j(not_zero, &hash_not_zero);
+ __ movl(hash, Immediate(27));
+ __ bind(&hash_not_zero);
+}
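+
+
+// Taken together, the three hash helpers above compute a Jenkins-style
+// one-at-a-time hash. In C the equivalent is roughly:
+//
+//   uint32_t hash = c0 + (c0 << 10); hash ^= hash >> 6;          // init
+//   hash += c;  hash += hash << 10;  hash ^= hash >> 6;          // add char
+//   hash += hash << 3; hash ^= hash >> 11; hash += hash << 15;   // finalize
+//   if (hash == 0) hash = 27;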
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: to
+ // rsp[16]: from
+ // rsp[24]: string
+
+ const int kToOffset = 1 * kPointerSize;
+ const int kFromOffset = kToOffset + kPointerSize;
+ const int kStringOffset = kFromOffset + kPointerSize;
+ const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
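+ // kArgumentsSize works out to 3 * kPointerSize: the to, from and string
+ // arguments.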
+
+ // Make sure first argument is a string.
+ __ movq(rax, Operand(rsp, kStringOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // rax: string
+ // rbx: instance type
+ // Calculate length of sub string using the smi values.
+ Label result_longer_than_two;
+ __ movq(rcx, Operand(rsp, kToOffset));
+ __ movq(rdx, Operand(rsp, kFromOffset));
+ __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
+
+ __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
+ __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
+ Label return_rax;
+ __ j(equal, &return_rax);
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked for in the symbol cache.
+ __ SmiToInteger32(rcx, rcx);
+ __ cmpl(rcx, Immediate(2));
+ __ j(greater, &result_longer_than_two);
+ __ j(less, &runtime);
+
+ // Sub string of length 2 requested.
+ // rax: string
+ // rbx: instance type
+ // rcx: sub string length (value is 2)
+ // rdx: from index (smi)
+ __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
+
+ // Get the two characters forming the sub string.
+ __ SmiToInteger32(rdx, rdx); // From index is no longer smi.
+ __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx,
+ FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&make_two_character_string);
+ // Setup registers for allocating the two character string.
+ __ movq(rax, Operand(rsp, kStringOffset));
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ Set(rcx, 2);
+
+ __ bind(&result_longer_than_two);
+
+ // rax: string
+ // rbx: instance type
+ // rcx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi is used by the following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
+ __ movq(rsi, rdx); // Restore rsi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ __ bind(&non_ascii_flat);
+ // rax: string
+ // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
+ // rcx: result string length
+ // Check for sequential two byte string
+ __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi is used by the following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
+ __ movq(rsi, rdx); // Restore rsi.
+
+ __ bind(&return_rax);
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Ensure that you can always subtract a string length from a non-negative
+ // number (e.g. another length).
+ STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
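+
+ // Strategy: remember left.length - right.length, compare the first
+ // min(left.length, right.length) characters, and if those are all equal let
+ // the sign of the remembered length difference decide the result.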
+
+ // Find minimum length and length difference.
+ __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ movq(scratch4, scratch1);
+ __ SmiSub(scratch4,
+ scratch4,
+ FieldOperand(right, String::kLengthOffset),
+ NULL);
+ // Register scratch4 now holds left.length - right.length.
+ const Register length_difference = scratch4;
+ Label left_shorter;
+ __ j(less, &left_shorter);
+ // The right string isn't longer than the left one.
+ // Get the right string's length by subtracting the (non-negative) difference
+ // from the left string's length.
+ __ SmiSub(scratch1, scratch1, length_difference, NULL);
+ __ bind(&left_shorter);
+ // Register scratch1 now holds Min(left.length, right.length).
+ const Register min_length = scratch1;
+
+ Label compare_lengths;
+ // If min-length is zero, go directly to comparing lengths.
+ __ SmiTest(min_length);
+ __ j(zero, &compare_lengths);
+
+ __ SmiToInteger32(min_length, min_length);
+
+ // Registers scratch2 and scratch3 are free.
+ Label result_not_equal;
+ Label loop;
+ {
+ // Check characters 0 .. min_length - 1 in a loop.
+ // Use scratch3 as loop index, min_length as limit and scratch2
+ // for computation.
+ const Register index = scratch3;
+ __ movl(index, Immediate(0)); // Index into strings.
+ __ bind(&loop);
+ // Compare characters.
+ // TODO(lrn): Could we load more than one character at a time?
+ __ movb(scratch2, FieldOperand(left,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ // Increment index and use -1 modifier on next load to give
+ // the previous load extra time to complete.
+ __ addl(index, Immediate(1));
+ __ cmpb(scratch2, FieldOperand(right,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize - 1));
+ __ j(not_equal, &result_not_equal);
+ __ cmpl(index, min_length);
+ __ j(not_equal, &loop);
+ }
+ // Completed loop without finding different characters.
+ // Compare lengths (precomputed).
+ __ bind(&compare_lengths);
+ __ SmiTest(length_difference);
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ ret(0);
+
+ Label result_greater;
+ __ bind(&result_not_equal);
+ // Unequal comparison of left to right, either character or length.
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ Move(rax, Smi::FromInt(LESS));
+ __ ret(0);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Move(rax, Smi::FromInt(GREATER));
+ __ ret(0);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: right string
+ // rsp[16]: left string
+
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
+ __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
+
+ // Check for identity.
+ Label not_same;
+ __ cmpq(rdx, rax);
+ __ j(not_equal, &not_same);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
+
+ // Inline comparison of ascii strings.
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ // Drop arguments from the stack
+ __ pop(rcx);
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ push(rcx);
+ GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
new file mode 100644
index 00000000..18213b93
--- /dev/null
+++ b/src/x64/code-stubs-x64.h
@@ -0,0 +1,389 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_CODE_STUBS_X64_H_
+#define V8_X64_CODE_STUBS_X64_H_
+
+#include "ic-inl.h"
+#include "type-info.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+ void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
+enum GenericBinaryFlags {
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags,
+ TypeInfo operands_type = TypeInfo::Unknown())
+ : op_(op),
+ mode_(mode),
+ flags_(flags),
+ args_in_registers_(false),
+ args_reversed_(false),
+ static_operands_type_(operands_type),
+ runtime_operands_type_(BinaryOpIC::DEFAULT),
+ name_(NULL) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ flags_(FlagBits::decode(key)),
+ args_in_registers_(ArgsInRegistersBits::decode(key)),
+ args_reversed_(ArgsReversedBits::decode(key)),
+ static_operands_type_(TypeInfo::ExpandedRepresentation(
+ StaticTypeInfoBits::decode(key))),
+ runtime_operands_type_(type_info),
+ name_(NULL) {
+ }
+
+ // Generate code to call the stub with the supplied arguments. This will add
+ // code at the call site to prepare arguments either in registers or on the
+ // stack together with the actual call.
+ void GenerateCall(MacroAssembler* masm, Register left, Register right);
+ void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+ void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+
+ bool ArgsInRegistersSupported() {
+ return (op_ == Token::ADD) || (op_ == Token::SUB)
+ || (op_ == Token::MUL) || (op_ == Token::DIV);
+ }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+ bool args_in_registers_; // Arguments passed in registers not on the stack.
+ bool args_reversed_; // Left and right argument are swapped.
+
+ // Number type information of operands, determined by code generator.
+ TypeInfo static_operands_type_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub %d (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_),
+ static_cast<int>(args_in_registers_),
+ static_cast<int>(args_reversed_),
+ static_operands_type_.ToString());
+ }
+#endif
+
+ // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
+ class ArgsReversedBits: public BitField<bool, 10, 1> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
+ class StaticTypeInfoBits: public BitField<int, 12, 3> {};
+ class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 2> {};
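+ // Reading the pattern TTNNNFRAOOOOOOOMM from least to most significant bit:
+ // MM = mode (2), OOOOOOO = op (7), A = args in registers (1),
+ // R = args reversed (1), F = flags (1), NNN = static type info (3) and
+ // TT = runtime type info (2), for 17 bits in total.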
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 17 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_)
+ | ArgsInRegistersBits::encode(args_in_registers_)
+ | ArgsReversedBits::encode(args_reversed_)
+ | StaticTypeInfoBits::encode(
+ static_operands_type_.ThreeBitRepresentation())
+ | RuntimeTypeInfoBits::encode(runtime_operands_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ bool IsOperationCommutative() {
+ return (op_ == Token::ADD) || (op_ == Token::MUL);
+ }
+
+ void SetArgsInRegisters() { args_in_registers_ = true; }
+ void SetArgsReversed() { args_reversed_ = true; }
+ bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
+
+ bool ShouldGenerateSmiCode() {
+ return HasSmiCodeInStub() &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ bool ShouldGenerateFPCode() {
+ return runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(runtime_operands_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
+ // not supported.
+ static void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be rdi.
+ Register src, // Must be rsi.
+ Register count, // Must be rcx.
+ bool ascii);
+
+
+ // Probe the symbol table for a two character string. If the string is not
+ // found by probing, a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found, the code falls through with the string in register rax.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character,
+ Register scratch);
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash,
+ Register scratch);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ explicit StringCompareStub() {}
+
+ // Compares two flat ascii strings and returns the result in rax after
+ // popping the two arguments from the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+ Register hash,
+ Register mask);
+
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("NumberToStringStub\n");
+ }
+#endif
+};
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register addr, Register scratch)
+ : object_(object), addr_(addr), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register addr_;
+ Register scratch_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+ object_.code(), addr_.code(), scratch_.code());
+ }
+#endif
+
+ // Minor key encoding in 12 bits. 4 bits for each of the three
+ // registers (object, address and scratch) OOOOAAAASSSS.
+ class ScratchBits : public BitField<uint32_t, 0, 4> {};
+ class AddressBits : public BitField<uint32_t, 4, 4> {};
+ class ObjectBits : public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ AddressBits::encode(addr_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 77828d63..b1dd45e2 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "bootstrapper.h"
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -807,55 +808,6 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
}
-class FloatingPointHelper : public AllStatic {
- public:
- // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
- // If the operands are not both numbers, jump to not_numbers.
- // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
- // NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
- static void LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
-};
-
-
-const char* GenericBinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
- op_name,
- overwrite_name,
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
- args_in_registers_ ? "RegArgs" : "StackArgs",
- args_reversed_ ? "_R" : "",
- static_operands_type_.ToString(),
- BinaryOpIC::GetName(runtime_operands_type_));
- return name_;
-}
-
-
// Call the specialized stub for a binary operation.
class DeferredInlineBinaryOperation: public DeferredCode {
public:
@@ -1072,7 +1024,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
overwrite_mode,
NO_SMI_CODE_IN_STUB,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
} else if (right_is_smi_constant) {
answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
false, overwrite_mode);
@@ -1095,7 +1047,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
overwrite_mode,
NO_GENERIC_BINARY_FLAGS,
operands_type);
- answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
}
}
@@ -2038,41 +1990,6 @@ void CodeGenerator::Comparison(AstNode* node,
ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
left_side_constant_smi, right_side_constant_smi,
is_loop_condition);
- } else if (cc == equal &&
- (left_side_constant_null || right_side_constant_null)) {
- // To make null checks efficient, we check if either the left side or
- // the right side is the constant 'null'.
- // If so, we optimize the code by inlining a null check instead of
- // calling the (very) general runtime routine for checking equality.
- Result operand = left_side_constant_null ? right_side : left_side;
- right_side.Unuse();
- left_side.Unuse();
- operand.ToRegister();
- __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
- if (strict) {
- operand.Unuse();
- dest->Split(equal);
- } else {
- // The 'null' value is only equal to 'undefined' if using non-strict
- // comparisons.
- dest->true_target()->Branch(equal);
- __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
- dest->true_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(operand.reg());
- dest->false_target()->Branch(is_smi);
-
- // It can be an undetectable object.
- // Use a scratch register in preference to spilling operand.reg().
- Result temp = allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ movq(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
- __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- temp.Unuse();
- operand.Unuse();
- dest->Split(not_zero);
- }
} else if (left_side_constant_1_char_string ||
right_side_constant_1_char_string) {
if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
@@ -2619,7 +2536,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
__ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(rcx, SharedFunctionInfo::kCodeOffset), apply_code);
+ __ Cmp(rcx, apply_code);
__ j(not_equal, &build_args);
// Check that applicand is a function.
@@ -4833,7 +4750,7 @@ void DeferredAllocateInNewSpace::Generate() {
for (int i = kNumRegs - 1; i >= 0; i--) {
if (registers_to_save_ & (1 << i)) {
Register save_register = { i };
- __ push(save_register);
+ __ pop(save_register);
}
}
}
@@ -5092,12 +5009,9 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5184,12 +5098,9 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
frame()->Push(&value);
Load(node->value());
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
// Construct the implicit binary operation.
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5287,11 +5198,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
Load(node->value());
// Perform the binary operation.
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
+ bool overwrite_value = node->value()->ResultOverwriteAllowed();
+ BinaryOperation expr(node);
GenericBinaryOperation(&expr,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
@@ -5694,11 +5602,10 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// actual function to call is resolved after the arguments have been
// evaluated.
- // Compute function to call and use the global object as the
- // receiver. There is no need to use the global proxy here because
- // it will always be replaced with a newly allocated object.
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
Load(node->expression());
- LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
@@ -5711,8 +5618,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// constructor invocation.
CodeForSourcePosition(node->position());
Result result = frame_->CallConstructor(arg_count);
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ frame_->Push(&result);
}
@@ -6648,6 +6554,14 @@ void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
__ cmpq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ j(not_equal, &done);
+ if (FLAG_debug_code) {
+ // Check that object really has empty properties array, as the map
+ // should guarantee.
+ __ CompareRoot(FieldOperand(rax, JSObject::kPropertiesOffset),
+ Heap::kEmptyFixedArrayRootIndex);
+ __ Check(equal, "JSRegExpResult: default map but non-empty properties.");
+ }
+
DeferredAllocateInNewSpace* allocate_fallback =
new DeferredAllocateInNewSpace(JSRegExpResult::kSize,
rbx,
@@ -6680,7 +6594,6 @@ void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
Label empty;
__ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &empty);
- ASSERT(!Heap::InNewSpace(Heap::fixed_cow_array_map()));
__ LoadRoot(kScratchRegister, Heap::kFixedCOWArrayMapRootIndex);
__ movq(FieldOperand(rdx, HeapObject::kMapOffset), kScratchRegister);
__ bind(&empty);
@@ -7310,6 +7223,34 @@ void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result value = frame_->Pop();
+ value.ToRegister();
+ ASSERT(value.is_valid());
+ __ testl(FieldOperand(value.reg(), String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ value.Unuse();
+ destination()->Split(zero);
+}
+
+
+void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result string = frame_->Pop();
+ string.ToRegister();
+
+ Result number = allocator()->Allocate();
+ ASSERT(number.is_valid());
+ __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
+ __ IndexFromHash(number.reg(), number.reg());
+ string.Unuse();
+ frame_->Push(&number);
+}
+
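For context on the two inline runtime functions added above: a string's hash field can cache a numeric array index, flagged by kContainsCachedArrayIndexMask and unpacked by IndexFromHash. A minimal C++ sketch of that packing follows; the bit positions and widths used here are illustrative assumptions, not the actual V8 constants.

#include <cstdint>
#include <cstdio>

// Hypothetical layout: bit 1 set means "no cached array index"; bits 2..25
// hold the cached index when it is present. Real V8 constants differ.
const uint32_t kContainsCachedArrayIndexMask = 1u << 1;
const uint32_t kArrayIndexShift = 2;
const uint32_t kArrayIndexMask = ((1u << 24) - 1) << kArrayIndexShift;

// Mirrors the testl in GenerateHasCachedArrayIndex: a zero result of the
// mask test means the index is cached.
bool HasCachedArrayIndex(uint32_t hash_field) {
  return (hash_field & kContainsCachedArrayIndexMask) == 0;
}

// Mirrors what IndexFromHash is assumed to do: drop the flag bits and
// shift the cached index down to an untagged integer.
uint32_t IndexFromHash(uint32_t hash_field) {
  return (hash_field & kArrayIndexMask) >> kArrayIndexShift;
}

int main() {
  uint32_t field = 42u << kArrayIndexShift;  // a string caching index 42
  if (HasCachedArrayIndex(field)) {
    std::printf("cached index: %u\n", IndexFromHash(field));
  }
  return 0;
}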
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -7438,9 +7379,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
} else {
- bool can_overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = node->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
bool no_negative_zero = node->expression()->no_negative_zero();
@@ -7858,11 +7797,9 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
OverwriteMode overwrite_mode = NO_OVERWRITE;
- if (node->left()->AsBinaryOperation() != NULL &&
- node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ if (node->left()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_LEFT;
- } else if (node->right()->AsBinaryOperation() != NULL &&
- node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
+ } else if (node->right()->ResultOverwriteAllowed()) {
overwrite_mode = OVERWRITE_RIGHT;
}
@@ -8048,6 +7985,40 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
+void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
+ Comment cmnt(masm_, "[ CompareToNull");
+
+ Load(node->expression());
+ Result operand = frame_->Pop();
+ operand.ToRegister();
+ __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
+ if (node->is_strict()) {
+ operand.Unuse();
+ destination()->Split(equal);
+ } else {
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ destination()->true_target()->Branch(equal);
+ __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
+ destination()->true_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(operand.reg());
+ destination()->false_target()->Branch(is_smi);
+
+ // It can be an undetectable object.
+ // Use a scratch register in preference to spilling operand.reg().
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ movq(temp.reg(),
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ temp.Unuse();
+ operand.Unuse();
+ destination()->Split(not_zero);
+ }
+}
+
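The VisitCompareToNull code added above encodes JavaScript's rules for comparing a value against null. A minimal C++ model of those rules, using a hypothetical Value type as a stand-in for tagged values, might look like this:

#include <cstdio>

// Hypothetical tagged-value model; V8's real representation differs.
enum class Kind { kNull, kUndefined, kSmi, kObject };

struct Value {
  Kind kind;
  bool undetectable;  // e.g. document.all-style host objects
};

bool CompareToNull(const Value& v, bool is_strict) {
  if (v.kind == Kind::kNull) return true;       // null == null, null === null
  if (is_strict) return false;                  // strict: nothing else matches
  if (v.kind == Kind::kUndefined) return true;  // null == undefined
  if (v.kind == Kind::kSmi) return false;       // numbers never equal null
  return v.undetectable;                        // undetectable objects compare
                                                // equal to null non-strictly
}

int main() {
  Value undef = {Kind::kUndefined, false};
  std::printf("null == undefined: %d\n", CompareToNull(undef, false));  // 1
  std::printf("null === undefined: %d\n", CompareToNull(undef, true));  // 0
  return 0;
}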
+
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() {
return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
@@ -8818,3941 +8789,17 @@ void Reference::SetValue(InitState init_state) {
}
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
-
- // Compute the function map in the current global context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
- __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(rcx); // Temporarily remove return address.
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
- // Setup the object header.
- __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // Setup the fixed slots.
- __ xor_(rbx, rbx); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
-
- // Copy the global object from the surrounding context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: constant elements.
- // [rsp + (2 * kPointerSize)]: literal index.
- // [rsp + (3 * kPointerSize)]: literals array.
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
- int size = JSArray::kSize + elements_size;
-
- // Load boilerplate object into rcx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ movq(rcx, Operand(rsp, 3 * kPointerSize));
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(rcx);
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- expected_map_index);
- __ Assert(equal, message);
- __ pop(rcx);
- }
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ lea(rdx, Operand(rax, JSArray::kSize));
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
- // Copy the elements array.
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
-
- // 'null' => false.
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, &false_result);
-
- // Get the map and type of the heap object.
- // We don't use CmpObjectType because we manipulate the type field.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
-
- // Undetectable => false.
- __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &false_result);
-
- // JavaScript object => true.
- __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(above_equal, &true_result);
-
- // String value => false iff empty.
- __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
- __ j(above_equal, &not_string);
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rdx);
- __ j(zero, &false_result);
- __ jmp(&true_result);
-
- __ bind(&not_string);
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &true_result);
- // HeapNumber => false iff +0, -0, or NaN.
- // These three cases set the zero flag when compared to zero using ucomisd.
- __ xorpd(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ j(zero, &false_result);
- // Fall through to |true_result|.
-
- // Return 1/0 for true/false in rax.
- __ bind(&true_result);
- __ movq(rax, Immediate(1));
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ xor_(rax, rax);
- __ ret(1 * kPointerSize);
-}
-
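For reference, the truthiness rules that the removed ToBooleanStub implements for heap values (null, undetectable objects, strings, heap numbers) can be sketched in plain C++. The HeapValue type below is an illustrative stand-in, not a V8 type:

#include <cmath>
#include <cstdio>
#include <string>

struct HeapValue {
  enum Kind { kNull, kObject, kString, kNumber } kind;
  bool undetectable;         // only meaningful for kObject
  std::string string_value;  // only meaningful for kString
  double number_value;       // only meaningful for kNumber
};

bool ToBoolean(const HeapValue& v) {
  switch (v.kind) {
    case HeapValue::kNull:
      return false;                        // 'null' => false
    case HeapValue::kObject:
      return !v.undetectable;              // undetectable => false,
                                           // any other JS object => true
    case HeapValue::kString:
      return !v.string_value.empty();      // false iff empty
    case HeapValue::kNumber:
      // +0, -0 and NaN are the only false numbers. The ucomisd in the stub
      // sets the zero flag for all three when compared against 0.0.
      return !(v.number_value == 0.0 || std::isnan(v.number_value));
  }
  return true;
}

int main() {
  HeapValue empty_string = {HeapValue::kString, false, "", 0.0};
  HeapValue minus_zero = {HeapValue::kNumber, false, "", -0.0};
  std::printf("%d %d\n", ToBoolean(empty_string), ToBoolean(minus_zero));  // 0 0
  return 0;
}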
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (!(left.is(left_arg) && right.is(right_arg))) {
- if (left.is(right_arg) && right.is(left_arg)) {
- if (IsOperationCommutative()) {
- SetArgsReversed();
- } else {
- __ xchg(left, right);
- }
- } else if (left.is(left_arg)) {
- __ movq(right_arg, right);
- } else if (right.is(right_arg)) {
- __ movq(left_arg, left);
- } else if (left.is(right_arg)) {
- if (IsOperationCommutative()) {
- __ movq(left_arg, right);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying left argument.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- } else if (right.is(left_arg)) {
- if (IsOperationCommutative()) {
- __ movq(right_arg, left);
- SetArgsReversed();
- } else {
- // Order of moves important to avoid destroying right argument.
- __ movq(right_arg, right);
- __ movq(left_arg, left);
- }
- } else {
- // Order of moves is not important.
- __ movq(left_arg, left);
- __ movq(right_arg, right);
- }
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Register left,
- Smi* right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ push(left);
- __ Push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (left.is(left_arg)) {
- __ Move(right_arg, right);
- } else if (left.is(right_arg) && IsOperationCommutative()) {
- __ Move(left_arg, right);
- SetArgsReversed();
- } else {
- // For non-commutative operations, left and right_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite left before moving
- // it to left_arg.
- __ movq(left_arg, left);
- __ Move(right_arg, right);
- }
-
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
- MacroAssembler* masm,
- Smi* left,
- Register right) {
- if (!ArgsInRegistersSupported()) {
- // Pass arguments on the stack.
- __ Push(left);
- __ push(right);
- } else {
- // The calling convention with registers is left in rdx and right in rax.
- Register left_arg = rdx;
- Register right_arg = rax;
- if (right.is(right_arg)) {
- __ Move(left_arg, left);
- } else if (right.is(left_arg) && IsOperationCommutative()) {
- __ Move(right_arg, left);
- SetArgsReversed();
- } else {
- // For non-commutative operations, right and left_arg might be
- // the same register. Therefore, the order of the moves is
- // important here in order to not overwrite right before moving
- // it to right_arg.
- __ movq(right_arg, right);
- __ Move(left_arg, left);
- }
- // Update flags to indicate that arguments are in registers.
- SetArgsInRegisters();
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- }
-
- // Call the stub.
- __ CallStub(this);
-}
-
-
-Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right) {
- if (ArgsInRegistersSupported()) {
- SetArgsInRegisters();
- return frame->CallStub(this, left, right);
- } else {
- frame->Push(left);
- frame->Push(right);
- return frame->CallStub(this, 2);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
- // dividend in rax and rdx free for the division. Use rax, rbx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = rdx;
- Register right = rax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
- left = rax;
- right = rbx;
- if (HasArgsInRegisters()) {
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- }
- }
- if (!HasArgsInRegisters()) {
- __ movq(right, Operand(rsp, 1 * kPointerSize));
- __ movq(left, Operand(rsp, 2 * kPointerSize));
- }
-
- Label not_smis;
- // 2. Smi check both operands.
- if (static_operands_type_.IsSmi()) {
- // Skip smi check if we know that both arguments are smis.
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(left);
- __ AbortIfNotSmi(right);
- }
- if (op_ == Token::BIT_OR) {
- // Handle OR here, since we do extra smi-checking in the or code below.
- __ SmiOr(right, right, left);
- GenerateReturn(masm);
- return;
- }
- } else {
- if (op_ != Token::BIT_OR) {
- // Skip the check for OR as it is better combined with the
- // actual operation.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
- }
-
- // 3. Operands are both smis (except for OR), perform the operation leaving
- // the result in rax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op_) {
- case Token::ADD: {
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
- }
-
- case Token::SUB: {
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
- }
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- ASSERT(left.is(rax));
- __ SmiDiv(left, left, right, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- ASSERT(left.is(rax));
- __ SmiMod(left, left, right, slow);
- break;
-
- case Token::BIT_OR:
- ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
- __ SmiOr(right, right, left); // BIT_OR is commutative.
- __ testb(right, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis);
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- switch (op_) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, slow);
- break;
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- break;
- default:
- UNREACHABLE();
- }
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- break;
- }
-
- // 4. Emit return of result in rax.
- GenerateReturn(masm);
-
- // 5. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- ASSERT(use_fp_on_smis.is_linked());
- __ bind(&use_fp_on_smis);
- if (op_ == Token::DIV) {
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- }
- // left is rdx, right is rax.
- __ AllocateHeapNumber(rbx, rcx, slow);
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rbx);
- GenerateReturn(masm);
- }
- default:
- break;
- }
-
- // 6. Non-smi operands, fall out to the non-smi code with the operands in
- // rdx and rax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
-
- switch (op_) {
- case Token::DIV:
- case Token::MOD:
- // Operands are in rax, rbx at this point.
- __ movq(rdx, rax);
- __ movq(rax, rbx);
- break;
-
- case Token::BIT_OR:
- // Right operand is saved in rcx and rax was destroyed by the smi
- // operation.
- __ movq(rax, rcx);
- break;
-
- default:
- break;
- }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- if (ShouldGenerateSmiCode()) {
- GenerateSmiCode(masm, &call_runtime);
- } else if (op_ != Token::MOD) {
- if (!HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
- }
- // Floating point case.
- if (ShouldGenerateFPCode()) {
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-smi argument occurs
- // (and only if smi code is generated). This is the right moment to
- // patch to HEAP_NUMBERS state. The transition is attempted only for
- // the four basic operations. The stub stays in the DEFAULT state
- // forever for all other operations (also if smi code is skipped).
- GenerateTypeTransition(masm);
- break;
- }
-
- Label not_floats;
- // rax: y
- // rdx: x
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadSSE2NumberOperands(masm);
- } else {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Allocate a heap number, if needed.
- Label skip_allocation;
- OverwriteMode mode = mode_;
- if (HasArgsReversed()) {
- if (mode == OVERWRITE_RIGHT) {
- mode = OVERWRITE_LEFT;
- } else if (mode == OVERWRITE_LEFT) {
- mode = OVERWRITE_RIGHT;
- }
- }
- switch (mode) {
- case OVERWRITE_LEFT:
- __ JumpIfNotSmi(rdx, &skip_allocation);
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- __ movq(rax, rdx);
- break;
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, &call_runtime);
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- __ bind(&not_floats);
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
- !HasSmiCodeInStub()) {
- // Execution reaches this point when the first non-number argument
- // occurs (and only if smi code is skipped from the stub, otherwise
- // the patching has already been done earlier in this case branch).
- // A perfect moment to try patching to STRINGS for ADD operation.
- if (op_ == Token::ADD) {
- GenerateTypeTransition(masm);
- }
- }
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label skip_allocation, non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (static_operands_type_.IsNumber()) {
- if (FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- }
- FloatingPointHelper::LoadNumbersAsIntegers(masm);
- } else {
- FloatingPointHelper::LoadAsIntegers(masm,
- &call_runtime,
- heap_number_map);
- }
- switch (op_) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
-
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- GenerateReturn(masm);
-
- // All bit-ops except SHR return a signed int32 that can be
- // returned immediately as a smi.
- // We might need to allocate a HeapNumber if we shift a negative
- // number right by zero (i.e., convert to UInt32).
- if (op_ == Token::SHR) {
- ASSERT(non_smi_shr_result.is_linked());
- __ bind(&non_smi_shr_result);
- // Allocate a heap number if needed.
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rcx,
- no_reg,
- &call_runtime,
- TAG_OBJECT);
- // Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- GenerateReturn(masm);
- }
-
- break;
- }
- default: UNREACHABLE(); break;
- }
- }
-
- // If all else fails, use the runtime system to get the correct
-  // result. If the arguments were passed in registers, place them on the
-  // stack in the correct order below the return address.
- __ bind(&call_runtime);
-
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- switch (op_) {
- case Token::ADD: {
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
-
- if (HasArgsReversed()) {
- lhs = rax;
- rhs = rdx;
- } else {
- lhs = rdx;
- rhs = rax;
- }
-
- // Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
- // If this stub has already generated FP-specific code then the arguments
- // are already in rdx and rax.
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
- GenerateLoadArguments(masm);
- }
-
- Condition is_smi;
- is_smi = masm->CheckSmi(lhs);
- __ j(is_smi, &not_string1);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &not_string1);
-
-  // First argument is a string, test second.
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, rbx, rcx, r8, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- is_smi = masm->CheckSmi(rhs);
- __ j(is_smi, &not_strings);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
- __ j(above_equal, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(&not_strings);
- // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
- ASSERT(!HasArgsInRegisters());
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
-  // If the arguments are not passed in registers, remove them from the stack
-  // before returning.
- if (!HasArgsInRegisters()) {
- __ ret(2 * kPointerSize); // Remove both operands
- } else {
- __ ret(0);
- }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- ASSERT(HasArgsInRegisters());
- __ pop(rcx);
- if (HasArgsReversed()) {
- __ push(rax);
- __ push(rdx);
- } else {
- __ push(rdx);
- __ push(rax);
- }
- __ push(rcx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- // Ensure the operands are on the stack.
- if (HasArgsInRegisters()) {
- GenerateRegisterArgsPush(masm);
- }
-
- // Left and right arguments are already on stack.
- __ pop(rcx); // Save the return address.
-
- // Push this stub's key.
- __ Push(Smi::FromInt(MinorKey()));
-
- // Although the operation and the type info are encoded into the key,
- // the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(op_));
-
- __ Push(Smi::FromInt(runtime_operands_type_));
-
- __ push(rcx); // The return address.
-
- // Perform patching to an appropriate fast case and return the result.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
- 5,
- 1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label input_not_smi;
- Label loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kPointerSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ Move(rbx, Factory::heap_number_map());
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
- __ bind(&loaded);
- // ST[0] == double value
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- __ movq(rax, ExternalReference::transcendental_cache_array_address());
- // rax points to cache array.
- __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- // Two uint_32's and a pointer per element.
- CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
- }
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss);
- // Cache hit!
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
-
- __ bind(&cache_miss);
- // Update cache with new value.
- Label nan_result;
- GenerateOperation(masm, &nan_result);
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
-
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
-
- __ bind(&nan_result);
- __ fstp(0); // Remove argument from FPU stack.
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ ret(kPointerSize);
-}
-
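The cache hash described in the comment block of TranscendentalCacheStub::Generate above can be written out as a small standalone function. This sketch covers only the arithmetic; kCacheSize is assumed to be a power of two, as the ASSERT in the stub requires, and 512 is just an illustrative value:

#include <cstdint>
#include <cstdio>
#include <cstring>

const uint32_t kCacheSize = 512;  // illustrative; must be a power of two

uint32_t TranscendentalHash(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));  // reinterpret the double's bits
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  uint32_t h = lo ^ hi;                      // h0 = bits ^ (bits >> 32)
  h = h ^ (h >> 8) ^ (h >> 16) ^ (h >> 24);  // fold the remaining bytes
  return h & (kCacheSize - 1);               // index into the cache
}

int main() {
  std::printf("bucket for 1.5: %u\n", TranscendentalHash(1.5));
  return 0;
}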
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
- Label* on_nan_result) {
- // Registers:
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- ASSERT(type_ == TranscendentalCache::SIN ||
- type_ == TranscendentalCache::COS);
- // More transcendental types can be added later.
-
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- __ j(equal, on_nan_result);
-
-  // Use fprem1 to restrict the argument to the range +/-2*PI.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
-}
-
-
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- Label done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
-  // Double to remove sign bit, shift exponent down to least significant bits,
-  // and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
-  // If the exponent is negative or above 83, the number contains no
-  // significant bits in the range 0..2^31, so the result is zero; the result
-  // register was already cleared above.
- __ j(above, &done);
-
-  // Exponent in range 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
-  // If the value was negative, this computes (double_value-1)^-1;
-  // otherwise (double_value-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
-
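The conversion that IntegerConvert performs splits on the exponent: ordinary truncation for exponents below 63, a mantissa shift for exponents 63 through 83, and zero above that. A portable sketch of the resulting ToInt32-style value follows; it models the arithmetic, not the exact register usage of the assembly:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Returns the low 32 bits of the integer part of |value|, the quantity the
// bit-op code needs. The exponent cases follow the assembly's structure.
int32_t DoubleToInt32Bits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;   // |value| < 1 truncates to zero
  if (exponent > 83) return 0;  // no bits with positional value below 2^32
  if (exponent < 63) {
    // Small enough for ordinary truncation; keep the low 32 bits.
    return static_cast<int32_t>(
        static_cast<uint32_t>(static_cast<int64_t>(value)));
  }
  // Exponent 63..83: shift the mantissa (with its implicit leading 1) so the
  // bits below 2^32 land in the low word, negating if the sign bit is set.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint32_t low = static_cast<uint32_t>(mantissa << (exponent - 52));
  if (bits >> 63) low = ~low + 1;  // two's-complement negate, mod 2^32
  return static_cast<int32_t>(low);
}

int main() {
  std::printf("%d\n", DoubleToInt32Bits(4294967297.0));  // 2^32 + 1 -> 1
  return 0;
}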
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
- __ JumpIfNotSmi(rax, &rax_is_object);
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rdx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
-  // Get the untagged integer version of the rdx heap number in rdx.
- IntegerConvert(masm, rdx, rdx);
-
- // Here rdx has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rax, rax);
- __ movl(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ movl(rcx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rcx, rax);
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
- // Load operand in rdx into xmm0, or branch to not_numbers.
- __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(rax, &load_smi_rax);
-
- __ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- if (op_ == Token::SUB) {
- // Check whether the value is a smi.
- Label try_float;
- __ JumpIfNotSmi(rax, &try_float);
-
- if (negative_zero_ == kIgnoreNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(equal, &done);
- }
-
- // Enter runtime system if the value of the smi is zero
- // to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue.
- __ SmiNeg(rax, rax, &done);
-
- // Either zero or Smi::kMinValue, neither of which become a smi when
- // negated.
- if (negative_zero_ == kStrictNegativeZero) {
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(not_equal, &slow);
- __ Move(rax, Factory::minus_zero_value());
- __ jmp(&done);
- } else {
- __ jmp(&slow);
- }
-
- // Try floating point case.
- __ bind(&try_float);
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- } else if (op_ == Token::BIT_NOT) {
- // Check if the operand is a heap number.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
-
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
- }
-
- // Return from the stub.
- __ bind(&done);
- __ StubReturn(1);
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in rdx and the parameter count is in rax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(rdx, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register rax. Use unsigned comparison to get negative
- // check for free.
- __ cmpq(rdx, rax);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(rbx); // Return address.
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
-
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger32(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- // Space on stack must already hold a smi.
- __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
- // Do not clobber the length index for the indexing operation since
-  // it is used to compute the size for allocation later.
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ testl(rcx, rcx);
- __ j(zero, &add_arguments_object);
- __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
- int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
- __ movq(rdi, Operand(rdi, offset));
-
- // Copy the JS object part.
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
- __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
- __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
- __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
- __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
- __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
-
- // Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
- __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ SmiTest(rcx);
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack and untag the length.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
- // Setup the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-  // Just jump directly to runtime if native RegExp is not selected at compile
-  // time, or if the regexp entry in generated code is turned off by a runtime
-  // switch or at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
-
- // Stack frame on entry.
-  // rsp[0]: return address
-  // rsp[8]: last_match_info (expected JSArray)
-  // rsp[16]: previous index
-  // rsp[24]: subject string
-  // rsp[32]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime;
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ movq(kScratchRegister, Operand(kScratchRegister, 0));
- __ testq(kScratchRegister, kScratchRegister);
- __ j(zero, &runtime);
-
-
- // Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- Condition is_smi = masm->CheckSmi(rcx);
- __ Check(NegateCondition(is_smi),
- "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(rcx, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // rcx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
- __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
- __ j(not_equal, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // Check that the number of captures fits in the static offsets vector buffer.
- __ SmiToInteger32(rdx,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rdx, rdx, times_1, 2));
- // Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
- __ j(above, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the second argument is a string.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rax, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: Subject string.
- // rcx: RegExp data (FixedArray).
- // rdx: Number of capture registers.
- // Check that the third argument is a positive smi less than the string
- // length. A negative value will be greater (unsigned comparison).
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
- __ j(above_equal, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
- __ Cmp(rax, Factory::fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information. Ensure no overflow in add.
- STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rax);
- __ j(greater, &runtime);
-
- // rcx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
- __ andb(rbx, Immediate(
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be a flat ascii string.
- __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
- __ j(zero, &seq_ascii_string);
-
- // Check for flat cons string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
- __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
- __ j(not_zero, &runtime);
- // String is a cons string.
- __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
- __ Cmp(rdx, Factory::empty_string());
- __ j(not_equal, &runtime);
- __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // String is a cons string with empty second part.
- // rax: first part of cons string.
- // rbx: map of first part of cons string.
- // Is first part a flat two byte string?
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask | kStringEncodingMask));
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string);
- // Any other flat string must be ascii.
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask));
- __ j(not_zero, &runtime);
-
- __ bind(&seq_ascii_string);
- // rax: subject string (sequential ascii)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rdi, 1); // Type is ascii.
- __ jmp(&check_code);
-
- __ bind(&seq_two_byte_string);
- // rax: subject string (flat two-byte)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
- __ Set(rdi, 0); // Type is two byte.
-
- __ bind(&check_code);
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object; otherwise it
- // contains the hole.
- __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
-
- // rax: subject string
- // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
- // r11: code
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
-
- // rax: subject string
- // rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r11: code
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(&Counters::regexp_entry_native, 1);
-
- // rsi is caller save on Windows and is used to pass a parameter on Linux.
- __ push(rsi);
-
- static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments);
- int argument_slots_on_stack =
- masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
-
- // Argument 7: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- Immediate(1));
-
- // Argument 6: Start (high end) of backtracking stack memory area.
- __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
- // Argument 6 passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), r9);
-#endif
-
- // Argument 5: static offsets vector buffer.
- __ movq(r8, ExternalReference::address_of_static_offsets_vector());
- // Argument 5 passed in r8 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r8);
-#endif
-
- // First four arguments are passed in registers on both Linux and Windows.
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // Keep track of aliasing between argX defined above and the registers used.
- // rax: subject string
- // rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r11: code
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label setup_two_byte, setup_rest;
- __ testb(rdi, rdi);
- __ j(zero, &setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
- __ jmp(&setup_rest);
- __ bind(&setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
-
- __ bind(&setup_rest);
- // Argument 2: Previous index.
- __ movq(arg2, rbx);
-
- // Argument 1: Subject string.
- __ movq(arg1, rax);
-
- // Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r11, kRegExpExecuteArguments);
-
- // rsi is caller save, as it is used to pass a parameter.
- __ pop(rsi);
-
- // Check the result.
- Label success;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
- __ j(equal, &success);
- Label failure;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
- __ j(equal, &failure);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
- // If not an exception, it can only be a retry. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already, a
- // stack overflow (on the backtrack stack) was detected in RegExp code, but
- // the exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ Cmp(kScratchRegister, Factory::the_hole_value());
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ Move(rax, Factory::null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- __ SmiToInteger32(rax,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rax, rax, times_1, 2));
-
- // rdx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
-
- // rbx: last_match_info backing store (FixedArray)
- // rdx: number of capture registers
- // Store the capture count.
- __ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
- kScratchRegister);
- // Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ movq(rcx, rbx);
- __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
-
- // Get the static offsets vector filled by the native regexp code.
- __ movq(rcx, ExternalReference::address_of_static_offsets_vector());
-
- // rbx: last_match_info backing store (FixedArray)
- // rcx: offsets vector
- // rdx: number of capture registers
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wrapping after zero.
- __ bind(&next_capture);
- __ subq(rdx, Immediate(1));
- __ j(negative, &done);
- // Read the value from the static offsets vector buffer and make it a smi.
- __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
- __ Integer32ToSmi(rdi, rdi, &runtime);
- // Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
- rdx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- rdi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object, Factory::heap_number_map(), not_found, true);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope fscope(SSE2);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(&Counters::number_to_string_native, 1);
-}
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in the string cache consists of two pointer-sized fields,
- // but the times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by the addressing mode on the x64 platform.
- // So we have to premultiply the entry index before the lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ movq(rbx, Operand(rsp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- Label check_unequal_objects, done;
- // The compare stub returns a positive, negative, or zero 64-bit integer
- // value in rax, corresponding to the result of comparing the two inputs.
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- // Two identical objects are equal unless they are both NaN or undefined.
- {
- Label not_identical;
- __ cmpq(rax, rdx);
- __ j(not_equal, &not_identical);
-
- if (cc_ != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan);
- __ Set(rax, NegativeComparisonResult(cc_));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
-
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(equal, &heap_number);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
- }
-
- __ bind(&not_identical);
- }
-
- if (cc_ == equal) { // Both strict and non-strict.
- Label slow; // Fallthrough label.
-
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- if (strict_) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- __ SelectNonSmi(rbx, rax, rdx, &not_smis);
-
- // Check if the non-smi operand is a heap number.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&not_smis);
- }
-
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different.
- // There is no test for undetectability in strict equality.
-
- // If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- Label first_non_object;
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &first_non_object);
- // Return non-zero (eax (not rax) is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- }
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
- }
-
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
- BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
-
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax (not rax) already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(
- rdx, rax, rcx, rbx, &check_unequal_objects);
-
- // Inline comparison of ascii strings.
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- r8);
-
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
- // Not strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects, return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
- __ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(below, &not_both_objects);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(below, &not_both_objects);
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(rax, EQUAL);
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(0);
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
-
- // Figure out which native to call and set up the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right) {
+ if (stub->ArgsInRegistersSupported()) {
+ stub->SetArgsInRegisters();
+ return frame_->CallStub(stub, left, right);
} else {
- builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
- }
-
- // Restore return address on the stack.
- __ push(rcx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(rax);
- __ Push(Smi::FromInt(0));
- __ push(rax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // +1 ~ return address
- Label receiver_is_value, receiver_is_js_object;
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ JumpIfSmi(rax, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
- __ j(above_equal, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ LeaveInternalFrame();
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &slow);
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Check that the stack contains the next handler, frame pointer, state and
- // return address in that order.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
-
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- // get next in chain
- __ pop(rcx);
- __ movq(Operand(kScratchRegister, 0), rcx);
- __ pop(rbp); // pop frame pointer
- __ pop(rdx); // remove state
-
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
- __ xor_(rsi, rsi); // tentatively set context pointer to NULL
- Label skip;
- __ cmpq(rbp, Immediate(0));
- __ j(equal, &skip);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
- __ ret(0);
-}
-
-
-void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
- Label empty_result;
- Label prologue;
- Label promote_scheduled_exception;
- __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, 0);
- ASSERT_EQ(kArgc, 4);
-#ifdef _WIN64
- // All the parameters should be set up by a caller.
-#else
- // Set 1st parameter register with property name.
- __ movq(rsi, rdx);
- // Second parameter register rdi should be set with pointer to AccessorInfo
- // by a caller.
-#endif
- // Call the api function!
- __ movq(rax,
- reinterpret_cast<int64_t>(fun()->address()),
- RelocInfo::RUNTIME_ENTRY);
- __ call(rax);
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address();
- __ movq(rsi, scheduled_exception_address);
- __ Cmp(Operand(rsi, 0), Factory::the_hole_value());
- __ j(not_equal, &promote_scheduled_exception);
-#ifdef _WIN64
- // rax keeps a pointer to v8::Handle, unpack it.
- __ movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- __ testq(rax, rax);
- __ j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- __ movq(rax, Operand(rax, 0));
- __ bind(&prologue);
- __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
- __ ret(0);
- __ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
- __ bind(&empty_result);
- // It was zero; the result is undefined.
- __ Move(rax, Factory::undefined_value());
- __ jmp(&prologue);
-}
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r12: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
-
- // Simple results are returned in rax (both AMD64 and Win64 calling
- // conventions). Complex results must be written to the address passed as the
- // first argument. AMD64 calling convention: a struct of two pointers is
- // returned in rax and rdx.
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack is known to be aligned. This function takes one argument which is
- // passed in a register.
-#ifdef _WIN64
- __ movq(rcx, rax);
-#else // _WIN64
- __ movq(rdi, rax);
-#endif
- __ movq(kScratchRegister,
- FUNCTION_ADDR(Runtime::PerformGC),
- RelocInfo::RUNTIME_ENTRY);
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ incl(Operand(kScratchRegister, 0));
- }
-
- // Call C function.
-#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
- __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
- if (result_size_ < 2) {
- // Pass a pointer to the Arguments object as the first argument.
- // Return result in single register (rax).
- __ lea(rcx, Operand(rsp, 4 * kPointerSize));
- } else {
- ASSERT_EQ(2, result_size_);
- // Pass a pointer to the result location as the first argument.
- __ lea(rcx, Operand(rsp, 6 * kPointerSize));
- // Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, Operand(rsp, 4 * kPointerSize));
- }
-
-#else // _WIN64
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r12); // argv.
-#endif
- __ call(rbx);
- // Result is in rax - do not destroy this register!
-
- if (always_allocate_scope) {
- __ movq(kScratchRegister, scope_depth);
- __ decl(Operand(kScratchRegister, 0));
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
-#ifdef _WIN64
- // If return value is on the stack, pop it to registers.
- if (result_size_ > 1) {
- ASSERT_EQ(2, result_size_);
- // Read result values stored on stack. Result is stored
- // above the four argument mirror slots and the two
- // Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
- }
-#endif
- __ lea(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(mode_, result_size_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry);
-
- // Special handling of out of memory exceptions.
- __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ cmpq(rax, kScratchRegister);
- __ j(equal, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ movq(rax, Operand(kScratchRegister, 0));
- __ movq(rdx, ExternalReference::the_hole_value_location());
- __ movq(rdx, Operand(rdx, 0));
- __ movq(Operand(kScratchRegister, 0), rdx);
-
- // Special handling of termination exceptions which are uncatchable
- // by JavaScript code.
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- // Fetch top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ movq(rsp, Operand(rsp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- __ movq(kScratchRegister, handler_address);
- __ pop(Operand(kScratchRegister, 0));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ movq(rax, Immediate(false));
- __ store_rax(external_caught);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ store_rax(pending_exception);
- }
-
- // Clear the context pointer.
- __ xor_(rsi, rsi);
-
- // Restore registers from handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
- StackHandlerConstants::kFPOffset);
- __ pop(rbp); // FP
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- __ pop(rdx); // State
-
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
- __ ret(0);
-}
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_, result_size_);
-
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- // a failure result if the do_gc argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r12: argv pointer (C callee-saved).
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE);
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- Label not_outermost_js, not_outermost_js_2;
-#endif
-
- // Setup frame.
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
-#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
-#endif
- __ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
- // callee save as well.
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ load_rax(c_entry_fp);
- __ push(rax);
-
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
- __ InitializeSmiConstantRegister();
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ load_rax(js_entry_sp);
- __ testq(rax, rax);
- __ j(not_zero, &not_outermost_js);
- __ movq(rax, rbp);
- __ store_rax(js_entry_sp);
- __ bind(&not_outermost_js);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ store_rax(pending_exception);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE);
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-
- // Clear any pending exceptions.
- __ load_rax(ExternalReference::the_hole_value_location());
- __ store_rax(pending_exception);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. We load the address
- // from an external reference instead of inlining the call target address
- // directly in the code, because the builtin stubs may not have been
- // generated yet at the time this code is generated.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ load_rax(construct_entry);
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ load_rax(entry);
- }
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
- __ call(kScratchRegister);
-
- // Unlink this frame from the handler chain.
- __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
- __ pop(Operand(kScratchRegister, 0));
- // Pop next_sp.
- __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If the current RBP value is the same as the js_entry_sp value, it means
- // that the current function is the outermost.
- __ movq(kScratchRegister, js_entry_sp);
- __ cmpq(rbp, Operand(kScratchRegister, 0));
- __ j(not_equal, &not_outermost_js_2);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
- __ bind(&not_outermost_js_2);
-#endif
-
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
- __ pop(Operand(kScratchRegister, 0));
-
- // Restore callee-saved registers (X64 conventions).
- __ pop(rbx);
-#ifdef _WIN64
- // Callee saved in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
-#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(rbp);
- __ ret(0);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Implements "value instanceof function" operator.
- // Expected input state:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
- // Returns a bitwise zero to indicate that the value
- // is an instance of the function, and anything else to
- // indicate that the value is not an instance.
-
- // Get the object - go slow case if it's a smi.
- Label slow;
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rax, &slow);
-
- // Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
- __ j(below, &slow);
- __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- // rdx is function, rax is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- __ bind(&miss);
- __ TryGetFunctionPrototype(rdx, rbx, &slow);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
- __ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Register mapping:
- // rax is object map.
- // rdx is function.
- // rbx is function prototype.
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
-
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmpq(rcx, rbx);
- __ j(equal, &is_instance);
- __ cmpq(rcx, kScratchRegister);
- // The code at is_not_instance assumes that kScratchRegister contains a
- // non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- STATIC_ASSERT(kSmiTag == 0);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- __ bind(&is_not_instance);
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the parameters in a unique 16-bit value. To avoid duplicate stubs,
- // the never-NaN-NaN condition is only taken into account if the
- // condition is equal.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name);
- return name_;
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
-
- // If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ testb(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ movq(scratch_, index_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(zero, &flat_string);
-
- // Handle non-flat strings.
- __ testb(result_, Immediate(kIsConsStringMask));
- __ j(zero, &call_runtime_);
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result_, Immediate(kStringRepresentationMask));
- __ j(not_zero, &call_runtime_);
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT(kAsciiStringTag != 0);
- __ testb(result_, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxwl(result_, FieldOperand(object_,
- scratch_, times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ SmiToInteger32(scratch_, scratch_);
- __ movzxbl(result_, FieldOperand(object_,
- scratch_, times_1,
- SeqAsciiString::kHeaderSize));
- __ bind(&got_char_code);
- __ Integer32ToSmi(result_, result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!scratch_.is(rax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ movq(scratch_, rax);
- }
- __ pop(index_);
- __ pop(object_);
- // Reload the instance type.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code for getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(code_, &slow_case_);
- __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
- __ j(above, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movq(result_, FieldOperand(result_, index.reg, index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
-
- // Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- Condition is_smi;
- is_smi = masm->CheckSmi(rax);
- __ j(is_smi, &string_add_runtime);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &string_add_runtime);
-
- // First argument is a string, test second.
- is_smi = masm->CheckSmi(rdx);
- __ j(is_smi, &string_add_runtime);
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string_add_runtime);
- }
-
- // Both arguments are strings.
- // rax: first string
- // rdx: second string
- // Check if either of the strings is empty. In that case, return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length);
- // Second string is empty, result is first string which is already in rax.
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length);
- // First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // rax: first string
- // rbx: length of first string
- // rcx: length of second string
- // rdx: second string
- // r8: map of first string if string check was performed above
- // r9: map of second string if string check was performed above
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
-
- // If arguments were known to be strings, maps are not loaded into r8 and r9
- // by the code above.
- if (!string_check_) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- }
- // Get the instance types of the two strings as they will be needed soon.
- __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
- __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-
- // Look at the length of the result of adding the two strings.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx, NULL);
- // Use the runtime system when adding two one-character strings, as it
- // contains optimizations for this specific case using the symbol table.
- __ SmiCompare(rbx, Smi::FromInt(2));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ascii strings.
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &string_add_runtime);
-
- // Get the two characters forming the new string.
- __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
-
- // Try to look up the two character string in the symbol table. If it is not
- // found, just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(rbx, 2);
- __ jmp(&make_flat_ascii_string);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
- __ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &string_add_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ascii the result is an ascii cons string.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii, allocated, ascii_data;
- __ movl(rcx, r8);
- __ and_(rcx, r9);
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ testl(rcx, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ascii cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
- __ movq(rax, rcx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
- // rcx: first instance type AND second instance type.
- // r8: first instance type.
- // r9: second instance type.
- __ testb(rcx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
- __ jmp(&allocated);
-
- // Handle creating a flat result. First check that both strings are not
- // external strings.
- // rax: first string
- // rbx: length of resulting flat string as smi
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- __ bind(&string_add_flat_result);
- __ SmiToInteger32(rbx, rbx);
- __ movl(rcx, r8);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- __ movl(rcx, r9);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- // Now check if both strings are ascii strings.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii_string_add_flat_result;
- STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
- __ testl(r8, Immediate(kAsciiStringTag));
- __ j(zero, &non_ascii_string_add_flat_result);
- __ testl(r9, Immediate(kAsciiStringTag));
- __ j(zero, &string_add_runtime);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
- __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
- // Locate first character of result.
- __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second string
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
- __ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // rax: first string - known to be two byte
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- __ bind(&non_ascii_string_add_flat_result);
- __ and_(r9, Immediate(kAsciiStringTag));
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
- // Locate first character of result.
- __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second argument
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
- __ movq(rax, rbx);
- __ IncrementCounter(&Counters::string_add_native, 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-}
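For orientation, here is a minimal C++ sketch of the decision tree the stub above implements: return the other operand when one string is empty, treat a two-character result specially via the symbol table, build a flat string for short results, and a cons string otherwise. The helper and the kMinNonFlatLength value shown are illustrative, not the V8 API.

    #include <string>

    // Illustrative threshold only; the real limit is String::kMinNonFlatLength.
    static const size_t kMinNonFlatLength = 13;

    std::string AddStrings(const std::string& a, const std::string& b) {
      if (b.empty()) return a;           // second string empty: result is first
      if (a.empty()) return b;           // first string empty: result is second
      size_t length = a.size() + b.size();
      if (length == 2) {
        // Two one-character operands: the stub probes the symbol table before
        // allocating (GenerateTwoCharacterSymbolTableProbe); elided here.
      }
      if (length < kMinNonFlatLength) {
        return a + b;                    // short result: allocate flat and copy
      }
      // Long result: the stub allocates a ConsString pointing at both halves
      // instead of copying; std::string has no such representation, so this
      // sketch concatenates eagerly.
      return a + b;
    }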
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- } else {
- __ movzxwl(kScratchRegister, Operand(src, 0));
- __ movw(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(2));
- __ addq(dest, Immediate(2));
- }
- __ decl(count);
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- // Copy characters using rep movs of quadwords. Copy remaining characters
- // after running rep movs.
- // Count is a positive int32, dest and src are character pointers.
- ASSERT(dest.is(rdi)); // rep movs destination
- ASSERT(src.is(rsi)); // rep movs source
- ASSERT(count.is(rcx)); // rep movs count
-
- // Nothing to do for zero characters.
- Label done;
- __ testl(count, count);
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- STATIC_ASSERT(2 == sizeof(uc16));
- __ addl(count, count);
- }
-
- // Don't enter the rep movs if there are fewer than 8 bytes to copy.
- Label last_bytes;
- __ testl(count, Immediate(~7));
- __ j(zero, &last_bytes);
-
- // Copy from rsi to rdi using the rep movs instruction.
- __ movl(kScratchRegister, count);
- __ shr(count, Immediate(3)); // Number of quadwords to copy.
- __ repmovsq();
-
- // Find number of bytes left.
- __ movl(count, kScratchRegister);
- __ and_(count, Immediate(7));
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ testl(count, count);
- __ j(zero, &done);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- __ decl(count);
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
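Roughly equivalent C++ for the copy strategy above, assuming byte pointers and a non-negative count (a sketch, not V8 code): bulk-copy whole 8-byte words first, then finish the 0..7 remaining bytes one at a time. Overlapping regions are not supported, matching the stub.

    #include <cstdint>
    #include <cstring>

    void CopyCharactersRep(uint8_t* dest, const uint8_t* src,
                           uint32_t count, bool ascii) {
      uint32_t bytes = ascii ? count : count * 2;  // two-byte chars double it
      if (bytes == 0) return;
      uint32_t words = bytes >> 3;                 // quadwords, as rep movsq does
      std::memcpy(dest, src, words * 8);           // bulk copy
      for (uint32_t i = words * 8; i < bytes; i++) {
        dest[i] = src[i];                          // copy the remaining bytes
      }
    }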
-
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
- Label not_array_index;
- __ leal(scratch, Operand(c1, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index);
- __ leal(scratch, Operand(c2, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, Immediate(kBitsPerByte));
- __ orl(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
-
- // Calculate capacity mask from the symbol table capacity.
- Register mask = scratch2;
- __ SmiToInteger32(mask,
- FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ decl(mask);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string (32-bit int)
- // symbol_table: symbol table
- // mask: capacity mask (32-bit int)
- // undefined: undefined value
- // scratch: -
-
- // Perform a number of probes in the symbol table.
- static const int kProbes = 4;
- Label found_in_symbol_table;
- Label next_probe[kProbes];
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
- __ movl(scratch, hash);
- if (i > 0) {
- __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
- }
- __ andl(scratch, mask);
-
- // Load the entry from the symbol table.
- Register candidate = scratch; // Scratch register contains candidate.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
- __ movq(candidate,
- FieldOperand(symbol_table,
- scratch,
- times_pointer_size,
- SymbolTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- __ cmpq(candidate, undefined);
- __ j(equal, not_found);
-
- // If length is not 2 the string is not a candidate.
- __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
- Smi::FromInt(2));
- __ j(not_equal, &next_probe[i]);
-
- // We use kScratchRegister as a temporary register on the assumption that
- // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
- Register temp = kScratchRegister;
-
- // Check that the candidate is a non-external ascii string.
- __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe[i]);
-
- // Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
- __ andl(temp, Immediate(0x0000ffff));
- __ cmpl(chars, temp);
- __ j(equal, &found_in_symbol_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = scratch;
- __ bind(&found_in_symbol_table);
- if (!result.is(rax)) {
- __ movq(rax, result);
- }
-}
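The probing scheme reads more easily in C++. The sketch below assumes a power-of-two table with one entry per slot and a quadratic probe offset; the exact offset formula, the table layout, and the helper names are assumptions for illustration, not the V8 API.

    #include <cstdint>
    #include <string>
    #include <vector>

    struct Entry { bool is_undefined = true; std::string value; };

    // Assumed quadratic probe sequence, standing in for SymbolTable::GetProbeOffset.
    static uint32_t GetProbeOffset(int i) { return (i + i * i) >> 1; }

    // Returns the interned two-character string, or nullptr if four probes did
    // not find it; the caller then allocates a fresh string. A nullptr result
    // does not prove the string is absent from the table.
    const std::string* ProbeTwoCharString(const std::vector<Entry>& table,
                                          uint32_t hash, char c1, char c2) {
      const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      const std::string key = {c1, c2};
      for (int i = 0; i < 4; i++) {                  // kProbes == 4
        uint32_t index = (hash + GetProbeOffset(i)) & mask;
        const Entry& candidate = table[index];
        if (candidate.is_undefined) return nullptr;  // hash chain ends here
        if (candidate.value == key) return &candidate.value;
      }
      return nullptr;
    }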
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = character + (character << 10);
- __ movl(hash, character);
- __ shll(hash, Immediate(10));
- __ addl(hash, character);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ sarl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ addl(hash, character);
- // hash += hash << 10;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(10));
- __ addl(hash, scratch);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ sarl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ leal(hash, Operand(hash, hash, times_8, 0));
- // hash ^= hash >> 11;
- __ movl(scratch, hash);
- __ sarl(scratch, Immediate(11));
- __ xorl(hash, scratch);
- // hash += hash << 15;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(15));
- __ addl(hash, scratch);
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero);
- __ movl(hash, Immediate(27));
- __ bind(&hash_not_zero);
-}
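The same hash computation written out in plain C++, following the comments above one-for-one (32-bit arithmetic; shifts shown as unsigned here):

    #include <cstdint>

    uint32_t HashInit(uint32_t character) {
      uint32_t hash = character + (character << 10);
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;  // never return a zero hash
      return hash;
    }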
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: to
- // rsp[16]: from
- // rsp[24]: string
-
- const int kToOffset = 1 * kPointerSize;
- const int kFromOffset = kToOffset + kPointerSize;
- const int kStringOffset = kFromOffset + kPointerSize;
- const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
-
- // Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: string
- // rbx: instance type
- // Calculate length of sub string using the smi values.
- Label result_longer_than_two;
- __ movq(rcx, Operand(rsp, kToOffset));
- __ movq(rdx, Operand(rsp, kFromOffset));
- __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
-
- __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
- __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
- Label return_rax;
- __ j(equal, &return_rax);
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked up in the symbol table.
- __ SmiToInteger32(rcx, rcx);
- __ cmpl(rcx, Immediate(2));
- __ j(greater, &result_longer_than_two);
- __ j(less, &runtime);
-
- // Sub string of length 2 requested.
- // rax: string
- // rbx: instance type
- // rcx: sub string length (value is 2)
- // rdx: from index (smi)
- __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
-
- // Get the two characters forming the sub string.
- __ SmiToInteger32(rdx, rdx); // From index is no longer smi.
- __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx,
- FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
-
- // Try to look up the two character string in the symbol table.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
- __ ret(3 * kPointerSize);
-
- __ bind(&make_two_character_string);
- // Set up registers for allocating the two character string.
- __ movq(rax, Operand(rsp, kStringOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ Set(rcx, 2);
-
- __ bind(&result_longer_than_two);
-
- // rax: string
- // rbx: instance type
- // rcx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
-
- // Allocate the result.
- __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(rdx, rsi); // rsi is used by the following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- }
-
- // rax: result string
- // rcx: result length
- // rdx: original value of rsi
- // rdi: first character of result
- // rsi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, rdx); // Restore rsi.
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(kArgumentsSize);
-
- __ bind(&non_ascii_flat);
- // rax: string
- // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
- // rcx: result string length
- // Check for sequential two byte string
- __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(rdx, rsi); // rsi is used by the following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- }
-
- // rax: result string
- // rcx: result length
- // rdx: original value of rsi
- // rdi: first character of result
- // rsi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, rdx); // Restore rsi.
-
- __ bind(&return_rax);
- __ IncrementCounter(&Counters::sub_string_native, 1);
- __ ret(kArgumentsSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-}
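As a rough outline, the stub's fast/slow path split corresponds to the following C++ (illustrative only; the single-character cache and the symbol-table probe are elided):

    #include <string>

    std::string SubString(const std::string& str, size_t from, size_t to) {
      size_t length = to - from;             // from <= to already validated
      if (length == str.size()) return str;  // whole string: return it as-is
      if (length < 2) {
        // One-character results go to the runtime (single character cache).
        return str.substr(from, length);
      }
      if (length == 2) {
        // Two-character results are first looked up in the symbol table.
      }
      // Longer results: allocate a flat string and copy the characters over.
      return str.substr(from, length);
    }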
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Ensure that you can always subtract a string length from a non-negative
- // number (e.g. another length).
- STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
-
- // Find minimum length and length difference.
- __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movq(scratch4, scratch1);
- __ SmiSub(scratch4,
- scratch4,
- FieldOperand(right, String::kLengthOffset),
- NULL);
- // Register scratch4 now holds left.length - right.length.
- const Register length_difference = scratch4;
- Label left_shorter;
- __ j(less, &left_shorter);
- // The right string isn't longer than the left one.
- // Get the right string's length by subtracting the (non-negative) difference
- // from the left string's length.
- __ SmiSub(scratch1, scratch1, length_difference, NULL);
- __ bind(&left_shorter);
- // Register scratch1 now holds Min(left.length, right.length).
- const Register min_length = scratch1;
-
- Label compare_lengths;
- // If min-length is zero, go directly to comparing lengths.
- __ SmiTest(min_length);
- __ j(zero, &compare_lengths);
-
- __ SmiToInteger32(min_length, min_length);
-
- // Registers scratch2 and scratch3 are free.
- Label result_not_equal;
- Label loop;
- {
- // Check characters 0 .. min_length - 1 in a loop.
- // Use scratch3 as loop index, min_length as limit and scratch2
- // for computation.
- const Register index = scratch3;
- __ movl(index, Immediate(0)); // Index into strings.
- __ bind(&loop);
- // Compare characters.
- // TODO(lrn): Could we load more than one character at a time?
- __ movb(scratch2, FieldOperand(left,
- index,
- times_1,
- SeqAsciiString::kHeaderSize));
- // Increment index and use -1 modifier on next load to give
- // the previous load extra time to complete.
- __ addl(index, Immediate(1));
- __ cmpb(scratch2, FieldOperand(right,
- index,
- times_1,
- SeqAsciiString::kHeaderSize - 1));
- __ j(not_equal, &result_not_equal);
- __ cmpl(index, min_length);
- __ j(not_equal, &loop);
+ frame_->Push(left);
+ frame_->Push(right);
+ return frame_->CallStub(stub, 2);
}
- // Completed loop without finding different characters.
- // Compare lengths (precomputed).
- __ bind(&compare_lengths);
- __ SmiTest(length_difference);
- __ j(not_zero, &result_not_equal);
-
- // Result is EQUAL.
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- Label result_greater;
- __ bind(&result_not_equal);
- // Unequal comparison of left to right, either character or length.
- __ j(greater, &result_greater);
-
- // Result is LESS.
- __ Move(rax, Smi::FromInt(LESS));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Move(rax, Smi::FromInt(GREATER));
- __ ret(0);
-}
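In C++ terms the routine above performs the following comparison (a sketch, not the V8 API):

    #include <algorithm>
    #include <string>

    // Returns -1 (LESS), 0 (EQUAL) or 1 (GREATER), like the stub's smi result.
    int CompareFlatAsciiStrings(const std::string& left,
                                const std::string& right) {
      size_t min_length = std::min(left.size(), right.size());
      for (size_t i = 0; i < min_length; i++) {
        if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
      }
      // All compared characters are equal: the shorter string orders first.
      if (left.size() == right.size()) return 0;
      return left.size() < right.size() ? -1 : 1;
    }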
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: right string
- // rsp[16]: left string
-
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
-
- // Check for identity.
- Label not_same;
- __ cmpq(rdx, rax);
- __ j(not_equal, &not_same);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ IncrementCounter(&Counters::string_compare_native, 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
-
- // Inline comparison of ascii strings.
- __ IncrementCounter(&Counters::string_compare_native, 1);
- // Drop arguments from the stack
- __ pop(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rcx);
- GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
#undef __
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 31f229de..911ca163 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -492,6 +492,11 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode);
+ // Generate a stub call from the virtual frame.
+ Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+ Result* left,
+ Result* right);
+
// Emits code sequence that jumps to a JumpTarget if the inputs
// are both smis. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks.
@@ -683,6 +688,9 @@ class CodeGenerator: public AstVisitor {
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+ void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+ void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -752,357 +760,6 @@ class CodeGenerator: public AstVisitor {
};
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
- public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
- Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags,
- TypeInfo operands_type = TypeInfo::Unknown())
- : op_(op),
- mode_(mode),
- flags_(flags),
- args_in_registers_(false),
- args_reversed_(false),
- static_operands_type_(operands_type),
- runtime_operands_type_(BinaryOpIC::DEFAULT),
- name_(NULL) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- flags_(FlagBits::decode(key)),
- args_in_registers_(ArgsInRegistersBits::decode(key)),
- args_reversed_(ArgsReversedBits::decode(key)),
- static_operands_type_(TypeInfo::ExpandedRepresentation(
- StaticTypeInfoBits::decode(key))),
- runtime_operands_type_(type_info),
- name_(NULL) {
- }
-
- // Generate code to call the stub with the supplied arguments. This will add
- // code at the call site to prepare arguments either in registers or on the
- // stack together with the actual call.
- void GenerateCall(MacroAssembler* masm, Register left, Register right);
- void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
- void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
- Result GenerateCall(MacroAssembler* masm,
- VirtualFrame* frame,
- Result* left,
- Result* right);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
- bool args_in_registers_; // Arguments passed in registers not on the stack.
- bool args_reversed_; // Left and right argument are swapped.
-
- // Number type information of operands, determined by code generator.
- TypeInfo static_operands_type_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo runtime_operands_type_;
-
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub %d (op %s), "
- "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_),
- static_cast<int>(args_in_registers_),
- static_cast<int>(args_reversed_),
- static_operands_type_.ToString());
- }
-#endif
-
- // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
- class ArgsReversedBits: public BitField<bool, 10, 1> {};
- class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
- class StaticTypeInfoBits: public BitField<int, 12, 3> {};
- class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 2> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 17 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | ArgsInRegistersBits::encode(args_in_registers_)
- | ArgsReversedBits::encode(args_reversed_)
- | StaticTypeInfoBits::encode(
- static_operands_type_.ThreeBitRepresentation())
- | RuntimeTypeInfoBits::encode(runtime_operands_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
-
- bool ArgsInRegistersSupported() {
- return (op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV);
- }
- bool IsOperationCommutative() {
- return (op_ == Token::ADD) || (op_ == Token::MUL);
- }
-
- void SetArgsInRegisters() { args_in_registers_ = true; }
- void SetArgsReversed() { args_reversed_ = true; }
- bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgsInRegisters() { return args_in_registers_; }
- bool HasArgsReversed() { return args_reversed_; }
-
- bool ShouldGenerateSmiCode() {
- return HasSmiCodeInStub() &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
- }
-};
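The MinorKey layout described by the BitField declarations above packs into 17 bits; the free function below is only a sketch of the same encoding, with field widths and offsets taken from those declarations.

    #include <cstdint>

    uint32_t EncodeGenericBinaryOpMinorKey(uint32_t mode,           // 2 bits, offset 0
                                           uint32_t op,             // 7 bits, offset 2
                                           bool args_in_registers,  // 1 bit,  offset 9
                                           bool args_reversed,      // 1 bit,  offset 10
                                           uint32_t flags,          // 1 bit,  offset 11
                                           uint32_t static_type,    // 3 bits, offset 12
                                           uint32_t runtime_type) { // 2 bits, offset 15
      return mode | (op << 2) |
             (static_cast<uint32_t>(args_in_registers) << 9) |
             (static_cast<uint32_t>(args_reversed) << 10) |
             (flags << 11) | (static_type << 12) | (runtime_type << 15);
    }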
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be rdi.
- Register src, // Must be rsi.
- Register count, // Must be rcx.
- bool ascii);
-
-
- // Probe the symbol table for a two character string. If the string is not
- // found by probing, a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
- // string is found, the code falls through with the string in register rax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
-};
-
-
-class StringAddStub: public CodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
-
- void Generate(MacroAssembler* masm);
-
- // Should the stub check whether arguments are strings?
- bool string_check_;
-};
-
-
-class SubStringStub: public CodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public CodeStub {
- public:
- explicit StringCompareStub() {}
-
- // Compares two flat ascii strings and returns the result in rax after
- // popping two arguments from the stack.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public CodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask);
-
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
-};
-
-
-class RecordWriteStub : public CodeStub {
- public:
- RecordWriteStub(Register object, Register addr, Register scratch)
- : object_(object), addr_(addr), scratch_(scratch) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register object_;
- Register addr_;
- Register scratch_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
- object_.code(), addr_.code(), scratch_.code());
- }
-#endif
-
- // Minor key encoding in 12 bits. 4 bits for each of the three
- // registers (object, address and scratch) OOOOAAAASSSS.
- class ScratchBits : public BitField<uint32_t, 0, 4> {};
- class AddressBits : public BitField<uint32_t, 4, 4> {};
- class ObjectBits : public BitField<uint32_t, 8, 4> {};
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- // Encode the registers.
- return ObjectBits::encode(object_.code()) |
- AddressBits::encode(addr_.code()) |
- ScratchBits::encode(scratch_.code());
- }
-};
-
-
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index d5b7e776..2c1056f5 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -47,22 +47,35 @@ bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList pointer_regs,
+ RegList object_regs,
+ RegList non_object_regs,
bool convert_call_to_jmp) {
- // Save the content of all general purpose registers in memory. This copy in
- // memory is later pushed onto the JS expression stack for the fake JS frame
- // generated and also to the C frame generated on top of that. In the JS
- // frame ONLY the registers containing pointers will be pushed on the
- // expression stack. This causes the GC to update these pointers so that
- // they will have the correct value when returning from the debugger.
- __ SaveRegistersToMemory(kJSCallerSaved);
-
// Enter an internal frame.
__ EnterInternalFrame();
- // Store the registers containing object pointers on the expression stack to
- // make sure that these are correctly updated during GC.
- __ PushRegistersFromMemory(pointer_regs);
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non object values
+ // are stored as two smis, causing them to be untouched by the GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ ASSERT(!reg.is(kScratchRegister));
+ if ((object_regs & (1 << r)) != 0) {
+ __ push(reg);
+ }
+ // Store the 64-bit value as two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ movq(kScratchRegister, reg);
+ __ Integer32ToSmi(reg, reg);
+ __ push(reg);
+ __ sar(kScratchRegister, Immediate(32));
+ __ Integer32ToSmi(kScratchRegister, kScratchRegister);
+ __ push(kScratchRegister);
+ }
+ }
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
@@ -70,12 +83,29 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ xor_(rax, rax); // No arguments (argc == 0).
__ movq(rbx, ExternalReference::debug_break());
- CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
+ CEntryStub ceb(1);
__ CallStub(&ceb);
- // Restore the register values containing object pointers from the expression
- // stack in the reverse order as they were pushed.
- __ PopRegistersToMemory(pointer_regs);
+ // Restore the register values from the expression stack.
+ for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if (FLAG_debug_code) {
+ __ Set(reg, kDebugZapValue);
+ }
+ if ((object_regs & (1 << r)) != 0) {
+ __ pop(reg);
+ }
+ // Reconstruct the 64-bit value from two smis.
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ pop(kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ __ shl(kScratchRegister, Immediate(32));
+ __ pop(reg);
+ __ SmiToInteger32(reg, reg);
+ __ or_(reg, kScratchRegister);
+ }
+ }
// Get rid of the internal frame.
__ LeaveInternalFrame();
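The save/restore pattern above keeps raw 64-bit register values GC-safe by splitting each one into two smis. Assuming the usual x64 smi encoding (32-bit payload in the upper half of a 64-bit word), the round trip looks like this in C++ (a sketch; the helper names are made up):

    #include <cstdint>

    // Split a raw 64-bit value into two smi-tagged words. Each word then looks
    // like an immediate to the GC, so it is left untouched during collection.
    void EncodeAsTwoSmis(uint64_t value, uint64_t* low_smi, uint64_t* high_smi) {
      *low_smi = (value & 0xffffffffull) << 32;  // low 32 bits as a smi
      *high_smi = (value >> 32) << 32;           // high 32 bits as a smi
    }

    uint64_t DecodeFromTwoSmis(uint64_t low_smi, uint64_t high_smi) {
      return ((high_smi >> 32) << 32) | (low_smi >> 32);
    }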
@@ -83,12 +113,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ pop(rax);
+ __ addq(rsp, Immediate(kPointerSize));
}
- // Finally restore all registers.
- __ RestoreRegistersFromMemory(kJSCallerSaved);
-
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
@@ -100,12 +127,11 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC call call (from ic-x64.cc)
+ // Register state for IC call (from ic-x64.cc)
// ----------- S t a t e -------------
- // -- rax: number of arguments
+ // -- rcx: function name
// -----------------------------------
- // The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, rcx.bit(), 0, false);
}
@@ -117,7 +143,7 @@ void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
// -- rax: number of arguments
// -----------------------------------
// The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, rdi.bit(), rax.bit(), false);
}
@@ -127,7 +153,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// -- rax : key
// -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), 0, false);
}
@@ -138,7 +164,8 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// -- rcx : key
// -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit() | rdx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
}
@@ -148,7 +175,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// -- rax : receiver
// -- rcx : name
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), 0, false);
}
@@ -157,7 +184,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: return value
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit(), true);
+ Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
}
@@ -168,7 +195,8 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// -- rcx : name
// -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit() | rdx.bit(), false);
+ Generate_DebugBreakCallHelper(
+ masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
}
@@ -177,7 +205,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// No registers used on entry.
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, 0, 0, false);
}
@@ -197,7 +225,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
- Generate_DebugBreakCallHelper(masm, 0, true);
+ Generate_DebugBreakCallHelper(masm, 0, 0, true);
}
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index 85ebc958..fd265351 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -35,19 +35,6 @@ namespace v8 {
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
@@ -58,55 +45,10 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- // Determine frame type.
+ ASSERT(*state->pc_address != NULL);
return EXIT;
}
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-byte* InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-byte* JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
} } // namespace v8::internal
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 470b5bf7..ccd0392a 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -29,6 +29,7 @@
#if defined(V8_TARGET_ARCH_X64)
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@@ -229,6 +230,13 @@ void FullCodeGenerator::EmitReturnSequence() {
}
+FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
+ Token::Value op, Expression* left, Expression* right) {
+ ASSERT(ShouldInlineSmiCase(op));
+ return kNoConstants;
+}
+
+
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
@@ -253,20 +261,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
case Expression::kTest:
// For simplicity we always test the accumulator register.
if (!reg.is(result_register())) __ movq(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- if (!reg.is(result_register())) __ movq(result_register(), reg);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -295,20 +290,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
case Expression::kTest:
Move(result_register(), slot);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- Move(result_register(), slot);
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -334,20 +316,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
case Expression::kTest:
__ Move(result_register(), lit->handle());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- __ Move(result_register(), lit->handle());
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- __ push(result_register());
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -374,20 +343,7 @@ void FullCodeGenerator::ApplyTOS(Expression::Context context) {
case Expression::kTest:
__ pop(result_register());
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ pop(result_register());
- break;
- case kStack:
- __ movq(result_register(), Operand(rsp, 0));
- break;
- }
- DoTest(context);
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -422,56 +378,7 @@ void FullCodeGenerator::DropAndApply(int count,
case Expression::kTest:
__ Drop(count);
if (!reg.is(result_register())) __ movq(result_register(), reg);
- DoTest(context);
- break;
-
- case Expression::kValueTest:
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ Drop(count);
- if (!reg.is(result_register())) __ movq(result_register(), reg);
- break;
- case kStack:
- if (count > 1) __ Drop(count - 1);
- __ movq(result_register(), reg);
- __ movq(Operand(rsp, 0), result_register());
- break;
- }
- DoTest(context);
- break;
- }
-}
-
-
-void FullCodeGenerator::PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false) {
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- // In an effect context, the true and the false case branch to the
- // same label.
- *if_true = *if_false = materialize_true;
- break;
- case Expression::kValue:
- *if_true = materialize_true;
- *if_false = materialize_false;
- break;
- case Expression::kTest:
- *if_true = true_label_;
- *if_false = false_label_;
- break;
- case Expression::kValueTest:
- *if_true = materialize_true;
- *if_false = false_label_;
- break;
- case Expression::kTestValue:
- *if_true = true_label_;
- *if_false = materialize_false;
+ DoTest(true_label_, false_label_, fall_through_);
break;
}
}
@@ -512,32 +419,6 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kTest:
break;
-
- case Expression::kValueTest:
- __ bind(materialize_true);
- switch (location_) {
- case kAccumulator:
- __ Move(result_register(), Factory::true_value());
- break;
- case kStack:
- __ Push(Factory::true_value());
- break;
- }
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(materialize_false);
- switch (location_) {
- case kAccumulator:
- __ Move(result_register(), Factory::false_value());
- break;
- case kStack:
- __ Push(Factory::false_value());
- break;
- }
- __ jmp(false_label_);
- break;
}
}
@@ -565,78 +446,19 @@ void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
break;
}
case Expression::kTest:
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- // If value is false it's needed.
- if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- break;
- case kStack:
- // If value is false it's needed.
- if (!flag) __ PushRoot(Heap::kFalseValueRootIndex);
- break;
- }
- __ jmp(flag ? true_label_ : false_label_);
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- // If value is true it's needed.
- if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- break;
- case kStack:
- // If value is true it's needed.
- if (flag) __ PushRoot(Heap::kTrueValueRootIndex);
- break;
+ if (flag) {
+ if (true_label_ != fall_through_) __ jmp(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ jmp(false_label_);
}
- __ jmp(flag ? true_label_ : false_label_);
break;
}
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
- // The value to test is in the accumulator. If the value might be needed
- // on the stack (value/test and test/value contexts with a stack location
- // desired), then the value is already duplicated on the stack.
- ASSERT_NE(NULL, true_label_);
- ASSERT_NE(NULL, false_label_);
-
- // In value/test and test/value expression contexts with stack as the
- // desired location, there is already an extra value on the stack. Use a
- // label to discard it if unneeded.
- Label discard;
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_false = &discard;
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- break;
- case kStack:
- if_true = &discard;
- break;
- }
- break;
- }
-
+void FullCodeGenerator::DoTest(Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
// Emit the inlined tests assumed by the stub.
__ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
__ j(equal, if_false);
@@ -650,83 +472,28 @@ void FullCodeGenerator::DoTest(Expression::Context context) {
Condition is_smi = masm_->CheckSmi(result_register());
__ j(is_smi, if_true);
- // Save a copy of the value if it may be needed and isn't already saved.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ push(result_register());
- break;
- case kStack:
- break;
- }
- break;
- }
-
// Call the ToBoolean stub for all other cases.
ToBooleanStub stub;
__ push(result_register());
__ CallStub(&stub);
__ testq(rax, rax);
- // The stub returns nonzero for true. Complete based on the context.
- switch (context) {
- case Expression::kUninitialized:
- case Expression::kEffect:
- case Expression::kValue:
- UNREACHABLE();
-
- case Expression::kTest:
- __ j(not_zero, true_label_);
- __ jmp(false_label_);
- break;
+ // The stub returns nonzero for true.
+ Split(not_zero, if_true, if_false, fall_through);
+}
- case Expression::kValueTest:
- switch (location_) {
- case kAccumulator:
- __ j(zero, &discard);
- __ pop(result_register());
- __ jmp(true_label_);
- break;
- case kStack:
- __ j(not_zero, true_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- case Expression::kTestValue:
- switch (location_) {
- case kAccumulator:
- __ j(not_zero, &discard);
- __ pop(result_register());
- __ jmp(false_label_);
- break;
- case kStack:
- __ j(zero, false_label_);
- break;
- }
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ j(cc, if_true);
+ } else if (if_true == fall_through) {
+ __ j(NegateCondition(cc), if_false);
+ } else {
+ __ j(cc, if_true);
+ __ jmp(if_false);
}
}
@@ -912,17 +679,18 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile the label expression.
VisitForValue(clause->label(), kAccumulator);
- // Perform the comparison as if via '==='. The comparison stub expects
- // the smi vs. smi case to be handled before it is called.
- Label slow_case;
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
- __ JumpIfNotBothSmi(rdx, rax, &slow_case);
- __ SmiCompare(rdx, rax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target()->entry_label());
+ // Perform the comparison as if via '==='.
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
+ Label slow_case;
+ __ movq(rdx, Operand(rsp, 0)); // Switch value.
+ __ JumpIfNotBothSmi(rdx, rax, &slow_case);
+ __ SmiCompare(rdx, rax);
+ __ j(not_equal, &next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ jmp(clause->body_target()->entry_label());
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(equal, true);
__ CallStub(&stub);
__ testq(rax, rax);
@@ -1206,7 +974,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ movq(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized);
@@ -1395,10 +1163,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
- Property* prop = expr->target()->AsProperty();
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
}
// Evaluate LHS expression.
@@ -1409,57 +1178,70 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
- VisitForValue(prop->obj(), kAccumulator);
+ VisitForValue(property->obj(), kAccumulator);
__ push(result_register());
} else {
- VisitForValue(prop->obj(), kStack);
+ VisitForValue(property->obj(), kStack);
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kAccumulator);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kAccumulator);
__ movq(rdx, Operand(rsp, 0));
__ push(rax);
} else {
- VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(property->obj(), kStack);
+ VisitForValue(property->key(), kStack);
}
break;
}
- // If we have a compound assignment: Get value of LHS expression and
- // store in on top of the stack.
if (expr->is_compound()) {
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
Expression::kValue);
break;
case NAMED_PROPERTY:
- EmitNamedPropertyLoad(prop);
- __ push(result_register());
+ EmitNamedPropertyLoad(property);
break;
case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(prop);
- __ push(result_register());
+ EmitKeyedPropertyLoad(property);
break;
}
- location_ = saved_location;
- }
- // Evaluate RHS expression.
- Expression* rhs = expr->value();
- VisitForValue(rhs, kAccumulator);
+ Token::Value op = expr->binary_op();
+ ConstantOperand constant = ShouldInlineSmiCase(op)
+ ? GetConstantOperand(op, expr->target(), expr->value())
+ : kNoConstants;
+ ASSERT(constant == kRightConstant || constant == kNoConstants);
+ if (constant == kNoConstants) {
+ __ push(rax); // Left operand goes on the stack.
+ VisitForValue(expr->value(), kAccumulator);
+ }
- // If we have a compound assignment: Apply operator.
- if (expr->is_compound()) {
- Location saved_location = location_;
- location_ = kAccumulator;
- EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr,
+ op,
+ Expression::kValue,
+ mode,
+ expr->target(),
+ expr->value(),
+ constant);
+ } else {
+ EmitBinaryOp(op, Expression::kValue, mode);
+ }
location_ = saved_location;
+
+ } else {
+ VisitForValue(expr->value(), kAccumulator);
}
// Record source position before possible IC call.
@@ -1500,13 +1282,85 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
+void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ Expression::Context context,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right,
+ ConstantOperand constant) {
+ ASSERT(constant == kNoConstants); // Only handled case.
+
+ // Do combined smi check of the operands. Left operand is on the
+ // stack (popped into rdx). Right operand is in rax but moved into
+ // rcx to make the shifts easier.
+ Label done, stub_call, smi_case;
+ __ pop(rdx);
+ __ movq(rcx, rax);
+ Condition smi = __ CheckBothSmi(rdx, rax);
+ __ j(smi, &smi_case);
+
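+ // Operands that are not both smis, and smi operations that overflow,
+ // fall back to the generic binary operation stub.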
+ __ bind(&stub_call);
+ GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+ if (stub.ArgsInRegistersSupported()) {
+ stub.GenerateCall(masm_, rdx, rcx);
+ } else {
+ __ push(rdx);
+ __ push(rcx);
+ __ CallStub(&stub);
+ }
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ switch (op) {
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(rax, rdx, rcx);
+ break;
+ case Token::SHL:
+ __ SmiShiftLeft(rax, rdx, rcx);
+ break;
+ case Token::SHR:
+ __ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call);
+ break;
+ case Token::ADD:
+ __ SmiAdd(rax, rdx, rcx, &stub_call);
+ break;
+ case Token::SUB:
+ __ SmiSub(rax, rdx, rcx, &stub_call);
+ break;
+ case Token::MUL:
+ __ SmiMul(rax, rdx, rcx, &stub_call);
+ break;
+ case Token::BIT_OR:
+ __ SmiOr(rax, rdx, rcx);
+ break;
+ case Token::BIT_AND:
+ __ SmiAnd(rax, rdx, rcx);
+ break;
+ case Token::BIT_XOR:
+ __ SmiXor(rax, rdx, rcx);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ __ bind(&done);
+ Apply(context, rax);
+}
+
+
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
- __ push(result_register());
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
+ Expression::Context context,
+ OverwriteMode mode) {
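+ // On entry the left operand is on the stack and the right operand is in rax.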
+ GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS);
+ if (stub.ArgsInRegistersSupported()) {
+ __ pop(rdx);
+ stub.GenerateCall(masm_, rdx, rax);
+ } else {
+ __ push(result_register());
+ __ CallStub(&stub);
+ }
Apply(context, rax);
}
@@ -1929,11 +1783,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// arguments.
- // Push function on the stack.
- VisitForValue(expr->expression(), kStack);
- // Push global object (receiver).
- __ push(CodeGenerator::GlobalObject());
+ // Push the constructor on the stack. If it's not a function it's used as
+ // the receiver for CALL_NON_FUNCTION; otherwise the value on the stack is
+ // ignored.
+ VisitForValue(expr->expression(), kStack);
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -1946,16 +1800,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// constructor invocation.
SetSourcePosition(expr->position());
- // Load function, arg_count into rdi and rax.
+ // Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
- // Function is in rsp[arg_count + 1].
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in rax, or pop it.
- DropAndApply(1, context_, rax);
+ Apply(context_, rax);
}
@@ -1967,7 +1818,9 @@ void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_true);
__ jmp(if_false);
@@ -1984,11 +1837,12 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
Condition positive_smi = __ CheckPositiveSmi(rax);
- __ j(positive_smi, if_true);
- __ jmp(if_false);
+ Split(positive_smi, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2002,7 +1856,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
@@ -2016,8 +1872,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
__ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
__ j(below, if_false);
__ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
- __ j(below_equal, if_true);
- __ jmp(if_false);
+ Split(below_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2031,12 +1886,13 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(above_equal, if_true);
- __ jmp(if_false);
+ Split(above_equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2050,14 +1906,15 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
- __ jmp(if_false);
+ Split(not_zero, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2072,7 +1929,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
@@ -2090,12 +1949,13 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2109,12 +1969,13 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2128,12 +1989,13 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
__ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2146,7 +2008,9 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
// Get the frame pointer for the calling frame.
__ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -2162,8 +2026,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
__ bind(&check_frame_marker);
__ SmiCompare(Operand(rax, StandardFrameConstants::kMarkerOffset),
Smi::FromInt(StackFrame::CONSTRUCT));
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2179,12 +2042,13 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
__ pop(rbx);
__ cmpq(rax, rbx);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
Apply(context_, if_true, if_false);
}
@@ -2193,8 +2057,8 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
+ // ArgumentsAccessStub expects the key in rdx and the formal
+ // parameter count in rax.
VisitForValue(args->at(0), kAccumulator);
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(scope()->num_parameters()));
@@ -2398,7 +2262,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
VisitForValue(args->at(0), kStack); // Load the object.
VisitForValue(args->at(1), kAccumulator); // Load the value.
- __ pop(rbx); // rax = value. ebx = object.
+ __ pop(rbx); // rax = value. rbx = object.
Label done;
// If the object is a smi, return the value.
@@ -2628,14 +2492,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
- ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
- __ CallRuntime(Runtime::kRegExpCloneResult, 1);
- Apply(context_, rax);
-}
-
-
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
VisitForValue(args->at(0), kStack);
@@ -2741,6 +2597,40 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
}
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
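+ // The masked hash field bits are clear when the string contains a cached
+ // array index.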
+ __ testl(FieldOperand(rax, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ __ j(zero, if_true);
+ __ jmp(if_false);
+
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForValue(args->at(0), kAccumulator);
+
+ __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ IndexFromHash(rax, rax);
+
+ Apply(context_, rax);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -2840,19 +2730,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- switch (location_) {
- case kAccumulator:
- __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
- break;
- case kStack:
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- break;
- }
- // Fall through.
case Expression::kTest:
- case Expression::kValueTest:
__ jmp(false_label_);
break;
}
@@ -2864,42 +2742,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
-
+ Label* fall_through = NULL;
// Notice that the labels are swapped.
- PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
-
- VisitForControl(expr->expression(), if_true, if_false);
-
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ Move(rcx, proxy->name());
- __ movq(rax, CodeGenerator::GlobalObject());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ push(rax);
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ push(rsi);
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(rax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitForValue(expr->expression(), kStack);
- }
-
+ VisitForTypeofValue(expr->expression(), kStack);
__ CallRuntime(Runtime::kTypeof, 1);
Apply(context_, rax);
break;
@@ -2920,9 +2774,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::SUB: {
Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
GenericUnaryOpStub stub(Token::SUB, overwrite);
@@ -2936,27 +2788,24 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::BIT_NOT: {
Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- bool can_overwrite =
- (expr->expression()->AsBinaryOperation() != NULL &&
- expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register rax.
+ // The generic unary operation stub expects the argument to be
+ // in the accumulator register rax.
VisitForValue(expr->expression(), kAccumulator);
- // Avoid calling the stub for Smis.
- Label smi, done;
- Condition is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, &smi);
- // Non-smi: call stub leaving result in accumulator register.
+ Label done;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label call_stub;
+ __ JumpIfNotSmi(rax, &call_stub);
+ __ SmiNot(rax, rax);
+ __ jmp(&done);
+ __ bind(&call_stub);
+ }
+ bool overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode mode =
+ overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ GenericUnaryOpStub stub(Token::BIT_NOT, mode);
__ CallStub(&stub);
- __ jmp(&done);
- // Perform operation directly on Smis.
- __ bind(&smi);
- __ SmiNot(result_register(), result_register());
__ bind(&done);
- Apply(context_, result_register());
+ Apply(context_, rax);
break;
}
@@ -2968,6 +2817,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
// Invalid left-hand-sides are rewritten to have a 'throw
// ReferenceError' as the left-hand side.
@@ -3033,8 +2883,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case Expression::kValue:
case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
// Save the result on the stack. If we have a named or keyed property
// we store the result under the receiver that is currently on top
// of the stack.
@@ -3055,7 +2903,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Inline smi case if we are in a loop.
Label stub_call, done;
- if (loop_depth() > 0) {
+ if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ SmiAddConstant(rax, rax, Smi::FromInt(1));
} else {
@@ -3138,83 +2986,144 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- VisitForEffect(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- VisitForValue(expr->left(), kStack);
- VisitForValue(expr->right(), kAccumulator);
- EmitBinaryOp(expr->op(), context_);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitNullCompare(bool strict,
- Register obj,
- Register null_const,
- Label* if_true,
- Label* if_false,
- Register scratch) {
- __ cmpq(obj, null_const);
- if (strict) {
- __ j(equal, if_true);
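+// Loads the value of an expression used as a typeof operand into the
+// requested location without causing a reference error for unresolved
+// global or lookup-slot variables.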
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ Move(rcx, proxy->name());
+ __ movq(rax, CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (where == kStack) __ push(rax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(rsi);
+ __ Push(proxy->name());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ if (where == kStack) __ push(rax);
} else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr, where);
+ }
+}
+
+
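+// Tries to emit an inlined comparison for the pattern
+// 'typeof <expression> == <string literal>'. Returns false when the pattern
+// does not match and the generic comparison code must be emitted instead.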
+bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (op != Token::EQ && op != Token::EQ_STRICT) return false;
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ Literal* right_literal = right->AsLiteral();
+ if (right_literal == NULL) return false;
+ Handle<Object> right_literal_value = right_literal->handle();
+ if (!right_literal_value->IsString()) return false;
+ UnaryOperation* left_unary = left->AsUnaryOperation();
+ if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
+ Handle<String> check = Handle<String>::cast(right_literal_value);
+
+ VisitForTypeofValue(left_unary->expression(), kAccumulator);
+ if (check->Equals(Heap::number_symbol())) {
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_true);
+ __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::string_symbol())) {
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ // Check for undetectable objects => false.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, if_false);
+ __ CmpInstanceType(rdx, FIRST_NONSTRING_TYPE);
+ Split(below, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
- __ CompareRoot(obj, Heap::kUndefinedValueRootIndex);
+ __ CompareRoot(rax, Heap::kFalseValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
- __ JumpIfSmi(obj, if_false);
- // It can be an undetectable object.
- __ movq(scratch, FieldOperand(obj, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ // Check for undetectable objects => true.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::function_symbol())) {
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
+ __ j(equal, if_true);
+ // Regular expressions => 'function' (they are callable).
+ __ CmpInstanceType(rdx, JS_REGEXP_TYPE);
+ Split(equal, if_true, if_false, fall_through);
+ } else if (check->Equals(Heap::object_symbol())) {
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, if_true);
+ // Regular expressions => 'function', not 'object'.
+ __ CmpObjectType(rax, JS_REGEXP_TYPE, rdx);
+ __ j(equal, if_false);
+ // Check for undetectable objects => false.
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_true);
+ __ j(not_zero, if_false);
+ // Check for JS objects => true.
+ __ CmpInstanceType(rdx, FIRST_JS_OBJECT_TYPE);
+ __ j(below, if_false);
+ __ CmpInstanceType(rdx, LAST_JS_OBJECT_TYPE);
+ Split(below_equal, if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
}
- __ jmp(if_false);
+
+ return true;
}
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
- PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ Apply(context_, if_true, if_false);
+ return;
+ }
VisitForValue(expr->left(), kStack);
- switch (expr->op()) {
+ switch (op) {
case Token::IN:
VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
- __ j(equal, if_true);
- __ jmp(if_false);
+ Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
@@ -3222,8 +3131,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub;
__ CallStub(&stub);
__ testq(rax, rax);
- __ j(zero, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
+ // The stub returns 0 for true.
+ Split(zero, if_true, if_false, fall_through);
break;
}
@@ -3231,28 +3140,14 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForValue(expr->right(), kAccumulator);
Condition cc = no_condition;
bool strict = false;
- switch (expr->op()) {
+ switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through.
- case Token::EQ: {
+ case Token::EQ:
cc = equal;
__ pop(rdx);
- // If either operand is constant null we do a fast compare
- // against null.
- Literal* right_literal = expr->right()->AsLiteral();
- Literal* left_literal = expr->left()->AsLiteral();
- if (right_literal != NULL && right_literal->handle()->IsNull()) {
- EmitNullCompare(strict, rdx, rax, if_true, if_false, rcx);
- Apply(context_, if_true, if_false);
- return;
- } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
- EmitNullCompare(strict, rax, rdx, if_true, if_false, rcx);
- Apply(context_, if_true, if_false);
- return;
- }
break;
- }
case Token::LT:
cc = less;
__ pop(rdx);
@@ -3279,20 +3174,18 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ JumpIfNotBothSmi(rax, rdx, &slow_case);
- __ SmiCompare(rdx, rax);
- __ j(cc, if_true);
- __ jmp(if_false);
+ if (ShouldInlineSmiCase(op)) {
+ Label slow_case;
+ __ JumpIfNotBothSmi(rax, rdx, &slow_case);
+ __ SmiCompare(rdx, rax);
+ Split(cc, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
- __ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ testq(rax, rax);
- __ j(cc, if_true);
- __ jmp(if_false);
+ Split(cc, if_true, if_false, fall_through);
}
}
@@ -3302,6 +3195,35 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
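+// Compares the expression's value against null. In the non-strict case,
+// undefined and undetectable objects also compare equal to null.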
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Comment cmnt(masm_, "[ CompareToNull");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ if (expr->is_strict()) {
+ Split(equal, if_true, if_false, fall_through);
+ } else {
+ __ j(equal, if_true);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, if_true);
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, if_false);
+ // It can be an undetectable object.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ Split(not_zero, if_true, if_false, fall_through);
+ }
+ Apply(context_, if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, rax);
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 114ae84d..a74e621e 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -572,31 +572,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
}
-// Picks out an array index from the hash field.
-static void GenerateIndexFromHash(MacroAssembler* masm,
- Register key,
- Register hash) {
- // Register use:
- // key - holds the overwritten key on exit.
- // hash - holds the key's hash. Clobbered.
-
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- __ and_(hash, Immediate(String::kArrayIndexValueMask));
- __ shr(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- __ Integer32ToSmi(key, hash);
-}
-
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -743,7 +718,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
__ bind(&index_string);
- GenerateIndexFromHash(masm, rax, rbx);
+ __ IndexFromHash(rbx, rax);
__ jmp(&index_smi);
}
@@ -1599,7 +1574,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
__ bind(&index_string);
- GenerateIndexFromHash(masm, rcx, rbx);
+ __ IndexFromHash(rbx, rcx);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index a6837bbe..165c51dd 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -391,6 +391,25 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+ // The assert checks that the constants for the maximum number of digits
+ // for an array index cached in the hash field and the number of bits
+ // reserved for it do not conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in key. Even if we subsequently go to
+ // the slow case, converting the key to a smi is always valid.
+ // key: string key
+ // hash: key's hash field, including its array index value.
+ and_(hash, Immediate(String::kArrayIndexValueMask));
+ shr(hash, Immediate(String::kHashShift));
+ // Here we actually clobber the key, which will be used if calling into
+ // runtime later. However, as the new key is the numeric value of a string
+ // key, there is no difference in using either key.
+ Integer32ToSmi(index, hash);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
CallRuntime(Runtime::FunctionForId(id), num_arguments);
}
@@ -2102,91 +2121,8 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-void MacroAssembler::PushRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Push the content of the memory location to the stack.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- push(Operand(kScratchRegister, 0));
- }
- }
-}
-
-
-void MacroAssembler::SaveRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of registers to memory location.
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- movq(Operand(kScratchRegister, 0), reg);
- }
- }
-}
-
-
-void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of memory location to registers.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- Register reg = { r };
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- movq(reg, Operand(kScratchRegister, 0));
- }
- }
-}
-
-
-void MacroAssembler::PopRegistersToMemory(RegList regs) {
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Pop the content from the stack to the memory location.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- pop(Operand(kScratchRegister, 0));
- }
- }
-}
-
-
-void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs) {
- ASSERT(!scratch.is(kScratchRegister));
- ASSERT(!base.is(kScratchRegister));
- ASSERT(!base.is(scratch));
- ASSERT((regs & ~kJSCallerSaved) == 0);
- // Copy the content of the stack to the memory location and adjust base.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- if ((regs & (1 << r)) != 0) {
- movq(scratch, Operand(base, 0));
- ExternalReference reg_addr =
- ExternalReference(Debug_Address::Register(i));
- movq(kScratchRegister, reg_addr);
- movq(Operand(kScratchRegister, 0), scratch);
- lea(base, Operand(base, kPointerSize));
- }
- }
-}
+#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
xor_(rax, rax); // no arguments
@@ -2356,8 +2292,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode,
- bool save_rax) {
+void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Setup the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
@@ -2366,7 +2301,7 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode,
push(rbp);
movq(rbp, rsp);
- // Reserve room for entry stack pointer and push the debug marker.
+ // Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // Saved entry sp, patched before call.
movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
@@ -2385,23 +2320,8 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode,
store_rax(context_address);
}
-void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode,
- int result_size,
+void MacroAssembler::EnterExitFrameEpilogue(int result_size,
int argc) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Save the state of all registers to the stack from the memory
- // location. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // TODO(1243899): This should be symmetric to
- // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
- // correct here, but computed for the other call. Very error
- // prone! FIX THIS. Actually there are deeper problems with
- // register saving than this asymmetry (see the bug report
- // associated with this issue).
- PushRegistersFromMemory(kJSCallerSaved);
- }
-#endif
-
#ifdef _WIN64
// Reserve space on stack for result and argument structures, if necessary.
int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
@@ -2430,48 +2350,35 @@ void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode,
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
- EnterExitFramePrologue(mode, true);
+void MacroAssembler::EnterExitFrame(int result_size) {
+ EnterExitFramePrologue(true);
// Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r12, Operand(rbp, r14, times_pointer_size, offset));
- EnterExitFrameEpilogue(mode, result_size, 2);
+ EnterExitFrameEpilogue(result_size, 2);
}
-void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
- int stack_space,
+void MacroAssembler::EnterApiExitFrame(int stack_space,
int argc,
int result_size) {
- EnterExitFramePrologue(mode, false);
+ EnterExitFramePrologue(false);
// Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r12, Operand(rbp, (stack_space * kPointerSize) + offset));
- EnterExitFrameEpilogue(mode, result_size, argc);
+ EnterExitFrameEpilogue(result_size, argc);
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
+void MacroAssembler::LeaveExitFrame(int result_size) {
// Registers:
// r12 : argv
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Restore the memory copy of the registers by digging them out from
- // the stack. This is needed to allow nested break points.
- if (mode == ExitFrame::MODE_DEBUG) {
- // It's okay to clobber register rbx below because we don't need
- // the function pointer after this.
- const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
- lea(rbx, Operand(rbp, kOffset));
- CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
- }
-#endif
// Get the return address from the stack and restore the frame pointer.
movq(rcx, Operand(rbp, 1 * kPointerSize));
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 08cb3773..9f5a7465 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -132,13 +132,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugger Support
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void PushRegistersFromMemory(RegList regs);
- void PopRegistersToMemory(RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
void DebugBreak();
#endif
@@ -161,17 +154,16 @@ class MacroAssembler: public Assembler {
// debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
// to the first argument in register rsi.
- void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);
+ void EnterExitFrame(int result_size = 1);
- void EnterApiExitFrame(ExitFrame::Mode mode,
- int stack_space,
+ void EnterApiExitFrame(int stack_space,
int argc,
int result_size = 1);
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
// argument in register rsi.
- void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);
+ void LeaveExitFrame(int result_size = 1);
// ---------------------------------------------------------------------------
@@ -723,6 +715,12 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
@@ -878,8 +876,8 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
- void EnterExitFramePrologue(ExitFrame::Mode mode, bool save_rax);
- void EnterExitFrameEpilogue(ExitFrame::Mode mode, int result_size, int argc);
+ void EnterExitFramePrologue(bool save_rax);
+ void EnterExitFrameEpilogue(int result_size, int argc);
// Allocation support helpers.
// Loads the top of new-space into the result register.
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 80318648..91e2b449 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -32,11 +32,9 @@
#include "serialize.h"
#include "unicode.h"
#include "log.h"
-#include "ast.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
-#include "x64/macro-assembler-x64.h"
#include "x64/regexp-macro-assembler-x64.h"
namespace v8 {
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index e0644cd6..f500ce64 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -31,9 +31,10 @@
#if defined(V8_TARGET_ARCH_X64)
#include "ic-inl.h"
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "stub-cache.h"
-#include "macro-assembler-x64.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -1291,8 +1292,69 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
JSFunction* function,
String* name,
CheckType check) {
- // TODO(722): implement this.
- return Heap::undefined_value();
+ // ----------- S t a t e -------------
+ // -- rcx : function name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ rax);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
+ rbx, rdx, rdi, name, &miss);
+
+ Register receiver = rax;
+ Register index = rdi;
+ Register scratch1 = rbx;
+ Register scratch2 = rdx;
+ Register result = rax;
+ __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ if (argc > 0) {
+ __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_at_generator.GenerateFast(masm());
+ __ ret((argc + 1) * kPointerSize);
+
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&miss);
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return GetCode(function);
}
@@ -1301,10 +1363,67 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
JSFunction* function,
String* name,
CheckType check) {
- // TODO(722): implement this.
- return Heap::undefined_value();
-}
+ // ----------- S t a t e -------------
+ // -- rcx : function name
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- ...
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString()) return Heap::undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+ GenerateNameCheck(name, &miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ rax);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
+ rbx, rdx, rdi, name, &miss);
+
+ Register receiver = rbx;
+ Register index = rdi;
+ Register scratch = rdx;
+ Register result = rax;
+ __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+ if (argc > 0) {
+ __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharCodeAtGenerator char_code_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_code_at_generator.GenerateFast(masm());
+ __ ret((argc + 1) * kPointerSize);
+
+ ICRuntimeCallHelper call_helper;
+ char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ __ ret((argc + 1) * kPointerSize);
+ __ bind(&miss);
+ Object* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return GetCode(function);
+}
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index b8b008c7..88e7cc88 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -1230,9 +1230,9 @@ Result VirtualFrame::CallConstructor(int arg_count) {
// and receiver on the stack.
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
// Duplicate the function before preparing the frame.
- PushElementAt(arg_count + 1);
+ PushElementAt(arg_count);
Result function = Pop();
- PrepareForCall(arg_count + 1, arg_count + 1); // Spill args and receiver.
+ PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args.
function.ToRegister(rdi);
// Constructors are called with the number of arguments in register
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 895e2453..d03f5f7a 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -35,6 +35,11 @@ test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
# BUG(382): Weird test. Can't guarantee that it never times out.
test-api/ApplyInterruption: PASS || TIMEOUT
+# BUG(484): This test, which we thought was originally corrected in r5236,
+# is reappearing. Disabled until the bug in the test is fixed. This only fails
+# when the snapshot is on, so it is marked PASS || FAIL.
+test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
+
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
test-serialize/TestThatAlwaysFails: FAIL
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index adaf1027..4b6fa9c8 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -8603,7 +8603,7 @@ TEST(PreCompileInvalidPreparseDataError) {
// ScriptDataImpl private implementation details
const int kUnsignedSize = sizeof(unsigned);
const int kHeaderSize = 4;
- const int kFunctionEntrySize = 4;
+ const int kFunctionEntrySize = 5;
const int kFunctionEntryStartOffset = 0;
const int kFunctionEntryEndOffset = 1;
unsigned* sd_data =
@@ -8625,6 +8625,8 @@ TEST(PreCompileInvalidPreparseDataError) {
try_catch.Reset();
// Overwrite function bar's start position with 200. The function entry
// will not be found when searching for it by position.
+ sd = v8::ScriptData::PreCompile(script, i::StrLength(script));
+ sd_data = reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
200;
compiled_script = Script::New(source, NULL, sd);
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 5952b636..fee66241 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -226,13 +226,17 @@ TEST(4) {
double a;
double b;
double c;
- float d;
- float e;
+ double d;
+ double e;
+ double f;
+ int i;
+ float x;
+ float y;
} T;
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
- // the doubles t.a, t.b, and t.c, and floats t.d, t.e.
+ // the doubles and floats.
Assembler assm(NULL, 0);
Label L, C;
@@ -254,15 +258,34 @@ TEST(4) {
__ vmov(d4, r2, r3);
__ vstr(d4, r4, OFFSET_OF(T, b));
- // Load t.d and t.e, switch values, and store back to the struct.
- __ vldr(s0, r4, OFFSET_OF(T, d));
- __ vldr(s1, r4, OFFSET_OF(T, e));
- __ vmov(s2, s0);
- __ vmov(s0, s1);
- __ vmov(s1, s2);
- __ vstr(s0, r4, OFFSET_OF(T, d));
- __ vstr(s1, r4, OFFSET_OF(T, e));
-
+ // Load t.x and t.y, switch values, and store back to the struct.
+ __ vldr(s0, r4, OFFSET_OF(T, x));
+ __ vldr(s31, r4, OFFSET_OF(T, y));
+ __ vmov(s16, s0);
+ __ vmov(s0, s31);
+ __ vmov(s31, s16);
+ __ vstr(s0, r4, OFFSET_OF(T, x));
+ __ vstr(s31, r4, OFFSET_OF(T, y));
+
+ // Move a literal into a register that can be encoded in the instruction.
+ __ vmov(d4, 1.0);
+ __ vstr(d4, r4, OFFSET_OF(T, e));
+
+ // Move a literal into a register that requires 64 bits to encode.
+ // 0x3ff0000010000000 = 1.000000059604644775390625
+ __ vmov(d4, 1.000000059604644775390625);
+ __ vstr(d4, r4, OFFSET_OF(T, d));
+
+ // Convert from floating point to integer.
+ __ vmov(d4, 2.0);
+ __ vcvt_s32_f64(s31, d4);
+ __ vstr(s31, r4, OFFSET_OF(T, i));
+
+ // Convert from integer to floating point.
+ __ mov(lr, Operand(42));
+ __ vmov(s31, lr);
+ __ vcvt_f64_s32(d4, s31);
+ __ vstr(d4, r4, OFFSET_OF(T, f));
__ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
@@ -278,12 +301,20 @@ TEST(4) {
t.a = 1.5;
t.b = 2.75;
t.c = 17.17;
- t.d = 4.5;
- t.e = 9.0;
+ t.d = 0.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.i = 0;
+ t.x = 4.5;
+ t.y = 9.0;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(4.5, t.e);
- CHECK_EQ(9.0, t.d);
+ CHECK_EQ(4.5, t.y);
+ CHECK_EQ(9.0, t.x);
+ CHECK_EQ(2, t.i);
+ CHECK_EQ(42.0, t.f);
+ CHECK_EQ(1.0, t.e);
+ CHECK_EQ(1.000000059604644775390625, t.d);
CHECK_EQ(4.25, t.c);
CHECK_EQ(4.25, t.b);
CHECK_EQ(1.5, t.a);
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 1e93bf59..9531b572 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -869,8 +869,8 @@ static void DebugEventBreakPointCollectGarbage(
// Scavenge.
Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
} else {
- // Mark sweep (and perhaps compact).
- Heap::CollectAllGarbage(false);
+ // Mark sweep compact.
+ Heap::CollectAllGarbage(true);
}
}
}
@@ -1127,7 +1127,7 @@ TEST(BreakPointICCall) {
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
- // Run with breakpoint
+ // Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(1, break_point_hit_count);
@@ -1144,6 +1144,73 @@ TEST(BreakPointICCall) {
}
+// Test that a break point can be set at an IC call location and survive a GC.
+TEST(BreakPointICCallWithGC) {
+ break_point_hit_count = 0;
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ v8::Debug::SetDebugEventListener(DebugEventBreakPointCollectGarbage,
+ v8::Undefined());
+ v8::Script::Compile(v8::String::New("function bar(){return 1;}"))->Run();
+ v8::Script::Compile(v8::String::New("function foo(){return bar();}"))->Run();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+
+ // Run without breakpoints.
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ int bp = SetBreakPoint(foo, 0);
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, break_point_hit_count);
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
+// Test that a break point can be set at a construct call location and
+// survive a GC.
+TEST(BreakPointConstructCallWithGC) {
+ break_point_hit_count = 0;
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ v8::Debug::SetDebugEventListener(DebugEventBreakPointCollectGarbage,
+ v8::Undefined());
+ v8::Script::Compile(v8::String::New("function bar(){ this.x = 1;}"))->Run();
+ v8::Script::Compile(v8::String::New(
+ "function foo(){return new bar(1).x;}"))->Run();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("foo")));
+
+ // Run without breakpoints.
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Run with breakpoint.
+ int bp = SetBreakPoint(foo, 0);
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, break_point_hit_count);
+ CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(2, break_point_hit_count);
+
+ // Run without breakpoints.
+ ClearBreakPoint(bp);
+ foo->Call(env->Global(), 0, NULL);
+ CHECK_EQ(2, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
// Test that a break point can be set at a return store location.
TEST(BreakPointReturn) {
break_point_hit_count = 0;
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 0ba4f9ae..61f5ffc7 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -422,6 +422,19 @@ TEST(Vfp) {
COMPARE(vmov(d3, d3, eq),
"0eb03b43 vmov.f64eq d3, d3");
+ COMPARE(vmov(s0, s31),
+ "eeb00a6f vmov.f32 s0, s31");
+ COMPARE(vmov(s31, s0),
+ "eef0fa40 vmov.f32 s31, s0");
+ COMPARE(vmov(r0, s0),
+ "ee100a10 vmov r0, s0");
+ COMPARE(vmov(r10, s31),
+ "ee1faa90 vmov r10, s31");
+ COMPARE(vmov(s0, r0),
+ "ee000a10 vmov s0, r0");
+ COMPARE(vmov(s31, r10),
+ "ee0faa90 vmov s31, r10");
+
COMPARE(vadd(d0, d1, d2),
"ee310b02 vadd.f64 d0, d1, d2");
COMPARE(vadd(d3, d4, d5, mi),
@@ -451,6 +464,41 @@ TEST(Vfp) {
"eeb70b00 vmov.f64 d0, #1");
COMPARE(vmov(d2, -13.0),
"eeba2b0a vmov.f64 d2, #-13");
+
+ COMPARE(vldr(s0, r0, 0),
+ "ed900a00 vldr s0, [r0 + 4*0]");
+ COMPARE(vldr(s1, r1, 4),
+ "edd10a01 vldr s1, [r1 + 4*1]");
+ COMPARE(vldr(s15, r4, 16),
+ "edd47a04 vldr s15, [r4 + 4*4]");
+ COMPARE(vldr(s16, r5, 20),
+ "ed958a05 vldr s16, [r5 + 4*5]");
+ COMPARE(vldr(s31, r10, 1020),
+ "eddafaff vldr s31, [r10 + 4*255]");
+
+ COMPARE(vstr(s0, r0, 0),
+ "ed800a00 vstr s0, [r0 + 4*0]");
+ COMPARE(vstr(s1, r1, 4),
+ "edc10a01 vstr s1, [r1 + 4*1]");
+ COMPARE(vstr(s15, r8, 8),
+ "edc87a02 vstr s15, [r8 + 4*2]");
+ COMPARE(vstr(s16, r9, 12),
+ "ed898a03 vstr s16, [r9 + 4*3]");
+ COMPARE(vstr(s31, r10, 1020),
+ "edcafaff vstr s31, [r10 + 4*255]");
+
+ COMPARE(vldr(d0, r0, 0),
+ "ed900b00 vldr d0, [r0 + 4*0]");
+ COMPARE(vldr(d1, r1, 4),
+ "ed911b01 vldr d1, [r1 + 4*1]");
+ COMPARE(vldr(d15, r10, 1020),
+ "ed9afbff vldr d15, [r10 + 4*255]");
+ COMPARE(vstr(d0, r0, 0),
+ "ed800b00 vstr d0, [r0 + 4*0]");
+ COMPARE(vstr(d1, r1, 4),
+ "ed811b01 vstr d1, [r1 + 4*1]");
+ COMPARE(vstr(d15, r10, 1020),
+ "ed8afbff vstr d15, [r10 + 4*255]");
}
VERIFY_RUN();
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 605d8831..eec024f3 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -637,22 +637,27 @@ TEST(JSArray) {
// Allocate the object.
Handle<JSObject> object = Factory::NewJSObject(function);
Handle<JSArray> array = Handle<JSArray>::cast(object);
- array->Initialize(0);
+ Object* ok = array->Initialize(0);
+ // We just initialized the VM; there should be no heap allocation failure yet.
+ CHECK(!ok->IsFailure());
// Set array length to 0.
- array->SetElementsLength(Smi::FromInt(0));
+ ok = array->SetElementsLength(Smi::FromInt(0));
+ CHECK(!ok->IsFailure());
CHECK_EQ(Smi::FromInt(0), array->length());
CHECK(array->HasFastElements()); // Must be in fast mode.
// array[length] = name.
- array->SetElement(0, *name);
+ ok = array->SetElement(0, *name);
+ CHECK(!ok->IsFailure());
CHECK_EQ(Smi::FromInt(1), array->length());
CHECK_EQ(array->GetElement(0), *name);
// Set array length with larger than smi value.
Handle<Object> length =
Factory::NewNumberFromUint(static_cast<uint32_t>(Smi::kMaxValue) + 1);
- array->SetElementsLength(*length);
+ ok = array->SetElementsLength(*length);
+ CHECK(!ok->IsFailure());
uint32_t int_length = 0;
CHECK(length->ToArrayIndex(&int_length));
@@ -660,7 +665,8 @@ TEST(JSArray) {
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
// array[length] = name.
- array->SetElement(int_length, *name);
+ ok = array->SetElement(int_length, *name);
+ CHECK(!ok->IsFailure());
uint32_t new_int_length = 0;
CHECK(array->length()->ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
@@ -684,8 +690,11 @@ TEST(JSObjectCopy) {
obj->SetProperty(*first, Smi::FromInt(1), NONE);
obj->SetProperty(*second, Smi::FromInt(2), NONE);
- obj->SetElement(0, *first);
- obj->SetElement(1, *second);
+ Object* ok = obj->SetElement(0, *first);
+ CHECK(!ok->IsFailure());
+
+ ok = obj->SetElement(1, *second);
+ CHECK(!ok->IsFailure());
// Make the clone.
Handle<JSObject> clone = Copy(obj);
@@ -701,8 +710,10 @@ TEST(JSObjectCopy) {
clone->SetProperty(*first, Smi::FromInt(2), NONE);
clone->SetProperty(*second, Smi::FromInt(1), NONE);
- clone->SetElement(0, *second);
- clone->SetElement(1, *first);
+ ok = clone->SetElement(0, *second);
+ CHECK(!ok->IsFailure());
+ ok = clone->SetElement(1, *first);
+ CHECK(!ok->IsFailure());
CHECK_EQ(obj->GetElement(1), clone->GetElement(0));
CHECK_EQ(obj->GetElement(0), clone->GetElement(1));
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index 6da1a759..312a443a 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -216,25 +216,25 @@ namespace internal {
class CodeGeneratorPatcher {
public:
CodeGeneratorPatcher() {
- CodeGenerator::InlineRuntimeLUT genGetFramePointer =
+ CodeGenerator::InlineRuntimeLUT gen_get_frame_pointer =
{&CodeGenerator::GenerateGetFramePointer, "_GetFramePointer", 0};
// _RandomHeapNumber is just used as a dummy function that has zero
// arguments, the same as the _GetFramePointer function we actually patch
// in.
bool result = CodeGenerator::PatchInlineRuntimeEntry(
NewString("_RandomHeapNumber"),
- genGetFramePointer, &oldInlineEntry);
+ gen_get_frame_pointer, &old_inline_entry);
CHECK(result);
}
~CodeGeneratorPatcher() {
CHECK(CodeGenerator::PatchInlineRuntimeEntry(
NewString("_GetFramePointer"),
- oldInlineEntry, NULL));
+ old_inline_entry, NULL));
}
private:
- CodeGenerator::InlineRuntimeLUT oldInlineEntry;
+ CodeGenerator::InlineRuntimeLUT old_inline_entry;
};
} } // namespace v8::internal
@@ -273,9 +273,10 @@ static void CreateTraceCallerFunction(const char* func_name,
// StackTracer uses Top::c_entry_fp as a starting point for stack
// walking.
TEST(CFromJSStackTrace) {
- // TODO(711) The hack of replacing the inline runtime function
- // RandomHeapNumber with GetFrameNumber does not work with the way the full
- // compiler generates inline runtime calls.
+ // TODO(711): The hack of replacing the inline runtime function
+ // RandomHeapNumber with GetFramePointer does not work with the way
+ // the full compiler generates inline runtime calls.
+ i::FLAG_full_compiler = false;
i::FLAG_always_full_compiler = false;
TickSample sample;
@@ -313,9 +314,10 @@ TEST(CFromJSStackTrace) {
// Top::c_entry_fp value. In this case, StackTracer uses passed frame
// pointer value as a starting point for stack walking.
TEST(PureJSStackTrace) {
- // TODO(711) The hack of replacing the inline runtime function
- // RandomHeapNumber with GetFrameNumber does not work with the way the full
- // compiler generates inline runtime calls.
+ // TODO(711): The hack of replacing the inline runtime function
+ // RandomHeapNumber with GetFramePointer does not work with the way
+ // the full compiler generates inline runtime calls.
+ i::FLAG_full_compiler = false;
i::FLAG_always_full_compiler = false;
TickSample sample;
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index ea477de6..b3622028 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -775,4 +775,21 @@ TEST(RecordStackTraceAtStartProfiling) {
CHECK_EQ(0, current->children()->length());
}
+
+TEST(Issue51919) {
+ CpuProfilesCollection collection;
+ i::EmbeddedVector<char*,
+ CpuProfilesCollection::kMaxSimultaneousProfiles> titles;
+ for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i) {
+ i::Vector<char> title = i::Vector<char>::New(16);
+ i::OS::SNPrintF(title, "%d", i);
+ CHECK(collection.StartProfiling(title.start(), i + 1)); // UID must be > 0.
+ titles[i] = title.start();
+ }
+ CHECK(!collection.StartProfiling(
+ "maximum", CpuProfilesCollection::kMaxSimultaneousProfiles + 1));
+ for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i)
+ i::DeleteArray(titles[i]);
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 3ec25c9f..20fb2fe6 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -98,13 +98,6 @@ static int make_code(TypeCode type, int id) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-static int register_code(int reg) {
- return Debug::k_register_address << kDebugIdShift | reg;
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
TEST(ExternalReferenceEncoder) {
StatsTable::SetCounterFunction(counter_function);
Heap::Setup(false);
@@ -115,10 +108,6 @@ TEST(ExternalReferenceEncoder) {
Encode(encoder, Runtime::kAbort));
CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty),
Encode(encoder, IC_Utility(IC::kLoadCallbackProperty)));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- CHECK_EQ(make_code(DEBUG_ADDRESS, register_code(3)),
- Encode(encoder, Debug_Address(Debug::k_register_address, 3)));
-#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function_prototype =
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
@@ -156,10 +145,6 @@ TEST(ExternalReferenceDecoder) {
decoder.Decode(make_code(RUNTIME_FUNCTION, Runtime::kAbort)));
CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)),
decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty)));
-#ifdef ENABLE_DEBUGGER_SUPPORT
- CHECK_EQ(AddressOf(Debug_Address(Debug::k_register_address, 3)),
- decoder.Decode(make_code(DEBUG_ADDRESS, register_code(3))));
-#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function =
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(keyed_load_function.address(),
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index bcb185d2..88ef0a20 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -131,3 +131,64 @@ TEST(MemCopy) {
buffer2.Dispose();
buffer1.Dispose();
}
+
+
+TEST(Collector) {
+ Collector<int> collector(8);
+ const int kLoops = 5;
+ const int kSequentialSize = 1000;
+ const int kBlockSize = 7;
+ for (int loop = 0; loop < kLoops; loop++) {
+ Vector<int> block = collector.AddBlock(7, 0xbadcafe);
+ for (int i = 0; i < kSequentialSize; i++) {
+ collector.Add(i);
+ }
+ for (int i = 0; i < kBlockSize - 1; i++) {
+ block[i] = i * 7;
+ }
+ }
+ Vector<int> result = collector.ToVector();
+ CHECK_EQ(kLoops * (kBlockSize + kSequentialSize), result.length());
+ for (int i = 0; i < kLoops; i++) {
+ int offset = i * (kSequentialSize + kBlockSize);
+ for (int j = 0; j < kBlockSize - 1; j++) {
+ CHECK_EQ(j * 7, result[offset + j]);
+ }
+ CHECK_EQ(0xbadcafe, result[offset + kBlockSize - 1]);
+ for (int j = 0; j < kSequentialSize; j++) {
+ CHECK_EQ(j, result[offset + kBlockSize + j]);
+ }
+ }
+ result.Dispose();
+}
+
+
+TEST(SequenceCollector) {
+ SequenceCollector<int> collector(8);
+ const int kLoops = 5000;
+ const int kMaxSequenceSize = 13;
+ int total_length = 0;
+ for (int loop = 0; loop < kLoops; loop++) {
+ int seq_length = loop % kMaxSequenceSize;
+ collector.StartSequence();
+ for (int j = 0; j < seq_length; j++) {
+ collector.Add(j);
+ }
+ Vector<int> sequence = collector.EndSequence();
+ for (int j = 0; j < seq_length; j++) {
+ CHECK_EQ(j, sequence[j]);
+ }
+ total_length += seq_length;
+ }
+ Vector<int> result = collector.ToVector();
+ CHECK_EQ(total_length, result.length());
+ int offset = 0;
+ for (int loop = 0; loop < kLoops; loop++) {
+ int seq_length = loop % kMaxSequenceSize;
+ for (int j = 0; j < seq_length; j++) {
+ CHECK_EQ(j, result[offset]);
+ offset++;
+ }
+ }
+ result.Dispose();
+}
diff --git a/test/cctest/testcfg.py b/test/cctest/testcfg.py
index c2427c8d..485f2cfd 100644
--- a/test/cctest/testcfg.py
+++ b/test/cctest/testcfg.py
@@ -31,15 +31,12 @@ from os.path import join, dirname, exists
import platform
import utils
-CCTEST_DEBUG_FLAGS = ['--enable-slow-asserts', '--debug-code', '--verify-heap']
-
class CcTestCase(test.TestCase):
def __init__(self, path, executable, mode, raw_name, dependency, context):
- super(CcTestCase, self).__init__(context, path)
+ super(CcTestCase, self).__init__(context, path, mode)
self.executable = executable
- self.mode = mode
self.raw_name = raw_name
self.dependency = dependency
@@ -54,8 +51,7 @@ class CcTestCase(test.TestCase):
serialization_file += '_' + self.GetName()
serialization_option = '--testing_serialization_file=' + serialization_file
result = [ self.executable, name, serialization_option ]
- if self.mode == 'debug':
- result += CCTEST_DEBUG_FLAGS
+ result += self.context.GetVmFlags(self, self.mode)
return result
def GetCommand(self):
diff --git a/test/es5conform/testcfg.py b/test/es5conform/testcfg.py
index d1f23aa3..43d61047 100644
--- a/test/es5conform/testcfg.py
+++ b/test/es5conform/testcfg.py
@@ -37,9 +37,8 @@ HARNESS_FILES = ['sth.js']
class ES5ConformTestCase(test.TestCase):
def __init__(self, filename, path, context, root, mode, framework):
- super(ES5ConformTestCase, self).__init__(context, path)
+ super(ES5ConformTestCase, self).__init__(context, path, mode)
self.filename = filename
- self.mode = mode
self.framework = framework
self.root = root
@@ -55,7 +54,7 @@ class ES5ConformTestCase(test.TestCase):
return 'FAILED!' in output.stdout
def GetCommand(self):
- result = [self.context.GetVm(self.mode)]
+ result = self.context.GetVmCommand(self, self.mode)
result += ['-e', 'var window = this']
result += self.framework
result.append(self.filename)
diff --git a/test/message/testcfg.py b/test/message/testcfg.py
index 6004282b..7dae047d 100644
--- a/test/message/testcfg.py
+++ b/test/message/testcfg.py
@@ -35,11 +35,10 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
class MessageTestCase(test.TestCase):
def __init__(self, path, file, expected, mode, context, config):
- super(MessageTestCase, self).__init__(context, path)
+ super(MessageTestCase, self).__init__(context, path, mode)
self.file = file
self.expected = expected
self.config = config
- self.mode = mode
def IgnoreLine(self, str):
"""Ignore empty lines and valgrind output."""
@@ -79,7 +78,7 @@ class MessageTestCase(test.TestCase):
return self.path[-1]
def GetCommand(self):
- result = [self.config.context.GetVm(self.mode)]
+ result = self.config.context.GetVmCommand(self, self.mode)
source = open(self.file).read()
flags_match = FLAGS_PATTERN.search(source)
if flags_match:
diff --git a/test/mjsunit/array-splice.js b/test/mjsunit/array-splice.js
index 88c48764..68dd9b2b 100644
--- a/test/mjsunit/array-splice.js
+++ b/test/mjsunit/array-splice.js
@@ -67,13 +67,8 @@
(function() {
var array;
for (var i = 0; i < 7; i++) {
- // SpiderMonkey and JSC return undefined in the case where no
- // arguments are given instead of using the implicit undefined
- // arguments. This does not follow ECMA-262, but we do the same for
- // compatibility.
- // TraceMonkey follows ECMA-262 though.
array = [1, 2, 3]
- assertEquals(undefined, array.splice());
+ assertEquals([], array.splice());
assertEquals([1, 2, 3], array);
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
diff --git a/test/mjsunit/const-eval-init.js b/test/mjsunit/const-eval-init.js
index 5bcd9175..3f380d9d 100644
--- a/test/mjsunit/const-eval-init.js
+++ b/test/mjsunit/const-eval-init.js
@@ -67,7 +67,9 @@ function testAssignmentArgument(x) {
assertEquals(7, x);
}
-testAssignmentArgument();
+for (var i = 0; i < 10000; i++) {
+ testAssignmentArgument();
+}
assertEquals(6, x);
__defineSetter__('x', function() { throw 42; });
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index 11ac2e0b..901c190c 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -176,7 +176,11 @@ var knownProblems = {
"_GetFromCache": true,
// This function expects its first argument to be a non-smi.
- "_IsStringWrapperSafeForDefaultValueOf" : true
+ "_IsStringWrapperSafeForDefaultValueOf" : true,
+
+ // Only applicable to strings.
+ "_HasCachedArrayIndex": true,
+ "_GetCachedArrayIndex": true
};
var currentlyUncallable = {
diff --git a/test/mjsunit/regress/regress-842.js b/test/mjsunit/regress/regress-842.js
new file mode 100644
index 00000000..18ad6d3d
--- /dev/null
+++ b/test/mjsunit/regress/regress-842.js
@@ -0,0 +1,42 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Bug 842 describes a scenario where Object.prototype or Array.prototype is
+// changed (a property is added), after which Object.freeze and Object.seal
+// would fail, since the added property would be listed when doing a
+// "for (var key in names)" over the property names.
+
+Array.prototype.myfunc = function() {};
+Array.prototype[10] = 42;
+Array.prototype.length = 3000;
+
+var obj = { name: "n1" };
+
+try {
+ obj = Object.freeze(obj);
+} catch (e) {
+ assertUnreachable();
+}
diff --git a/test/mjsunit/regress/regress-851.js b/test/mjsunit/regress/regress-851.js
new file mode 100644
index 00000000..d8f693ee
--- /dev/null
+++ b/test/mjsunit/regress/regress-851.js
@@ -0,0 +1,32 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var i = 0;
+for (var i = 0; i < 10000; i++) {
+ Object.freeze({});
+ assertNull(JSON.stringify({x: null}).match(/\0/));
+}
diff --git a/test/mjsunit/testcfg.py b/test/mjsunit/testcfg.py
index 49064b12..d8fe24d3 100644
--- a/test/mjsunit/testcfg.py
+++ b/test/mjsunit/testcfg.py
@@ -31,7 +31,6 @@ from os.path import join, dirname, exists
import re
import tempfile
-MJSUNIT_DEBUG_FLAGS = ['--enable-slow-asserts', '--debug-code', '--verify-heap']
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
@@ -40,10 +39,9 @@ SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
class MjsunitTestCase(test.TestCase):
def __init__(self, path, file, mode, context, config):
- super(MjsunitTestCase, self).__init__(context, path)
+ super(MjsunitTestCase, self).__init__(context, path, mode)
self.file = file
self.config = config
- self.mode = mode
self.self_script = False
def GetLabel(self):
@@ -53,13 +51,11 @@ class MjsunitTestCase(test.TestCase):
return self.path[-1]
def GetCommand(self):
- result = [self.config.context.GetVm(self.mode)]
+ result = self.config.context.GetVmCommand(self, self.mode)
source = open(self.file).read()
flags_match = FLAGS_PATTERN.search(source)
if flags_match:
result += flags_match.group(1).strip().split()
- if self.mode == 'debug':
- result += MJSUNIT_DEBUG_FLAGS
additional_files = []
files_match = FILES_PATTERN.search(source);
# Accept several lines of 'Files:'
@@ -94,8 +90,8 @@ class MjsunitTestCase(test.TestCase):
self.self_script = self_script
return self_script
- def Cleanup(self):
- if self.self_script:
+ def AfterRun(self, result):
+ if self.self_script and (not result.HasPreciousOutput()):
test.CheckedUnlink(self.self_script)
class MjsunitTestConfiguration(test.TestConfiguration):
diff --git a/test/mjsunit/third_party/array-splice-webkit.js b/test/mjsunit/third_party/array-splice-webkit.js
index b676a7c1..974ac55e 100644
--- a/test/mjsunit/third_party/array-splice-webkit.js
+++ b/test/mjsunit/third_party/array-splice-webkit.js
@@ -38,7 +38,7 @@ assertArrayEquals(['a','b'], arr.splice(0));
assertArrayEquals([], arr)
arr = ['a','b','c','d'];
-assertEquals(undefined, arr.splice())
+assertEquals([], arr.splice())
assertArrayEquals(['a','b','c','d'], arr);
assertArrayEquals(['a','b','c','d'], arr.splice(undefined))
assertArrayEquals([], arr);
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 28fc0631..1768c397 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -235,11 +235,6 @@ ecma_3/Number/15.7.4.7-1: FAIL_OK
# toExponential argument restricted to range 0..20 in JSC/V8
ecma_3/Number/15.7.4.6-1: FAIL_OK
-# Array.prototype.slice with zero arguments return undefined in JSC/V8,
-# empty array in Spider/TraceMonkey.
-js1_5/Array/regress-451483: FAIL_OK
-
-
#:=== RegExp:===
# To be compatible with JSC we silently ignore flags that do not make
# sense. These tests expects us to throw exceptions.
diff --git a/test/mozilla/testcfg.py b/test/mozilla/testcfg.py
index d1c1767a..7a6438f1 100644
--- a/test/mozilla/testcfg.py
+++ b/test/mozilla/testcfg.py
@@ -57,9 +57,8 @@ TEST_DIRS = """
class MozillaTestCase(test.TestCase):
def __init__(self, filename, path, context, root, mode, framework):
- super(MozillaTestCase, self).__init__(context, path)
+ super(MozillaTestCase, self).__init__(context, path, mode)
self.filename = filename
- self.mode = mode
self.framework = framework
self.root = root
@@ -75,8 +74,8 @@ class MozillaTestCase(test.TestCase):
return 'FAILED!' in output.stdout
def GetCommand(self):
- result = [self.context.GetVm(self.mode), '--expose-gc',
- join(self.root, 'mozilla-shell-emulation.js')]
+ result = self.context.GetVmCommand(self, self.mode) + \
+ [ '--expose-gc', join(self.root, 'mozilla-shell-emulation.js') ]
result += self.framework
result.append(self.filename)
return result
diff --git a/test/sputnik/testcfg.py b/test/sputnik/testcfg.py
index 65923822..f7a5edcc 100644
--- a/test/sputnik/testcfg.py
+++ b/test/sputnik/testcfg.py
@@ -36,9 +36,8 @@ import time
class SputnikTestCase(test.TestCase):
def __init__(self, case, path, context, mode):
- super(SputnikTestCase, self).__init__(context, path)
+ super(SputnikTestCase, self).__init__(context, path, mode)
self.case = case
- self.mode = mode
self.tmpfile = None
self.source = None
@@ -56,12 +55,13 @@ class SputnikTestCase(test.TestCase):
self.tmpfile.Write(self.GetSource())
self.tmpfile.Close()
- def AfterRun(self):
- self.tmpfile.Dispose()
+ def AfterRun(self, result):
+ # Dispose the temporary file if everything looks okay.
+ if not result.HasPreciousOutput(): self.tmpfile.Dispose()
self.tmpfile = None
def GetCommand(self):
- result = [self.context.GetVm(self.mode)]
+ result = self.context.GetVmCommand(self, self.mode)
result.append(self.tmpfile.name)
return result
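
For reference, the contract these testcfg changes build on: AfterRun() now
receives the run's TestOutput, and a test keeps its temporary artifacts only
when HasPreciousOutput() is true (unexpected output while
--store-unexpected-output is in effect). A minimal sketch, assuming a
hypothetical FooTestCase subclass with a self.tmpfile attribute:

  import test  # the harness framework in tools/test.py

  class FooTestCase(test.TestCase):  # hypothetical subclass, illustration only
    def AfterRun(self, result):
      # Keep the generated file for debugging when the run produced
      # unexpected output and --store-unexpected-output is in effect;
      # otherwise dispose of it, as the sputnik/mjsunit configs do.
      if not result.HasPreciousOutput():
        self.tmpfile.Dispose()
      self.tmpfile = None
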
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 47f95026..b355fb6f 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -343,8 +343,6 @@
'../../src/fixed-dtoa.h',
'../../src/flags.cc',
'../../src/flags.h',
- '../../src/flow-graph.cc',
- '../../src/flow-graph.h',
'../../src/frame-element.cc',
'../../src/frame-element.h',
'../../src/frames-inl.h',
@@ -495,6 +493,8 @@
'../../src/arm/assembler-arm.cc',
'../../src/arm/assembler-arm.h',
'../../src/arm/builtins-arm.cc',
+ '../../src/arm/code-stubs-arm.cc',
+ '../../src/arm/code-stubs-arm.h',
'../../src/arm/codegen-arm.cc',
'../../src/arm/codegen-arm.h',
'../../src/arm/constants-arm.h',
@@ -541,6 +541,8 @@
'../../src/ia32/assembler-ia32.cc',
'../../src/ia32/assembler-ia32.h',
'../../src/ia32/builtins-ia32.cc',
+ '../../src/ia32/code-stubs-ia32.cc',
+ '../../src/ia32/code-stubs-ia32.h',
'../../src/ia32/codegen-ia32.cc',
'../../src/ia32/codegen-ia32.h',
'../../src/ia32/cpu-ia32.cc',
@@ -575,6 +577,8 @@
'../../src/x64/assembler-x64.cc',
'../../src/x64/assembler-x64.h',
'../../src/x64/builtins-x64.cc',
+ '../../src/x64/code-stubs-x64.cc',
+ '../../src/x64/code-stubs-x64.h',
'../../src/x64/codegen-x64.cc',
'../../src/x64/codegen-x64.h',
'../../src/x64/cpu-x64.cc',
diff --git a/tools/oom_dump/oom_dump.cc b/tools/oom_dump/oom_dump.cc
index ae14cde0..1bf5ac19 100644
--- a/tools/oom_dump/oom_dump.cc
+++ b/tools/oom_dump/oom_dump.cc
@@ -162,7 +162,7 @@ void DumpHeapStats(const char *minidump_file) {
ReadPointedValue(memory_region, heap_stats_addr, offset)
CHECK(READ_FIELD(0) == v8::internal::HeapStats::kStartMarker);
- CHECK(READ_FIELD(23) == v8::internal::HeapStats::kEndMarker);
+ CHECK(READ_FIELD(24) == v8::internal::HeapStats::kEndMarker);
const int new_space_size = READ_FIELD(1);
const int new_space_capacity = READ_FIELD(2);
@@ -184,6 +184,7 @@ void DumpHeapStats(const char *minidump_file) {
const int destroyed_global_handle_count = READ_FIELD(18);
const int memory_allocator_size = READ_FIELD(19);
const int memory_allocator_capacity = READ_FIELD(20);
+ const int os_error = READ_FIELD(23);
#undef READ_FIELD
int objects_per_type[v8::internal::LAST_TYPE + 1] = {0};
@@ -243,6 +244,7 @@ void DumpHeapStats(const char *minidump_file) {
PRINT_INT_STAT(destroyed_global_handle_count);
PRINT_MB_STAT(memory_allocator_size);
PRINT_MB_STAT(memory_allocator_capacity);
+ PRINT_INT_STAT(os_error);
#undef PRINT_STAT
printf("\n");
diff --git a/tools/test.py b/tools/test.py
index f17e9b1c..4b916f85 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -331,10 +331,11 @@ class CommandOutput(object):
class TestCase(object):
- def __init__(self, context, path):
+ def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
+ self.mode = mode
def IsNegative(self):
return False
@@ -355,14 +356,19 @@ class TestCase(object):
def RunCommand(self, command):
full_command = self.context.processor(command)
- output = Execute(full_command, self.context, self.context.timeout)
+ output = Execute(full_command,
+ self.context,
+ self.context.GetTimeout(self.mode))
self.Cleanup()
- return TestOutput(self, full_command, output)
+ return TestOutput(self,
+ full_command,
+ output,
+ self.context.store_unexpected_output)
def BeforeRun(self):
pass
- def AfterRun(self):
+ def AfterRun(self, result):
pass
def Run(self):
@@ -370,7 +376,7 @@ class TestCase(object):
try:
result = self.RunCommand(self.GetCommand())
finally:
- self.AfterRun()
+ self.AfterRun(result)
return result
def Cleanup(self):
@@ -379,10 +385,11 @@ class TestCase(object):
class TestOutput(object):
- def __init__(self, test, command, output):
+ def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
+ self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
@@ -395,6 +402,9 @@ class TestOutput(object):
outcome = PASS
return not outcome in self.test.outcomes
+ def HasPreciousOutput(self):
+ return self.UnexpectedOutput() and self.store_unexpected_output
+
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
@@ -557,6 +567,11 @@ class TestSuite(object):
return self.name
+# Use this to run several variants of the tests, e.g.:
+# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
+VARIANT_FLAGS = [[]]
+
+
class TestRepository(TestSuite):
def __init__(self, path):
@@ -583,8 +598,12 @@ class TestRepository(TestSuite):
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
- def ListTests(self, current_path, path, context, mode):
- return self.GetConfiguration(context).ListTests(current_path, path, mode)
+ def AddTestsToList(self, result, current_path, path, context, mode):
+ for v in VARIANT_FLAGS:
+ tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
+ for t in tests: t.variant_flags = v
+ result += tests
+
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
@@ -611,7 +630,7 @@ class LiteralTestSuite(TestSuite):
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
- result += test.ListTests(full_path, path, context, mode)
+ test.AddTestsToList(result, full_path, path, context, mode)
return result
def GetTestStatus(self, context, sections, defs):
@@ -619,12 +638,20 @@ class LiteralTestSuite(TestSuite):
test.GetTestStatus(context, sections, defs)
-SUFFIX = {'debug': '_g', 'release': ''}
+SUFFIX = {
+ 'debug' : '_g',
+ 'release' : '' }
+FLAGS = {
+ 'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
+ 'release' : []}
+TIMEOUT_SCALEFACTOR = {
+ 'debug' : 4,
+ 'release' : 1 }
class Context(object):
- def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs):
+ def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
@@ -632,6 +659,7 @@ class Context(object):
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
+ self.store_unexpected_output = store_unexpected_output
def GetVm(self, mode):
name = self.vm_root + SUFFIX[mode]
@@ -639,6 +667,15 @@ class Context(object):
name = name + '.exe'
return name
+ def GetVmCommand(self, testcase, mode):
+ return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
+
+ def GetVmFlags(self, testcase, mode):
+ return testcase.variant_flags + FLAGS[mode]
+
+ def GetTimeout(self, mode):
+ return self.timeout * TIMEOUT_SCALEFACTOR[mode]
+
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
@@ -1121,7 +1158,13 @@ def BuildOptions():
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
- result.add_option("--shell", help="Path to V8 shell", default="shell");
+ result.add_option("--shell", help="Path to V8 shell", default="shell")
+ result.add_option("--store-unexpected-output",
+ help="Store the temporary JS files from tests that fails",
+ dest="store_unexpected_output", default=True, action="store_true")
+ result.add_option("--no-store-unexpected-output",
+ help="Deletes the temporary JS files from tests that fails",
+ dest="store_unexpected_output", action="store_false")
return result
@@ -1258,11 +1301,13 @@ def Main():
shell = abspath(options.shell)
buildspace = dirname(shell)
+
context = Context(workspace, buildspace, VERBOSE,
shell,
options.timeout,
GetSpecialCommandProcessor(options.special_command),
- options.suppress_dialogs)
+ options.suppress_dialogs,
+ options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
@@ -1278,7 +1323,7 @@ def Main():
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
-
+
# Get status for tests
sections = [ ]
defs = { }
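
Taken together, the test.py changes assemble a test command from the shell
binary, the per-test variant flags, and the per-mode flags, and scale the
timeout per mode. A standalone sketch of that composition (the stub test case
and the example values are illustrative; the function bodies mirror the new
Context methods):

  FLAGS = {
    'debug'   : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
    'release' : []}
  TIMEOUT_SCALEFACTOR = {
    'debug'   : 4,
    'release' : 1}

  class StubTestCase(object):
    # Stand-in for test.TestCase; variant_flags is assigned per test by
    # TestRepository.AddTestsToList from the VARIANT_FLAGS table.
    variant_flags = ['--always_compact', '--noflush_code']

  def GetVmFlags(testcase, mode):
    # Mirrors Context.GetVmFlags: variant flags first, then mode flags.
    return testcase.variant_flags + FLAGS[mode]

  def GetVmCommand(vm, testcase, mode):
    # Mirrors Context.GetVmCommand: the shell binary plus the flags above.
    return [vm] + GetVmFlags(testcase, mode)

  def GetTimeout(timeout, mode):
    # Mirrors Context.GetTimeout: debug runs get four times the base timeout.
    return timeout * TIMEOUT_SCALEFACTOR[mode]

  # A debug-mode run of one test variant:
  print(GetVmCommand('shell_g', StubTestCase(), 'debug'))
  # ['shell_g', '--always_compact', '--noflush_code',
  #  '--enable-slow-asserts', '--debug-code', '--verify-heap']
  print(GetTimeout(60, 'debug'))  # 240
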
diff --git a/tools/utils.py b/tools/utils.py
index 3a55722e..8083091b 100644
--- a/tools/utils.py
+++ b/tools/utils.py
@@ -63,14 +63,20 @@ def GuessOS():
return None
+# This will default to building the 32 bit VM even on machines that are capable
+# of running the 64 bit VM. Use the scons option --arch=x64 to force it to build
+# the 64 bit VM.
def GuessArchitecture():
id = platform.machine()
+ id = id.lower() # Windows 7 capitalizes 'AMD64'.
if id.startswith('arm'):
return 'arm'
- elif (not id) or (not re.match('(x|i[3-6])86', id) is None):
+ elif (not id) or (not re.match('(x|i[3-6])86$', id) is None):
return 'ia32'
elif id == 'i86pc':
return 'ia32'
+ elif id == 'x86_64':
+ return 'ia32'
elif id == 'amd64':
return 'ia32'
else:
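
The net effect of the GuessArchitecture change is that machine strings are
lower-cased before matching and that 64-bit hosts still default to the 32-bit
VM unless --arch=x64 is passed to scons. A small self-check sketch (the
machine strings below are examples, and patching platform.machine this way is
for illustration only):

  import platform
  import utils  # run from the tools/ directory

  EXPECTED = {
    'AMD64'  : 'ia32',  # Windows 7 capitalizes the machine name
    'x86_64' : 'ia32',  # 64-bit Linux/Mac hosts still build the 32-bit VM
    'amd64'  : 'ia32',
    'i686'   : 'ia32',
    'i86pc'  : 'ia32',
    'armv7l' : 'arm',
  }
  for machine, arch in EXPECTED.items():
    platform.machine = lambda m=machine: m  # illustrative monkey-patch
    assert utils.GuessArchitecture() == arch, machine
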
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 0ca6a9dd..3ebc4584 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -223,14 +223,12 @@
9FA38BB31175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
9FA38BB41175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
- 9FA38BB61175B2D200C4CD55 /* flow-graph.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */; };
9FA38BB71175B2D200C4CD55 /* full-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA51175B2D200C4CD55 /* full-codegen.cc */; };
9FA38BB81175B2D200C4CD55 /* liveedit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA91175B2D200C4CD55 /* liveedit.cc */; };
9FA38BB91175B2D200C4CD55 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
9FA38BBA1175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
9FA38BBB1175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
- 9FA38BBD1175B2D200C4CD55 /* flow-graph.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */; };
9FA38BBE1175B2D200C4CD55 /* full-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA51175B2D200C4CD55 /* full-codegen.cc */; };
9FA38BBF1175B2D200C4CD55 /* liveedit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA91175B2D200C4CD55 /* liveedit.cc */; };
9FA38BC01175B2D200C4CD55 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
@@ -248,6 +246,8 @@
C2BD4BE51201661F0046BF9F /* dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2BD4BD5120165460046BF9F /* dtoa.cc */; };
C2D1E9731212F2BC00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
C2D1E9741212F2CF00187A52 /* objects-visiting.cc in Sources */ = {isa = PBXBuildFile; fileRef = C2D1E9711212F27B00187A52 /* objects-visiting.cc */; };
+ C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */; };
+ C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = C68081B012251239001EAFE4 /* code-stubs-ia32.cc */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
@@ -596,8 +596,6 @@
9FA38BA01175B2D200C4CD55 /* double.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = double.h; sourceTree = "<group>"; };
9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "fast-dtoa.cc"; sourceTree = "<group>"; };
9FA38BA21175B2D200C4CD55 /* fast-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fast-dtoa.h"; sourceTree = "<group>"; };
- 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "flow-graph.cc"; sourceTree = "<group>"; };
- 9FA38BA41175B2D200C4CD55 /* flow-graph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "flow-graph.h"; sourceTree = "<group>"; };
9FA38BA51175B2D200C4CD55 /* full-codegen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "full-codegen.cc"; sourceTree = "<group>"; };
9FA38BA61175B2D200C4CD55 /* full-codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "full-codegen.h"; sourceTree = "<group>"; };
9FA38BA71175B2D200C4CD55 /* jump-target-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-inl.h"; sourceTree = "<group>"; };
@@ -628,6 +626,10 @@
C2BD4BDA120165A70046BF9F /* fixed-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "fixed-dtoa.h"; sourceTree = "<group>"; };
C2D1E9711212F27B00187A52 /* objects-visiting.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "objects-visiting.cc"; sourceTree = "<group>"; };
C2D1E9721212F27B00187A52 /* objects-visiting.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "objects-visiting.h"; sourceTree = "<group>"; };
+ C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "code-stubs-arm.cc"; path = "arm/code-stubs-arm.cc"; sourceTree = "<group>"; };
+ C68081AC1225120B001EAFE4 /* code-stubs-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "code-stubs-arm.h"; path = "arm/code-stubs-arm.h"; sourceTree = "<group>"; };
+ C68081B012251239001EAFE4 /* code-stubs-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "code-stubs-ia32.cc"; path = "ia32/code-stubs-ia32.cc"; sourceTree = "<group>"; };
+ C68081B412251257001EAFE4 /* code-stubs-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "code-stubs-ia32.h"; path = "ia32/code-stubs-ia32.h"; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
@@ -716,6 +718,10 @@
897FF0D70E719AB300D62E90 /* C++ */ = {
isa = PBXGroup;
children = (
+ C68081B412251257001EAFE4 /* code-stubs-ia32.h */,
+ C68081B012251239001EAFE4 /* code-stubs-ia32.cc */,
+ C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */,
+ C68081AC1225120B001EAFE4 /* code-stubs-arm.h */,
897FF1750E719B8F00D62E90 /* SConscript */,
897FF0F60E719B8F00D62E90 /* accessors.cc */,
897FF0F70E719B8F00D62E90 /* accessors.h */,
@@ -816,8 +822,6 @@
89471C7F0EB23EE400B6874B /* flag-definitions.h */,
897FF1350E719B8F00D62E90 /* flags.cc */,
897FF1360E719B8F00D62E90 /* flags.h */,
- 9FA38BA31175B2D200C4CD55 /* flow-graph.cc */,
- 9FA38BA41175B2D200C4CD55 /* flow-graph.h */,
8981F5FE1010500F00D1520E /* frame-element.cc */,
8981F5FF1010500F00D1520E /* frame-element.h */,
897FF1370E719B8F00D62E90 /* frames-arm.cc */,
@@ -1298,7 +1302,6 @@
89A88E040E71A65D0043BA31 /* factory.cc in Sources */,
9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
89A88E050E71A65D0043BA31 /* flags.cc in Sources */,
- 9FA38BBD1175B2D200C4CD55 /* flow-graph.cc in Sources */,
8981F6001010501900D1520E /* frame-element.cc in Sources */,
89A88E060E71A6600043BA31 /* frames-ia32.cc in Sources */,
89A88E070E71A6610043BA31 /* frames.cc in Sources */,
@@ -1369,6 +1372,7 @@
58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
9FA37336116DD9F000C4CD55 /* vm-state.cc in Sources */,
89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
+ C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -1422,7 +1426,6 @@
89F23C570E78D5B2006B2466 /* factory.cc in Sources */,
9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
89F23C580E78D5B2006B2466 /* flags.cc in Sources */,
- 9FA38BB61175B2D200C4CD55 /* flow-graph.cc in Sources */,
8981F6011010502800D1520E /* frame-element.cc in Sources */,
89F23C9C0E78D5F1006B2466 /* frames-arm.cc in Sources */,
89F23C5A0E78D5B2006B2466 /* frames.cc in Sources */,
@@ -1494,6 +1497,7 @@
58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
9FA37335116DD9F000C4CD55 /* vm-state.cc in Sources */,
89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
+ C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index ef087734..4629b5d3 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -305,6 +305,14 @@
>
</File>
<File
+ RelativePath="..\..\src\ia32\code-stubs-ia32.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\ia32\code-stubs-ia32.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\code.h"
>
</File>
@@ -481,14 +489,6 @@
>
</File>
<File
- RelativePath="..\..\src\flow-graph.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\flow-graph.h"
- >
- </File>
- <File
RelativePath="..\..\src\frame-element.cc"
>
</File>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index aa1e8229..4848c9bc 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -277,6 +277,14 @@
>
</File>
<File
+ RelativePath="..\..\src\arm\code-stubs-arm.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\arm\code-stubs-arm.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\code.h"
>
</File>
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 33c53940..f5cce219 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -277,6 +277,14 @@
>
</File>
<File
+ RelativePath="..\..\src\x64\code-stubs-x64.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\x64\code-stubs-x64.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\code.h"
>
</File>