author     Leon Clarke <leonclarke@google.com>  2010-01-27 15:57:47 +0000
committer  Leon Clarke <leonclarke@google.com>  2010-01-27 15:57:47 +0000
commit     888f6729be6a6f6fbe246cb5a9f122e2dbe455b7 (patch)
tree       16f9ce250ef5d36e49a0fb0018e979002ed5b4fb
parent     1471e9f5b6f291eea8447f441527cd2a85074a33 (diff)
Pull from svn bleeding_edge@3716
-rw-r--r--  AUTHORS | 1
-rw-r--r--  Android.v8common.mk | 4
-rw-r--r-- [-rwxr-xr-x]  SConstruct | 26
-rw-r--r--  V8_MERGE_REVISION | 2
-rw-r--r--  samples/lineprocessor.cc | 8
-rwxr-xr-x  src/SConscript | 12
-rw-r--r--  src/arm/assembler-arm.cc | 36
-rw-r--r--  src/arm/assembler-arm.h | 14
-rw-r--r--  src/arm/codegen-arm.cc | 414
-rw-r--r--  src/arm/codegen-arm.h | 78
-rw-r--r--  src/arm/constants-arm.h | 6
-rw-r--r--  src/arm/disasm-arm.cc | 52
-rw-r--r--  src/arm/full-codegen-arm.cc (renamed from src/arm/fast-codegen-arm.cc) | 193
-rw-r--r--  src/arm/ic-arm.cc | 7
-rw-r--r--  src/arm/macro-assembler-arm.cc | 40
-rw-r--r--  src/arm/macro-assembler-arm.h | 19
-rw-r--r--  src/arm/regexp-macro-assembler-arm.cc | 14
-rw-r--r--  src/arm/regexp-macro-assembler-arm.h | 6
-rw-r--r--  src/arm/simulator-arm.cc | 88
-rw-r--r--  src/arm/simulator-arm.h | 8
-rw-r--r--  src/codegen.cc | 5
-rw-r--r--  src/compilation-cache.cc | 4
-rw-r--r--  src/compiler.cc | 459
-rw-r--r--  src/debug-delay.js | 2
-rw-r--r--  src/debug.cc | 4
-rw-r--r--  src/dtoa-config.c | 7
-rw-r--r--  src/flag-definitions.h | 5
-rw-r--r--  src/frames.h | 3
-rw-r--r--  src/full-codegen.cc (renamed from src/fast-codegen.cc) | 539
-rw-r--r--  src/full-codegen.h (renamed from src/fast-codegen.h) | 50
-rw-r--r--  src/handles.cc | 8
-rw-r--r--  src/heap-profiler.cc | 3
-rw-r--r--  src/heap.cc | 84
-rw-r--r--  src/heap.h | 3
-rw-r--r--  src/ia32/assembler-ia32.cc | 18
-rw-r--r--  src/ia32/assembler-ia32.h | 2
-rw-r--r--  src/ia32/codegen-ia32.cc | 1734
-rw-r--r--  src/ia32/codegen-ia32.h | 98
-rw-r--r--  src/ia32/disasm-ia32.cc | 14
-rw-r--r--  src/ia32/full-codegen-ia32.cc (renamed from src/ia32/fast-codegen-ia32.cc) | 203
-rw-r--r--  src/ia32/ic-ia32.cc | 7
-rw-r--r--  src/ia32/macro-assembler-ia32.cc | 62
-rw-r--r--  src/ia32/macro-assembler-ia32.h | 13
-rw-r--r--  src/ia32/regexp-macro-assembler-ia32.cc | 14
-rw-r--r--  src/ia32/regexp-macro-assembler-ia32.h | 6
-rw-r--r--  src/ia32/simulator-ia32.h | 4
-rw-r--r--  src/ic.h | 7
-rw-r--r--  src/jsregexp.cc | 5
-rw-r--r--  src/list.h | 3
-rw-r--r--  src/log.cc | 123
-rw-r--r--  src/log.h | 20
-rw-r--r--  src/macros.py | 1
-rw-r--r--  src/mark-compact.cc | 76
-rw-r--r--  src/mark-compact.h | 3
-rw-r--r--  src/mksnapshot.cc | 2
-rw-r--r--  src/objects.cc | 6
-rw-r--r--  src/objects.h | 2
-rw-r--r--  src/platform-freebsd.cc | 36
-rw-r--r--  src/platform-linux.cc | 42
-rw-r--r--  src/platform-macos.cc | 24
-rw-r--r--  src/platform-openbsd.cc | 18
-rw-r--r--  src/platform-posix.cc | 18
-rw-r--r--  src/platform-solaris.cc | 607
-rw-r--r--  src/platform-win32.cc | 12
-rw-r--r--  src/platform.h | 23
-rw-r--r--  src/regexp-macro-assembler.cc | 14
-rw-r--r--  src/regexp-macro-assembler.h | 3
-rw-r--r--  src/runtime.cc | 20
-rw-r--r--  src/runtime.js | 4
-rw-r--r--  src/serialize.cc | 334
-rw-r--r--  src/serialize.h | 161
-rw-r--r--  src/snapshot-common.cc | 38
-rw-r--r--  src/spaces-inl.h | 26
-rw-r--r--  src/spaces.cc | 47
-rw-r--r--  src/spaces.h | 74
-rw-r--r--  src/v8-counters.h | 72
-rw-r--r--  src/v8.cc | 1
-rw-r--r--  src/v8natives.js | 4
-rw-r--r--  src/version.cc | 4
-rw-r--r--  src/x64/assembler-x64.cc | 44
-rw-r--r--  src/x64/assembler-x64.h | 8
-rw-r--r--  src/x64/codegen-x64.cc | 1383
-rw-r--r--  src/x64/codegen-x64.h | 126
-rw-r--r--  src/x64/disasm-x64.cc | 30
-rw-r--r--  src/x64/full-codegen-x64.cc (renamed from src/x64/fast-codegen-x64.cc) | 199
-rw-r--r--  src/x64/ic-x64.cc | 7
-rw-r--r--  src/x64/macro-assembler-x64.cc | 50
-rw-r--r--  src/x64/macro-assembler-x64.h | 16
-rw-r--r--  src/x64/regexp-macro-assembler-x64.cc | 16
-rw-r--r--  src/x64/regexp-macro-assembler-x64.h | 14
-rw-r--r--  src/x64/simulator-x64.h | 4
-rw-r--r--  test/cctest/test-api.cc | 117
-rw-r--r--  test/cctest/test-assembler-arm.cc | 61
-rw-r--r--  test/cctest/test-compiler.cc | 3
-rw-r--r--  test/cctest/test-debug.cc | 172
-rw-r--r--  test/cctest/test-disasm-ia32.cc | 2
-rw-r--r--  test/cctest/test-heap-profiler.cc | 8
-rw-r--r--  test/cctest/test-heap.cc | 5
-rw-r--r--  test/cctest/test-log-stack-tracer.cc | 7
-rw-r--r--  test/cctest/test-log.cc | 7
-rw-r--r--  test/cctest/test-mark-compact.cc | 30
-rw-r--r--  test/cctest/test-regexp.cc | 205
-rw-r--r--  test/cctest/test-serialize.cc | 217
-rw-r--r--  test/message/bugs/.svn/entries | 6
-rw-r--r--  test/mjsunit/compiler/unary-add.js | 67
-rw-r--r--  test/mjsunit/debug-step.js | 2
-rw-r--r--  test/mjsunit/mjsunit.status | 4
-rw-r--r--  test/mjsunit/regress/regress-580.js (renamed from test/mjsunit/bugs/bug-223.js) | 36
-rw-r--r--  test/mjsunit/tools/logreader.js | 2
-rw-r--r--  test/mjsunit/tools/tickprocessor-test.func-info | 29
-rw-r--r--  test/mjsunit/tools/tickprocessor-test.log | 27
-rw-r--r--  test/mjsunit/tools/tickprocessor.js | 7
-rw-r--r--  test/mjsunit/value-wrapper.js | 28
-rw-r--r--  tools/codemap.js | 12
-rw-r--r--  tools/gyp/v8.gyp | 10
-rw-r--r--  tools/logreader.js | 5
-rw-r--r--  tools/profile.js | 52
-rw-r--r--  tools/tickprocessor.js | 45
-rw-r--r--  tools/tickprocessor.py | 44
-rw-r--r--  tools/utils.py | 4
-rw-r--r--  tools/visual_studio/v8_base.vcproj | 24
-rw-r--r--  tools/visual_studio/v8_base_arm.vcproj | 24
122 files changed, 6309 insertions, 3059 deletions
diff --git a/AUTHORS b/AUTHORS
index af0ecded..5d712fc2 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -10,6 +10,7 @@ Alexandre Vassalotti <avassalotti@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
+Erich Ocean <erich.ocean@me.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
diff --git a/Android.v8common.mk b/Android.v8common.mk
index 26501553..a533e66c 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -20,10 +20,10 @@ V8_LOCAL_SRC_FILES := \
src/disassembler.cc \
src/execution.cc \
src/factory.cc \
- src/fast-codegen.cc \
src/flags.cc \
src/frame-element.cc \
src/frames.cc \
+ src/full-codegen.cc \
src/func-name-inferrer.cc \
src/global-handles.cc \
src/handles.cc \
@@ -75,8 +75,8 @@ ifeq ($(TARGET_ARCH),arm)
src/arm/codegen-arm.cc \
src/arm/cpu-arm.cc \
src/arm/disasm-arm.cc \
- src/arm/fast-codegen-arm.cc \
src/arm/frames-arm.cc \
+ src/arm/full-codegen-arm.cc \
src/arm/ic-arm.cc \
src/arm/jump-target-arm.cc \
src/arm/macro-assembler-arm.cc \
diff --git a/SConstruct b/SConstruct
index 739e3445..2967d92e 100755..100644
--- a/SConstruct
+++ b/SConstruct
@@ -35,7 +35,6 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c, utils
-
# ANDROID_TOP is the top of the Android checkout, fetched from the environment
# variable 'TOP'. You will also need to set the CXX, CC, AR and RANLIB
# environment variables to the cross-compiling tools.
@@ -157,6 +156,11 @@ LIBRARY_FLAGS = {
'LIBPATH' : ['/usr/local/lib'],
'CCFLAGS': ['-ansi'],
},
+ 'os:solaris': {
+ 'CPPPATH' : ['/usr/local/include'],
+ 'LIBPATH' : ['/usr/local/lib'],
+ 'CCFLAGS': ['-ansi'],
+ },
'os:win32': {
'CCFLAGS': ['-DWIN32'],
'CXXFLAGS': ['-DWIN32'],
@@ -313,6 +317,10 @@ MKSNAPSHOT_EXTRA_FLAGS = {
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
+ 'os:solaris': {
+ 'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
+ 'LINKFLAGS': ['-mt']
+ },
'os:openbsd': {
'LIBS': ['execinfo', 'pthread']
},
@@ -362,6 +370,10 @@ CCTEST_EXTRA_FLAGS = {
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
+ 'os:solaris': {
+ 'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
+ 'LINKFLAGS': ['-mt']
+ },
'os:openbsd': {
'LIBS': ['execinfo', 'pthread']
},
@@ -420,6 +432,11 @@ SAMPLE_FLAGS = {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
},
+ 'os:solaris': {
+ 'LIBPATH' : ['/usr/local/lib'],
+ 'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
+ 'LINKFLAGS': ['-mt']
+ },
'os:openbsd': {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
@@ -529,6 +546,10 @@ D8_FLAGS = {
'os:freebsd': {
'LIBS': ['pthread'],
},
+ 'os:solaris': {
+ 'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
+ 'LINKFLAGS': ['-mt']
+ },
'os:openbsd': {
'LIBS': ['pthread'],
},
@@ -582,7 +603,7 @@ SIMPLE_OPTIONS = {
'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
},
'os': {
- 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd'],
+ 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
'default': OS_GUESS,
'help': 'the os to build for (' + OS_GUESS + ')'
},
@@ -936,6 +957,7 @@ def BuildSpecific(env, mode, env_overrides):
# Link the object files into a library.
env.Replace(**context.flags['v8'])
+
context.ApplyEnvOverrides(env)
if context.options['library'] == 'static':
library = env.StaticLibrary(library_name, object_files)
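
Note: the SConstruct hunks above make Solaris a recognized build target. 'solaris' is now accepted for the os option, the library flags gain the /usr/local include and lib paths with -ansi, and every binary (mksnapshot, cctest, the samples and d8) links against m, pthread, socket, nsl and rt with the -mt link flag wherever the BSD targets list execinfo/pthread. A Solaris build would presumably be configured the same way as the other ports, e.g. scons os=solaris, with the platform code itself arriving in src/platform-solaris.cc later in this change.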
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 3f20451e..c359be99 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
We sync with Chromium release revision, which has both webkit revision and V8 revision.
http://src.chromium.org/svn/releases/4.0.269.0/DEPS
-http://v8.googlecode.com/svn/branches/bleeding_edge@3649
+http://v8.googlecode.com/svn/branches/bleeding_edge@3716
diff --git a/samples/lineprocessor.cc b/samples/lineprocessor.cc
index 2e8092ee..505dabf9 100644
--- a/samples/lineprocessor.cc
+++ b/samples/lineprocessor.cc
@@ -134,7 +134,7 @@ int RunMain(int argc, char* argv[]) {
int port_number = -1;
bool wait_for_connection = false;
- bool support_callback = true;
+ bool support_callback = false;
MainCycleType cycle_type = CycleInCpp;
for (int i = 1; i < argc; i++) {
@@ -144,9 +144,7 @@ int RunMain(int argc, char* argv[]) {
// alone JavaScript engines.
continue;
} else if (strcmp(str, "--callback") == 0) {
- // TODO(548): implement this.
- printf("Error: debugger agent callback is not supported yet.\n");
- return 1;
+ support_callback = true;
} else if (strcmp(str, "--wait-for-connection") == 0) {
wait_for_connection = true;
} else if (strcmp(str, "--main-cycle-in-cpp") == 0) {
@@ -410,7 +408,7 @@ v8::Handle<v8::String> ReadLine() {
char* res;
{
v8::Unlocker unlocker;
- res = fgets(buffer, buffer_size, stdin);
+ res = fgets(buffer, kBufferSize, stdin);
}
if (res == NULL) {
v8::Handle<v8::Primitive> t = v8::Undefined();
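
Note: with the lineprocessor change above, --callback no longer aborts with "debugger agent callback is not supported yet" (the old TODO 548 path); it simply sets support_callback, so the callback-driven debugger cycle can be exercised, optionally together with --wait-for-connection. The ReadLine fix also passes the kBufferSize constant to fgets instead of buffer_size.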
diff --git a/src/SConscript b/src/SConscript
index 4eb87224..7950ab33 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -56,10 +56,10 @@ SOURCES = {
disassembler.cc
execution.cc
factory.cc
- fast-codegen.cc
flags.cc
frame-element.cc
frames.cc
+ full-codegen.cc
func-name-inferrer.cc
global-handles.cc
handles.cc
@@ -112,8 +112,8 @@ SOURCES = {
arm/cpu-arm.cc
arm/debug-arm.cc
arm/disasm-arm.cc
- arm/fast-codegen-arm.cc
arm/frames-arm.cc
+ arm/full-codegen-arm.cc
arm/ic-arm.cc
arm/jump-target-arm.cc
arm/macro-assembler-arm.cc
@@ -135,8 +135,8 @@ SOURCES = {
ia32/cpu-ia32.cc
ia32/debug-ia32.cc
ia32/disasm-ia32.cc
- ia32/fast-codegen-ia32.cc
ia32/frames-ia32.cc
+ ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/macro-assembler-ia32.cc
@@ -152,8 +152,8 @@ SOURCES = {
x64/cpu-x64.cc
x64/debug-x64.cc
x64/disasm-x64.cc
- x64/fast-codegen-x64.cc
x64/frames-x64.cc
+ x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
x64/macro-assembler-x64.cc
@@ -168,6 +168,7 @@ SOURCES = {
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
+ 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
@@ -196,6 +197,9 @@ D8_FILES = {
'os:openbsd': [
'd8-posix.cc'
],
+ 'os:solaris': [
+ 'd8-posix.cc'
+ ],
'os:win32': [
'd8-windows.cc'
],
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 07da8009..74547be6 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -1371,6 +1371,36 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
+void Assembler::vldr(const DwVfpRegister dst,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // Ddst = MEM(Rbase + offset).
+ // Instruction details available in ARM DDI 0406A, A8-628.
+ // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+ // Vdst(15-12) | 1011(11-8) | offset
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // MEM(Rbase + offset) = Dsrc.
+ // Instruction details available in ARM DDI 0406A, A8-786.
+ // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
+ // Vsrc(15-12) | 1011(11-8) | (offset/4)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index cd53dd60..8b65b7cd 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
@@ -796,6 +796,14 @@ class Assembler : public Malloced {
// However, some simple modifications can allow
// these APIs to support D16 to D31.
+ void vldr(const DwVfpRegister dst,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
+ void vstr(const DwVfpRegister src,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
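
The vldr/vstr declarations above (with their encodings added in assembler-arm.cc) are what lets the rest of this commit move HeapNumber values directly between memory and VFP double registers. A condensed sketch of the pattern used later in codegen-arm.cc (HandleBinaryOpSlowCases), assuming VFP3 is enabled, r0 and r5 hold tagged HeapNumbers, d5 holds a computed result and r7 is free as scratch:

  __ sub(r7, r0, Operand(kHeapObjectTag));    // Strip the heap-object tag to get the object base.
  __ vldr(d7, r7, HeapNumber::kValueOffset);  // d7 = the number's double value (offset is a multiple of 4).
  // ... VFP arithmetic producing d5 ...
  __ sub(r0, r5, Operand(kHeapObjectTag));    // r5 is the result HeapNumber.
  __ vstr(d5, r0, HeapNumber::kValueOffset);  // Store the double into the result object.
  __ add(r0, r0, Operand(kHeapObjectTag));    // Re-tag the result for the caller.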
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 0c1dbcc5..38f08d1f 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -605,14 +605,19 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
}
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
- : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- cgen_->UnloadReference(this);
+ ASSERT(is_unloaded() || is_illegal());
}
@@ -661,6 +666,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
frame_->Drop(size);
frame_->EmitPush(r0);
}
+ ref->set_unloaded();
}
@@ -1244,8 +1250,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
Reference target(this, node->proxy());
LoadAndSpill(val);
target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
}
// Get rid of the assigned value (declarations are statements).
frame_->Drop();
@@ -1932,25 +1936,17 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (each.size() > 0) {
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, r3 pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
- frame_->EmitPop(r0);
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2);
+ } else {
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (eg, r3 pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop();
}
}
}
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2844,7 +2840,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target());
+ { Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2859,8 +2855,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
node->op() == Token::INIT_CONST) {
LoadAndSpill(node->value());
- } else {
- // +=, *= and similar binary assignments.
+ } else { // Assignment is a compound assignment.
// Get the old value of the lhs.
target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
@@ -2881,13 +2876,12 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->EmitPush(r0);
}
}
-
Variable* var = node->target()->AsVariableProxy()->AsVariable();
if (var != NULL &&
(var->mode() == Variable::CONST) &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
-
+ UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -3097,16 +3091,20 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'array[index](1, 2, 3)'
// -------------------------------------------
- // Load the function to call from the property through a reference.
- Reference ref(this, property);
- ref.GetValueAndSpill(); // receiver
-
- // Pass receiver to called function.
+ LoadAndSpill(property->obj());
+ LoadAndSpill(property->key());
+ EmitKeyedLoad(false);
+ frame_->Drop(); // key
+ // Put the function below the receiver.
if (property->is_synthetic()) {
+ // Use the global receiver.
+ frame_->Drop();
+ frame_->EmitPush(r0);
LoadGlobalReceiver(r0);
} else {
- __ ldr(r0, frame_->ElementAt(ref.size()));
- frame_->EmitPush(r0);
+ frame_->EmitPop(r1); // receiver
+ frame_->EmitPush(r0); // function
+ frame_->EmitPush(r1); // receiver
}
// Call the function.
@@ -3470,6 +3468,20 @@ void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+ __ tst(r0, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+ __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ cc_reg_ = ne;
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
@@ -3562,7 +3574,8 @@ void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
Load(args->at(0));
Load(args->at(1));
- frame_->CallRuntime(Runtime::kStringCompare, 2);
+ StringCompareStub stub;
+ frame_->CallStub(&stub, 2);
frame_->EmitPush(r0);
}
@@ -3792,7 +3805,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
frame_->EmitPush(r0);
}
- { Reference target(this, node->expression());
+ // A constant reference is not saved to, so a constant reference is not a
+ // compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -4253,6 +4268,16 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
+void CodeGenerator::EmitKeyedLoad(bool is_global) {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ RelocInfo::Mode rmode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ frame_->CallCodeObject(ic, rmode, 0);
+}
+
+
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() { return true; }
#endif
@@ -4319,23 +4344,21 @@ void Reference::GetValue() {
case KEYED: {
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
- VirtualFrame* frame = cgen_->frame();
- Comment cmnt(masm, "[ Load from keyed Property");
ASSERT(property != NULL);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
Variable* var = expression_->AsVariableProxy()->AsVariable();
ASSERT(var == NULL || var->is_global());
- RelocInfo::Mode rmode = (var == NULL)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- frame->CallCodeObject(ic, rmode, 0);
- frame->EmitPush(r0);
+ cgen_->EmitKeyedLoad(var != NULL);
+ cgen_->frame()->EmitPush(r0);
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ cgen_->UnloadReference(this);
+ }
}
@@ -4397,6 +4420,7 @@ void Reference::SetValue(InitState init_state) {
default:
UNREACHABLE();
}
+ cgen_->UnloadReference(this);
}
@@ -4832,14 +4856,14 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* lhs_not_nan,
Label* slow,
bool strict) {
- Label lhs_is_smi;
+ Label rhs_is_smi;
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &lhs_is_smi);
+ __ b(eq, &rhs_is_smi);
- // Rhs is a Smi. Check whether the non-smi is a heap number.
+ // Lhs is a Smi. Check whether the rhs is a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
- // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // If rhs is not a number and lhs is a Smi then strict equality cannot
// succeed. Return non-equal (r0 is already not zero)
__ mov(pc, Operand(lr), LeaveCC, ne); // Return.
} else {
@@ -4848,57 +4872,67 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ b(ne, slow);
}
- // Rhs is a smi, lhs is a number.
- __ push(lr);
-
+ // Lhs (r1) is a smi, rhs (r0) is a number.
if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert lhs to a double in d7 .
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt(d7, s15);
+ // Load the double from rhs, tagged HeapNumber r0, to d6.
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
} else {
+ __ push(lr);
+ // Convert lhs to a double in r2, r3.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Load rhs to a double in r0, r1.
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ pop(lr);
}
-
- // r3 and r2 are rhs as double.
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
// We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a Smi.
- __ pop(lr);
+ // since it's a smi.
__ jmp(lhs_not_nan);
- __ bind(&lhs_is_smi);
- // Lhs is a Smi. Check whether the non-smi is a heap number.
+ __ bind(&rhs_is_smi);
+ // Rhs is a smi. Check whether the non-smi lhs is a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
- // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // If lhs is not a number and rhs is a smi then strict equality cannot
// succeed. Return non-equal.
__ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
__ mov(pc, Operand(lr), LeaveCC, ne); // Return.
} else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // Smi compared non-strictly with a non-smi non-heap-number. Call
// the runtime.
__ b(ne, slow);
}
- // Lhs is a smi, rhs is a number.
- // r0 is Smi and r1 is heap number.
- __ push(lr);
- __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
-
+ // Rhs (r0) is a smi, lhs (r1) is a heap number.
if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert rhs to a double in d6 .
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
+ // Load the double from lhs, tagged HeapNumber r1, to d7.
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt(d6, s13);
} else {
+ __ push(lr);
+ // Load lhs to a double in r2, r3.
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ // Convert rhs to a double in r0, r1.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
}
-
- __ pop(lr);
// Fall through to both_loaded_as_doubles.
}
@@ -5047,10 +5081,18 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ }
__ jmp(both_loaded_as_doubles);
}
@@ -5075,8 +5117,9 @@ static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
}
-// On entry r0 and r1 are the things to be compared. On exit r0 is 0,
-// positive or negative to indicate the result of the comparison.
+// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
+// On exit r0 is 0, positive or negative to indicate the result of
+// the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
@@ -5101,21 +5144,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
// 3) Fall through to both_loaded_as_doubles.
// 4) Jump to lhs_not_nan.
// In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
+ // comparison. If VFP3 is supported the double values of the numbers have
+ // been loaded into d7 and d6. Otherwise, the double values have been loaded
+ // into r0, r1, r2, and r3.
EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
__ bind(&both_loaded_as_doubles);
- // r0, r1, r2, r3 are the double representations of the right hand side
- // and the left hand side.
-
+ // The arguments have been converted to doubles and stored in d6 and d7, if
+ // VFP3 is supported, or in r0, r1, r2, and r3.
if (CpuFeatures::IsSupported(VFP3)) {
__ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
- __ vmov(d6, r0, r1);
- __ vmov(d7, r2, r3);
-
__ vcmp(d7, d6);
__ vmrs(pc); // Move vector status bits to normal status bits.
Label nan;
@@ -5154,6 +5195,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
Label check_for_symbols;
+ Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
// that case. If the inputs are not doubles then jumps to check_for_symbols.
@@ -5161,7 +5203,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
EmitCheckForTwoHeapNumbers(masm,
&both_loaded_as_doubles,
&check_for_symbols,
- &slow);
+ &flat_string_check);
__ bind(&check_for_symbols);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
@@ -5169,10 +5211,27 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (cc_ == eq && !strict_) {
// Either jumps to slow or returns the answer. Assumes that r2 is the type
// of r0 on entry.
- EmitCheckForSymbols(masm, &slow);
+ EmitCheckForSymbols(masm, &flat_string_check);
}
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
+
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ r1,
+ r0,
+ r2,
+ r3,
+ r4,
+ r5);
+ // Never falls through to here.
+
__ bind(&slow);
+
__ push(r1);
__ push(r0);
// Figure out which native to call and setup the arguments.
@@ -5239,10 +5298,18 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
- if (CpuFeatures::IsSupported(VFP3)) {
+ // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
+ // using registers d7 and d6 for the double values.
+ bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
+ Token::MOD != operation;
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt(d7, s15);
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt(d6, s13);
} else {
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
@@ -5324,9 +5391,16 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
if (mode == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
}
- // Calling convention says that second double is in r2 and r3.
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r0 to d7.
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that second double is in r2 and r3.
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+ }
__ jmp(&finished_loading_r0);
__ bind(&r0_is_smi);
if (mode == OVERWRITE_RIGHT) {
@@ -5334,10 +5408,12 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
-
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ // Convert smi in r0 to double in d7.
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt(d7, s15);
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
@@ -5357,9 +5433,16 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
if (mode == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
}
- // Calling convention says that first double is in r0 and r1.
- __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r1 to d6.
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that first double is in r0 and r1.
+ __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+ }
__ jmp(&finished_loading_r1);
__ bind(&r1_is_smi);
if (mode == OVERWRITE_LEFT) {
@@ -5367,9 +5450,12 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ // Convert smi in r1 to double in d6.
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt(d6, s13);
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
@@ -5382,22 +5468,12 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
__ bind(&finished_loading_r1);
__ bind(&do_the_call);
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
-
- if (CpuFeatures::IsSupported(VFP3) &&
- ((Token::MUL == operation) ||
- (Token::DIV == operation) ||
- (Token::ADD == operation) ||
- (Token::SUB == operation))) {
+ // If we are inlining the operation using VFP3 instructions for
+ // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision, add, subtract, multiply, divide.
- __ vmov(d6, r0, r1);
- __ vmov(d7, r2, r3);
if (Token::MUL == operation) {
__ vmul(d5, d6, d7);
@@ -5410,15 +5486,20 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
} else {
UNREACHABLE();
}
-
- __ vmov(r0, r1, d5);
-
- __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
- __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
- __ mov(r0, Operand(r5));
+ __ sub(r0, r5, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
__ mov(pc, lr);
return;
}
+
+ // If we did not inline the operation, then the arguments are in:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ // r5: Address of heap number for result.
+
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
__ AlignStack(0);
@@ -6723,6 +6804,101 @@ int CompareStub::MinorKey() {
}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Label compare_lengths;
+ // Find minimum length and length difference.
+ __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
+ Register length_delta = scratch3;
+ __ mov(scratch1, scratch2, LeaveCC, gt);
+ Register min_length = scratch1;
+ __ tst(min_length, Operand(min_length));
+ __ b(eq, &compare_lengths);
+
+ // Setup registers so that we only need to increment one register
+ // in the loop.
+ __ add(scratch2, min_length,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(left, left, Operand(scratch2));
+ __ add(right, right, Operand(scratch2));
+ // Registers left and right points to the min_length character of strings.
+ __ rsb(min_length, min_length, Operand(-1));
+ Register index = min_length;
+ // Index starts at -min_length.
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ add(index, index, Operand(1), SetCC);
+ __ ldrb(scratch2, MemOperand(left, index), ne);
+ __ ldrb(scratch4, MemOperand(right, index), ne);
+ // Skip to compare lengths with eq condition true.
+ __ b(eq, &compare_lengths);
+ __ cmp(scratch2, scratch4);
+ __ b(eq, &loop);
+ // Fallthrough with eq condition false.
+ }
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Use zero length_delta as result.
+ __ mov(r0, Operand(length_delta), SetCC, eq);
+ // Fall through to here if characters compare not-equal.
+ __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
+ __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
+ __ Ret();
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // sp[0]: return address
+ // sp[4]: right string
+ // sp[8]: left string
+
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); // left
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(r0, r1);
+ __ b(ne, &not_same);
+ ASSERT_EQ(0, EQUAL);
+ ASSERT_EQ(0, kSmiTag);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+
+ // Compare flat ascii strings natively. Remove arguments from stack first.
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+}
+
+
#undef __
} } // namespace v8::internal
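
Taken together, the CompareStub changes above add a flat-ASCII-string fast path on ARM: the smi/non-smi and heap-number checks now leave the operands in d6/d7 when VFP3 is available, the symbol check (for == only) falls through to a new flat_string_check, and operands that pass JumpIfNonSmisNotBothSequentialAsciiStrings are compared inline by StringCompareStub::GenerateCompareFlatAsciiStrings; only the remaining cases reach the slow builtin call. GenerateStringCompare in the code generator likewise now calls the StringCompareStub rather than Runtime::kStringCompare directly, and the runtime function stays as the stub's fallback for non-flat or non-ASCII strings.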
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index f5de0ebc..ccca2e9e 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,57 +43,69 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame. The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
- enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen, Expression* expression);
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
+ ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
// The size the reference takes up on the stack.
- int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is left in place with its value above it.
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
void GetValue();
- // Generate code to push the value of a reference on top of the expression
- // stack and then spill the stack frame. This function is used temporarily
- // while the code generator is being transformed.
+ // Generate code to pop a reference, push the value of the reference,
+ // and then spill the stack frame.
inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The stored value is left in place (with the
- // reference intact below it) to support chained assignments.
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
+ // Keep the reference on the stack after get, so it can be used by set later.
+ bool persist_after_get_;
};
@@ -274,6 +286,9 @@ class CodeGenerator: public AstVisitor {
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Load a keyed property, leaving it in r0. The receiver and key are
+ // passed on the stack, and remain there.
+ void EmitKeyedLoad(bool is_global);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
@@ -341,6 +356,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -427,8 +443,8 @@ class CodeGenerator: public AstVisitor {
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
- friend class FastCodeGenerator;
- friend class CodeGenSelector;
+ friend class FullCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -511,6 +527,28 @@ class GenericBinaryOpStub : public CodeStub {
};
+class StringCompareStub: public CodeStub {
+ public:
+ StringCompareStub() { }
+
+ // Compare two flat ASCII strings and returns result in r0.
+ // Does not use the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
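
The Reference changes above replace the old "unload in the destructor" model with an explicit protocol: a reference is consumed by GetValue, SetValue or CodeGenerator::UnloadReference, and must be UNLOADED (or ILLEGAL) when it goes out of scope, with persist_after_get keeping it alive across the read half of a compound assignment. A minimal sketch of the resulting usage, condensed from VisitAssignment in codegen-arm.cc above:

  { Reference target(this, node->target(), node->is_compound());
    // Compound assignment (+=, *=, ...): read the old value. Because
    // persist_after_get is true, GetValue leaves the reference in place.
    target.GetValueAndSpill();
    // ... load the right-hand side and emit the binary operation ...
    // SetValue stores the result and unloads the reference, satisfying
    // the destructor's ASSERT(is_unloaded() || is_illegal()).
    target.SetValue(NOT_CONST_INIT);
  }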
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 94322073..8a32c95b 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -237,6 +237,7 @@ class Instr {
inline int RnField() const { return Bits(19, 16); }
inline int RdField() const { return Bits(15, 12); }
+ inline int CoprocessorField() const { return Bits(11, 8); }
// Support for VFP.
// Vn(19-16) | Vd(15-12) | Vm(3-0)
inline int VnField() const { return Bits(19, 16); }
@@ -246,6 +247,8 @@ class Instr {
inline int MField() const { return Bit(5); }
inline int DField() const { return Bit(22); }
inline int RtField() const { return Bits(15, 12); }
+ inline int PField() const { return Bit(24); }
+ inline int UField() const { return Bit(23); }
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
@@ -296,6 +299,7 @@ class Instr {
inline bool HasB() const { return BField() == 1; }
inline bool HasW() const { return WField() == 1; }
inline bool HasL() const { return LField() == 1; }
+ inline bool HasU() const { return UField() == 1; }
inline bool HasSign() const { return SignField() == 1; }
inline bool HasH() const { return HField() == 1; }
inline bool HasLink() const { return LinkField() == 1; }
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index afed0fa5..5b314557 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -998,29 +998,43 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
- if (instr->Bit(23) == 1) {
- Unknown(instr); // Not used by V8.
- } else if (instr->Bit(22) == 1) {
- if ((instr->Bits(27, 24) == 0xC) &&
- (instr->Bit(22) == 1) &&
- (instr->Bits(11, 8) == 0xB) &&
- (instr->Bits(7, 6) == 0x0) &&
- (instr->Bit(4) == 1)) {
- if (instr->Bit(20) == 0) {
- Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
- } else if (instr->Bit(20) == 1) {
- Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if (instr->Bit(21) == 1) {
+ if (instr->CoprocessorField() != 0xB) {
Unknown(instr); // Not used by V8.
} else {
- Unknown(instr); // Not used by V8.
+ switch (instr->OpcodeField()) {
+ case 0x2:
+ // Load and store double to two GP registers
+ if (instr->Bits(7, 4) != 0x1) {
+ Unknown(instr); // Not used by V8.
+ } else if (instr->HasL()) {
+ Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+ } else {
+ Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+ }
+ break;
+ case 0x8:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]");
+ }
+ break;
+ case 0xC:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]");
+ }
+ break;
+ default:
+ Unknown(instr); // Not used by V8.
+ break;
+ }
}
}
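
The decoder rewrite above mirrors the new assembler support: type 6 coprocessor instructions with coprocessor field 0xB are now dispatched on the opcode field, so the existing vmov register transfers keep disassembling as before, and the new vldr/vstr encodings print with a 4*offset addressing form that matches the multiple-of-4 byte offsets the assembler accepts.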
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 4256e472..1844c285 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -30,7 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
#include "parser.h"
namespace v8 {
@@ -52,7 +52,7 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
SetFunctionPosition(fun);
int locals_count = fun->scope()->num_stack_slots();
@@ -167,7 +167,7 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
}
-void FastCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence(int position) {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ b(&return_label_);
@@ -214,7 +214,7 @@ void FastCodeGenerator::EmitReturnSequence(int position) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -250,7 +250,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -270,7 +270,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -290,7 +290,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
}
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -323,7 +323,7 @@ void FastCodeGenerator::ApplyTOS(Expression::Context context) {
}
-void FastCodeGenerator::DropAndApply(int count,
+void FullCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
ASSERT(count > 0);
@@ -371,7 +371,7 @@ void FastCodeGenerator::DropAndApply(int count,
}
-void FastCodeGenerator::Apply(Expression::Context context,
+void FullCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
switch (context) {
@@ -432,7 +432,7 @@ void FastCodeGenerator::Apply(Expression::Context context,
}
-void FastCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::DoTest(Expression::Context context) {
// The value to test is pushed on the stack, and duplicated on the stack
// if necessary (for value/test and test/value contexts).
ASSERT_NE(NULL, true_label_);
@@ -495,7 +495,7 @@ void FastCodeGenerator::DoTest(Expression::Context context) {
}
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -514,14 +514,14 @@ MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
}
-void FastCodeGenerator::Move(Register destination, Slot* source) {
+void FullCodeGenerator::Move(Register destination, Slot* source) {
// Use destination as scratch.
MemOperand slot_operand = EmitSlotSearch(source, destination);
__ ldr(destination, slot_operand);
}
-void FastCodeGenerator::Move(Slot* dst,
+void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
@@ -537,7 +537,7 @@ void FastCodeGenerator::Move(Slot* dst,
}
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
@@ -637,7 +637,7 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
}
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
__ mov(r1, Operand(pairs));
@@ -648,7 +648,7 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -666,17 +666,21 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), context_);
}
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
- Expression* rewrite = var->rewrite();
- if (rewrite == NULL) {
- ASSERT(var->is_global());
+ // Four cases: non-this global variables, lookup slots, all other
+ // types of slots, and parameters that rewrite to explicit property
+ // accesses on the arguments object.
+ Slot* slot = var->slot();
+ Property* property = var->AsProperty();
+
+ if (var->is_global() && !var->is_this()) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
@@ -686,34 +690,24 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
DropAndApply(1, context, r0);
- } else if (rewrite->AsSlot() != NULL) {
- Slot* slot = rewrite->AsSlot();
- if (FLAG_debug_code) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL: {
- Comment cmnt(masm_, "Stack slot");
- break;
- }
- case Slot::CONTEXT: {
- Comment cmnt(masm_, "Context slot");
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- break;
- }
- }
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ Comment cmnt(masm_, "Lookup slot");
+ __ mov(r1, Operand(var->name()));
+ __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ Apply(context, r0);
+
+ } else if (slot != NULL) {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
Apply(context, slot);
+
} else {
- Comment cmnt(masm_, "Variable rewritten to property");
- // A variable has been rewritten into an explicit access to an object
- // property.
- Property* property = rewrite->AsProperty();
+ Comment cmnt(masm_, "Rewritten parameter");
ASSERT_NOT_NULL(property);
-
- // The only property expressions that can occur are of the form
- // "slot[literal]".
+ // Rewritten parameter accesses are of the form "slot[literal]".
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -745,7 +739,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
}
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
@@ -772,7 +766,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
__ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
@@ -847,7 +841,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
@@ -900,7 +894,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
@@ -909,14 +903,14 @@ void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
}
-void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
__ pop(r1);
GenericBinaryOpStub stub(op, NO_OVERWRITE);
@@ -925,11 +919,17 @@ void FastCodeGenerator::EmitBinaryOp(Token::Value op,
}
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
+ // Three main cases: global variables, lookup slots, and all other
+ // types of slots. Left-hand-side parameters that rewrite to
+ // explicit property accesses do not reach here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
+
+ Slot* slot = var->slot();
if (var->is_global()) {
+ ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in r0, variable name in
// r2, and the global object on the stack.
@@ -941,6 +941,13 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
// Overwrite the global object on the stack with the result if needed.
DropAndApply(1, context, r0);
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ __ push(result_register()); // Value.
+ __ mov(r1, Operand(var->name()));
+ __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ Apply(context, r0);
+
} else if (var->slot() != NULL) {
Slot* slot = var->slot();
switch (slot->type()) {
@@ -967,6 +974,7 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
break;
}
Apply(context, result_register());
+
} else {
// Variables rewritten as properties are not treated as variables in
// assignments.
@@ -975,7 +983,7 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
}
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
@@ -1011,7 +1019,7 @@ void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// If the assignment starts a block of assignments to the same object,
@@ -1046,7 +1054,7 @@ void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
-void FastCodeGenerator::VisitProperty(Property* expr) {
+void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
@@ -1065,7 +1073,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
@@ -1087,7 +1095,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
}
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1105,7 +1113,7 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
}
-void FastCodeGenerator::VisitCall(Call* expr) {
+void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1176,7 +1184,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
}
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -1211,7 +1219,7 @@ void FastCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1246,7 +1254,7 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1351,13 +1359,26 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForValue(expr->expression(), kAccumulator);
+ Label no_conversion;
+ __ tst(result_register(), Operand(kSmiTagMask));
+ __ b(eq, &no_conversion);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+ __ bind(&no_conversion);
+ Apply(context_, result_register());
+ break;
+ }
+
default:
UNREACHABLE();
}
}
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
// Expression can only be a property, a global or a (parameter or local)
@@ -1376,7 +1397,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
@@ -1393,11 +1414,15 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForValue(prop->key(), kStack);
EmitKeyedPropertyLoad(prop);
}
- __ push(r0);
}
- // Convert to number.
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &no_conversion);
+ __ push(r0);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+ __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -1429,12 +1454,28 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // Call stub for +1/-1.
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ if (loop_depth() > 0) {
+ __ add(r0, r0, Operand(expr->op() == Token::INC
+ ? Smi::FromInt(1)
+ : Smi::FromInt(-1)));
+ __ b(vs, &stub_call);
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &done);
+ __ bind(&stub_call);
+ // Call stub. Undo operation first.
+ __ sub(r0, r0, Operand(r1));
+ }
__ mov(r1, Operand(expr->op() == Token::INC
? Smi::FromInt(1)
: Smi::FromInt(-1)));
GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
__ CallStub(&stub);
+ __ bind(&done);
// Store the value returned in r0.
switch (assign_type) {
@@ -1483,7 +1524,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
@@ -1518,7 +1559,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
}
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
@@ -1633,25 +1674,25 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, r0);
}
-Register FastCodeGenerator::result_register() { return r0; }
+Register FullCodeGenerator::result_register() { return r0; }
-Register FastCodeGenerator::context_register() { return cp; }
+Register FullCodeGenerator::context_register() { return cp; }
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
}
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
__ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
}
@@ -1659,7 +1700,7 @@ void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
// ----------------------------------------------------------------------------
// Non-local control flow support.
-void FastCodeGenerator::EnterFinallyBlock() {
+void FullCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(r1));
// Store result register while executing finally block.
__ push(result_register());
@@ -1672,7 +1713,7 @@ void FastCodeGenerator::EnterFinallyBlock() {
}
-void FastCodeGenerator::ExitFinallyBlock() {
+void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(r1));
// Restore result register from stack.
__ pop(r1);
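
The unary ADD and count-operation hunks above skip the TO_NUMBER builtin when the operand is already a smi, and inside loops they inline the +1/-1 as a plain tagged add with an overflow branch back to the generic stub. A minimal standalone sketch of the 32-bit smi tagging those checks rely on — helper names here are hypothetical; only kSmiTagMask and Smi::FromInt appear in the patch itself:

// Sketch, assuming the classic 32-bit layout (kSmiTag == 0, one tag bit):
// a smi n is stored as n << 1, so its low bit is clear, "tst r0, #kSmiTagMask"
// + "beq" detects it, and +1/-1 is an ordinary add of Smi::FromInt(+/-1).
#include <cassert>
#include <cstdint>

constexpr int32_t kSmiTagMask = 1;

inline bool IsSmi(int32_t tagged) { return (tagged & kSmiTagMask) == 0; }
inline int32_t SmiFromInt(int32_t n) { return n << 1; }          // tag
inline int32_t SmiToInt(int32_t tagged) { return tagged >> 1; }  // untag

int main() {
  int32_t value = SmiFromInt(41);
  assert(IsSmi(value));                          // smi: the TO_NUMBER call is skipped
  int32_t incremented = value + SmiFromInt(1);   // the inlined "add r0, r0, Smi::FromInt(1)"
  assert(IsSmi(incremented) && SmiToInt(incremented) == 42);
  return 0;
}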
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index a1f26130..b59c3f04 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -569,11 +569,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
+
+ // Check bit field.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ tst(r3, Operand(kSlowCaseBitFieldMask));
__ b(ne, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 18cadaca..6c3bbbb8 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1221,6 +1221,46 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
+ and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
+ cmp(scratch1, Operand(kFlatAsciiStringTag));
+ // Ignore second test if first test failed.
+ cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+ b(ne, failure);
+}
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that neither is a smi.
+ ASSERT_EQ(0, kSmiTag);
+ and_(scratch1, first, Operand(second));
+ tst(scratch1, Operand(kSmiTagMask));
+ b(eq, failure);
+ JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+ second,
+ scratch1,
+ scratch2,
+ failure);
+}
+
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
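
JumpIfNonSmisNotBothSequentialAsciiStrings above folds three questions — is it a string, is it sequential, is it ASCII — into one mask-and-compare on each instance-type byte, then chains the two results with a conditional compare so a single branch covers both operands. A standalone sketch of that mask trick; the bit positions below are made up for illustration, the real kIsNotStringMask and friends live in V8's objects.h:

// Sketch of a combined instance-type check (illustrative bit layout only).
#include <cassert>
#include <cstdint>

constexpr uint8_t kIsNotStringMask          = 1 << 7;  // hypothetical: set for non-strings
constexpr uint8_t kStringRepresentationMask = 0x03;    // hypothetical: 0 == sequential
constexpr uint8_t kStringEncodingMask       = 1 << 2;  // hypothetical: set for ASCII
constexpr uint8_t kFlatAsciiStringMask =
    kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
constexpr uint8_t kFlatAsciiStringTag = kStringEncodingMask;  // string, sequential, ASCII

bool BothSequentialAscii(uint8_t type1, uint8_t type2) {
  // One AND plus one compare per operand; the generated ARM code merges the
  // second compare into the first with "cmp scratch2, ..., eq".
  return (type1 & kFlatAsciiStringMask) == kFlatAsciiStringTag &&
         (type2 & kFlatAsciiStringMask) == kFlatAsciiStringTag;
}

int main() {
  assert(BothSequentialAscii(kFlatAsciiStringTag, kFlatAsciiStringTag));
  assert(!BothSequentialAscii(kFlatAsciiStringTag, kIsNotStringMask));
  return 0;
}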
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8f2064a7..efc5bfae 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -337,6 +337,25 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ // ---------------------------------------------------------------------------
+ // String utilities
+
+ // Checks if both objects are sequential ASCII strings and jumps to label
+ // if either is not. Assumes that neither object is a smi.
+ void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label *failure);
+
+ // Checks if both objects are sequential ASCII strings and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* not_flat_ascii_strings);
+
private:
List<Unresolved> unresolved_;
bool generating_stub_;
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index ed06eb26..9dd3b932 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -63,8 +63,6 @@ namespace internal {
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - link address
@@ -76,6 +74,8 @@ namespace internal {
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
+ * - At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
* - register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
@@ -610,6 +610,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer just above the arguments.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
+ __ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -653,6 +654,15 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Store this value in a local variable, for use when clearing
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ // Determine whether the start index is zero, that is at the start of the
+ // string, and store that value in a local variable.
+ __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
+ __ tst(r1, Operand(r1));
+ __ mov(r1, Operand(1), LeaveCC, eq);
+ __ mov(r1, Operand(0), LeaveCC, ne);
+ __ str(r1, MemOperand(frame_pointer(), kAtStart));
+
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 4459859a..7de5f93d 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -123,8 +123,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kReturnAddress + kPointerSize;
- static const int kAtStart = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer.
@@ -136,8 +135,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kInputStartMinusOne = kInputString - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
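
The at_start value moves from a caller-pushed argument to a frame-local slot just below kInputStartMinusOne, so GetCode pushes one extra word and kRegisterZero slides down by one pointer. A small sketch of that offset arithmetic; the base offset used here is illustrative, only the relative spacing mirrors the header change above:

// Sketch of the local-slot layout shift (frame-pointer-relative offsets).
#include <cassert>

constexpr int kPointerSize        = 4;
constexpr int kInputString        = -5 * kPointerSize;                    // illustrative base slot
constexpr int kInputStartMinusOne = kInputString - kPointerSize;
constexpr int kAtStart            = kInputStartMinusOne - kPointerSize;   // new local
constexpr int kRegisterZero       = kAtStart - kPointerSize;              // one word lower than before

int main() {
  // Every extra local pushed in GetCode moves the register area down one word.
  assert(kRegisterZero == kInputStartMinusOne - 2 * kPointerSize);
  return 0;
}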
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index c4b1e005..f5431512 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,9 +47,9 @@ using ::v8::internal::ReadLine;
using ::v8::internal::DeleteArray;
// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent was through
-// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
-// Library does not provide vsscanf.
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
// The Debugger class is used by the simulator while debugging simulated ARM
@@ -2033,42 +2033,62 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
- int rt = instr->RtField();
- int rn = instr->RnField();
- int vm = instr->VmField();
+ if (instr->CoprocessorField() != 0xB) {
+ UNIMPLEMENTED(); // Not used by V8.
+ } else {
+ switch (instr->OpcodeField()) {
+ case 0x2:
+ // Load and store double to two GP registers
+ if (instr->Bits(7, 4) != 0x1) {
+ UNIMPLEMENTED(); // Not used by V8.
+ } else {
+ int rt = instr->RtField();
+ int rn = instr->RnField();
+ int vm = instr->VmField();
+ if (instr->HasL()) {
+ int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
+ int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
- if (instr->Bit(23) == 1) {
- UNIMPLEMENTED();
- } else if (instr->Bit(22) == 1) {
- if ((instr->Bits(27, 24) == 0xC) &&
- (instr->Bit(22) == 1) &&
- (instr->Bits(11, 8) == 0xB) &&
- (instr->Bits(7, 6) == 0x0) &&
- (instr->Bit(4) == 1)) {
- if (instr->Bit(20) == 0) {
- int32_t rs_val = get_register(rt);
- int32_t rn_val = get_register(rn);
-
- set_s_register_from_sinteger(2*vm, rs_val);
- set_s_register_from_sinteger((2*vm+1), rn_val);
-
- } else if (instr->Bit(20) == 1) {
- int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
- int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
- set_register(rt, rt_int_value);
- set_register(rn, rn_int_value);
+ set_register(rt, rt_int_value);
+ set_register(rn, rn_int_value);
+ } else {
+ int32_t rs_val = get_register(rt);
+ int32_t rn_val = get_register(rn);
+
+ set_s_register_from_sinteger(2*vm, rs_val);
+ set_s_register_from_sinteger((2*vm+1), rn_val);
+ }
+ }
+ break;
+ case 0x8:
+ case 0xC: { // Load and store double to memory.
+ int rn = instr->RnField();
+ int vd = instr->VdField();
+ int offset = instr->Immed8Field();
+ if (!instr->HasU()) {
+ offset = -offset;
+ }
+ int32_t address = get_register(rn) + 4 * offset;
+ if (instr->HasL()) {
+ // Load double from memory: vldr.
+ set_s_register_from_sinteger(2*vd, ReadW(address, instr));
+ set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
+ } else {
+ // Store double to memory: vstr.
+ WriteW(address, get_sinteger_from_s_register(2*vd), instr);
+ WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
+ }
+ break;
}
- } else {
- UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
}
- } else if (instr->Bit(21) == 1) {
- UNIMPLEMENTED();
- } else {
- UNIMPLEMENTED();
}
}
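
The new 0x8/0xC arm of DecodeType6CoprocessorIns models vldr/vstr: the effective address is base plus 4 * imm8, negated when the U bit is clear, and the 64-bit D register is handled as its two 32-bit S halves. A standalone sketch of that logic with hypothetical register and memory arrays (not the simulator's real state):

// Sketch: vldr/vstr as an offset computation plus a two-word copy.
#include <cassert>
#include <cstdint>
#include <cstring>

uint32_t s_registers[32];  // single-precision view: d<n> == {s<2n>, s<2n+1>}
uint8_t memory[1024];

void SimulateVldrVstr(bool is_load, bool u_bit, int vd, int base, int imm8) {
  int offset = u_bit ? imm8 : -imm8;
  int address = base + 4 * offset;
  if (is_load) {   // vldr: memory -> s[2*vd], s[2*vd+1]
    std::memcpy(&s_registers[2 * vd], &memory[address], 2 * sizeof(uint32_t));
  } else {         // vstr: s[2*vd], s[2*vd+1] -> memory
    std::memcpy(&memory[address], &s_registers[2 * vd], 2 * sizeof(uint32_t));
  }
}

int main() {
  double value = 3.5;
  std::memcpy(&s_registers[2], &value, sizeof(value));  // write d1
  SimulateVldrVstr(false, true, 1, 16, 2);              // vstr d1, [base=16, #+8]
  SimulateVldrVstr(true, true, 3, 16, 2);               // vldr d3, [base=16, #+8]
  double copied;
  std::memcpy(&copied, &s_registers[6], sizeof(copied));
  assert(copied == 3.5);
  return 0;
}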
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 3ce5b7a6..19737301 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -63,8 +63,8 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -79,9 +79,9 @@ class SimulatorStack : public v8::internal::AllStatic {
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::arm::Simulator::current()->Call( \
- FUNCTION_ADDR(entry), 8, p0, p1, p2, p3, p4, p5, p6, p7)
+ FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
diff --git a/src/codegen.cc b/src/codegen.cc
index fd7e0e80..aa2a2b82 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -217,6 +217,10 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
bool is_eval) {
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ int len = String::cast(script->source())->length();
+ Counters::total_old_codegen_source_size.Increment(len);
+ }
MakeCodePrologue(fun);
// Generate code.
const int kInitialBufferSize = 4 * KB;
@@ -344,6 +348,7 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
{&CodeGenerator::GenerateIsObject, "_IsObject"},
{&CodeGenerator::GenerateIsFunction, "_IsFunction"},
+ {&CodeGenerator::GenerateIsUndetectableObject, "_IsUndetectableObject"},
{&CodeGenerator::GenerateStringAdd, "_StringAdd"},
{&CodeGenerator::GenerateSubString, "_SubString"},
{&CodeGenerator::GenerateStringCompare, "_StringCompare"},
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index d8e186a4..54273673 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -37,9 +37,7 @@ namespace internal {
static const int kSubCacheCount = 4;
// The number of generations for each sub cache.
-// TODO(andreip): remove this #ifdef if the page cycler confirms that all is
-// well and we can cache up to 5 script generations.
-#if 0 // defined(ANDROID)
+#if defined(ANDROID)
static const int kScriptGenerations = 1;
static const int kEvalGlobalGenerations = 1;
static const int kEvalContextualGenerations = 1;
diff --git a/src/compiler.cc b/src/compiler.cc
index b7aaedf5..fe61571f 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -32,7 +32,7 @@
#include "compilation-cache.h"
#include "compiler.h"
#include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
#include "oprofile-agent.h"
#include "rewriter.h"
#include "scopes.h"
@@ -42,29 +42,6 @@ namespace v8 {
namespace internal {
-class CodeGenSelector: public AstVisitor {
- public:
- enum CodeGenTag { NORMAL, FAST };
-
- CodeGenSelector() : has_supported_syntax_(true) {}
-
- CodeGenTag Select(FunctionLiteral* fun);
-
- private:
- void VisitDeclarations(ZoneList<Declaration*>* decls);
- void VisitStatements(ZoneList<Statement*>* stmts);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- bool has_supported_syntax_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
-};
-
-
static Handle<Code> MakeCode(FunctionLiteral* literal,
Handle<Script> script,
Handle<Context> context,
@@ -117,12 +94,11 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
!FLAG_always_fast_compiler) {
if (FLAG_trace_bailout) PrintF("No hint to try fast\n");
} else {
- CodeGenSelector selector;
- CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
- if (code_gen == CodeGenSelector::FAST) {
- return FastCodeGenerator::MakeCode(literal, script, is_eval);
+ FullCodeGenSyntaxChecker checker;
+ checker.Check(literal);
+ if (checker.has_supported_syntax()) {
+ return FullCodeGenerator::MakeCode(literal, script, is_eval);
}
- ASSERT(code_gen == CodeGenSelector::NORMAL);
}
}
return CodeGenerator::MakeCode(literal, script, is_eval);
@@ -493,11 +469,12 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
// Generate code and return it.
bool is_compiled = false;
- if (FLAG_fast_compiler && literal->try_fast_codegen()) {
- CodeGenSelector selector;
- CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
- if (code_gen == CodeGenSelector::FAST) {
- code = FastCodeGenerator::MakeCode(literal,
+ if (FLAG_always_fast_compiler ||
+ (FLAG_fast_compiler && literal->try_fast_codegen())) {
+ FullCodeGenSyntaxChecker checker;
+ checker.Check(literal);
+ if (checker.has_supported_syntax()) {
+ code = FullCodeGenerator::MakeCode(literal,
script,
false); // Not eval.
is_compiled = true;
@@ -571,418 +548,4 @@ void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
}
-CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
- Scope* scope = fun->scope();
-
- if (scope->num_heap_slots() > 0) {
- // We support functions with a local context if they do not have
- // parameters that need to be copied into the context.
- for (int i = 0, len = scope->num_parameters(); i < len; i++) {
- Slot* slot = scope->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- if (FLAG_trace_bailout) {
- PrintF("Function has context-allocated parameters.\n");
- }
- return NORMAL;
- }
- }
- }
-
- has_supported_syntax_ = true;
- VisitDeclarations(scope->declarations());
- if (!has_supported_syntax_) return NORMAL;
-
- VisitStatements(fun->body());
- return has_supported_syntax_ ? FAST : NORMAL;
-}
-
-
-#define BAILOUT(reason) \
- do { \
- if (FLAG_trace_bailout) { \
- PrintF("%s\n", reason); \
- } \
- has_supported_syntax_ = false; \
- return; \
- } while (false)
-
-
-#define CHECK_BAILOUT \
- do { \
- if (!has_supported_syntax_) return; \
- } while (false)
-
-
-void CodeGenSelector::VisitDeclarations(ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); i++) {
- Visit(decls->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- Visit(stmts->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitDeclaration(Declaration* decl) {
- Property* prop = decl->proxy()->AsProperty();
- if (prop != NULL) {
- Visit(prop->obj());
- Visit(prop->key());
- }
-
- if (decl->fun() != NULL) {
- Visit(decl->fun());
- }
-}
-
-
-void CodeGenSelector::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {}
-
-
-void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
- Visit(stmt->condition());
- CHECK_BAILOUT;
- Visit(stmt->then_statement());
- CHECK_BAILOUT;
- Visit(stmt->else_statement());
-}
-
-
-void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {}
-
-
-void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {}
-
-
-void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {}
-
-
-void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) {
- BAILOUT("SwitchStatement");
-}
-
-
-void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
- Visit(stmt->cond());
- CHECK_BAILOUT;
- Visit(stmt->body());
-}
-
-
-void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
- BAILOUT("ForStatement");
-}
-
-
-void CodeGenSelector::VisitForInStatement(ForInStatement* stmt) {
- BAILOUT("ForInStatement");
-}
-
-
-void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->catch_block());
-}
-
-
-void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- Visit(stmt->try_block());
- CHECK_BAILOUT;
- Visit(stmt->finally_block());
-}
-
-
-void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {}
-
-
-void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {}
-
-
-void CodeGenSelector::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* expr) {
- BAILOUT("FunctionBoilerplateLiteral");
-}
-
-
-void CodeGenSelector::VisitConditional(Conditional* expr) {
- Visit(expr->condition());
- CHECK_BAILOUT;
- Visit(expr->then_expression());
- CHECK_BAILOUT;
- Visit(expr->else_expression());
-}
-
-
-void CodeGenSelector::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
- Variable* var = expr->var();
- if (!var->is_global()) {
- Slot* slot = var->slot();
- if (slot != NULL) {
- Slot::Type type = slot->type();
- // When LOOKUP slots are enabled, some currently dead code
- // implementing unary typeof will become live.
- if (type == Slot::LOOKUP) {
- BAILOUT("Lookup slot");
- }
- } else {
- // If not global or a slot, it is a parameter rewritten to an explicit
- // property reference on the (shadow) arguments object.
-#ifdef DEBUG
- Property* property = var->AsProperty();
- ASSERT_NOT_NULL(property);
- Variable* object = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object);
- ASSERT_NOT_NULL(object->slot());
- ASSERT_NOT_NULL(property->key()->AsLiteral());
- ASSERT(property->key()->AsLiteral()->handle()->IsSmi());
-#endif
- }
- }
-}
-
-
-void CodeGenSelector::VisitLiteral(Literal* expr) {}
-
-
-void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {}
-
-
-void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
- ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
-
- for (int i = 0, len = properties->length(); i < len; i++) {
- ObjectLiteral::Property* property = properties->at(i);
- if (property->IsCompileTimeValue()) continue;
- Visit(property->key());
- CHECK_BAILOUT;
- Visit(property->value());
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
- Expression* subexpr = subexprs->at(i);
- if (subexpr->AsLiteral() != NULL) continue;
- if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- Visit(subexpr);
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- Visit(expr->key());
- CHECK_BAILOUT;
- Visit(expr->value());
-}
-
-
-void CodeGenSelector::VisitAssignment(Assignment* expr) {
- // We support plain non-compound assignments to properties, parameters and
- // non-context (stack-allocated) locals, and global variables.
- Token::Value op = expr->op();
- if (op == Token::INIT_CONST) BAILOUT("initialize constant");
-
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- if (var->mode() == Variable::CONST) {
- BAILOUT("Assignment to const");
- }
- // All global variables are supported.
- if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type == Slot::LOOKUP) {
- BAILOUT("Lookup slot");
- }
- }
- } else if (prop != NULL) {
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // This is a throw reference error.
- BAILOUT("non-variable/non-property assignment");
- }
-
- Visit(expr->value());
-}
-
-
-void CodeGenSelector::VisitThrow(Throw* expr) {
- Visit(expr->exception());
-}
-
-
-void CodeGenSelector::VisitProperty(Property* expr) {
- Visit(expr->obj());
- CHECK_BAILOUT;
- Visit(expr->key());
-}
-
-
-void CodeGenSelector::VisitCall(Call* expr) {
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- // Check for supported calls
- if (var != NULL && var->is_possibly_eval()) {
- BAILOUT("call to the identifier 'eval'");
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Calls to global variables are supported.
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- BAILOUT("call to a lookup slot");
- } else if (fun->AsProperty() != NULL) {
- Property* prop = fun->AsProperty();
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // Otherwise the call is supported if the function expression is.
- Visit(fun);
- }
- // Check all arguments to the call.
- for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitCallNew(CallNew* expr) {
- Visit(expr->expression());
- CHECK_BAILOUT;
- ZoneList<Expression*>* args = expr->arguments();
- // Check all arguments to the call
- for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
- // Check for inline runtime call
- if (expr->name()->Get(0) == '_' &&
- CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
- BAILOUT("inlined runtime call");
- }
- // Check all arguments to the call. (Relies on TEMP meaning STACK.)
- for (int i = 0; i < expr->arguments()->length(); i++) {
- Visit(expr->arguments()->at(i));
- CHECK_BAILOUT;
- }
-}
-
-
-void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::VOID:
- case Token::NOT:
- case Token::TYPEOF:
- Visit(expr->expression());
- break;
- case Token::BIT_NOT:
- BAILOUT("UnaryOperataion: BIT_NOT");
- case Token::DELETE:
- BAILOUT("UnaryOperataion: DELETE");
- default:
- BAILOUT("UnaryOperataion");
- }
-}
-
-
-void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- Property* prop = expr->expression()->AsProperty();
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- // All global variables are supported.
- if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type == Slot::LOOKUP) {
- BAILOUT("CountOperation with lookup slot");
- }
- }
- } else if (prop != NULL) {
- Visit(prop->obj());
- CHECK_BAILOUT;
- Visit(prop->key());
- CHECK_BAILOUT;
- } else {
- // This is a throw reference error.
- BAILOUT("CountOperation non-variable/non-property expression");
- }
-}
-
-
-void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
-}
-
-
-void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
- Visit(expr->left());
- CHECK_BAILOUT;
- Visit(expr->right());
-}
-
-
-void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {}
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-
-
} } // namespace v8::internal
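
Both the removed CodeGenSelector and the FullCodeGenSyntaxChecker that replaces it (in full-codegen.cc further down) share the same BAILOUT / CHECK_BAILOUT pattern: a visit either recurses into its children or clears the "supported" flag and unwinds early, and MakeCode then falls back to the classic CodeGenerator. A standalone sketch of that pattern with hypothetical Checker and Visit names:

// Sketch of the bailout-flag visitor pattern used by the syntax checkers.
#include <cstdio>

#define BAILOUT(reason)                     \
  do {                                      \
    std::printf("bailout: %s\n", reason);   \
    has_supported_syntax_ = false;          \
    return;                                 \
  } while (false)

#define CHECK_BAILOUT                       \
  do {                                      \
    if (!has_supported_syntax_) return;     \
  } while (false)

struct Checker {
  bool has_supported_syntax_ = true;

  void VisitSupported() { /* nothing to reject */ }
  void VisitUnsupported() { BAILOUT("unsupported construct"); }
  void VisitSequence() {
    VisitSupported();
    CHECK_BAILOUT;  // stop descending as soon as anything bailed out
    VisitUnsupported();
  }
};

int main() {
  Checker checker;
  checker.VisitSequence();
  // A cleared flag is what routes compilation to the classic CodeGenerator in
  // the MakeCode logic above; a set flag picks FullCodeGenerator instead.
  std::printf("use full codegen: %s\n", checker.has_supported_syntax_ ? "yes" : "no");
  return 0;
}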
diff --git a/src/debug-delay.js b/src/debug-delay.js
index 04fde1f9..14d8c883 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -1704,7 +1704,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (global) {
// Evaluate in the global context.
response.body =
- this.exec_state_.evaluateGlobal(expression), Boolean(disable_break);
+ this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
return;
}
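
The debug-delay.js hunk above fixes a comma-operator slip: in the old line, disable_break was evaluated after the call and discarded instead of being passed as the second argument. A standalone C++ analog of the same parse, since the comma operator behaves identically there; the f/g names are invented for the example:

// Sketch: "x = f(a), g(b)" parses as "(x = f(a)), g(b)".
#include <cassert>

static int f(int a) { return a; }
static int f(int a, int b) { return a + b; }  // the two-argument call the author intended
static bool g(bool b) { return b; }

int main() {
  int x = 0;
  bool disable = true;
  x = f(1), g(disable);    // like the removed line: g(disable)'s result is thrown away
  assert(x == 1);
  x = f(1, g(disable));    // like the added line: the flag actually reaches the callee
  assert(x == 2);
  return 0;
}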
diff --git a/src/debug.cc b/src/debug.cc
index 34b3a6d5..fc809c56 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1695,9 +1695,7 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- ASSERT(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
script_cache_->Add(Handle<Script>(Script::cast(obj)));
count++;
diff --git a/src/dtoa-config.c b/src/dtoa-config.c
index a1acd2dd..9c5ee331 100644
--- a/src/dtoa-config.c
+++ b/src/dtoa-config.c
@@ -38,7 +38,8 @@
*/
#if !(defined(__APPLE__) && defined(__MACH__)) && \
- !defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__)
+ !defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__) && \
+ !defined(__sun)
#include <endian.h>
#endif
#include <math.h>
@@ -47,7 +48,7 @@
/* The floating point word order on ARM is big endian when floating point
* emulation is used, even if the byte order is little endian */
#if !(defined(__APPLE__) && defined(__MACH__)) && !defined(WIN32) && \
- !defined(__FreeBSD__) && !defined(__OpenBSD__) && \
+ !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__sun) && \
__FLOAT_WORD_ORDER == __BIG_ENDIAN
#define IEEE_MC68k
#else
@@ -56,7 +57,7 @@
#define __MATH_H__
#if defined(__APPLE__) && defined(__MACH__) || defined(__FreeBSD__) || \
- defined(__OpenBSD__)
+ defined(__OpenBSD__) || defined(__sun)
/* stdlib.h on FreeBSD and Apple's 10.5 and later SDKs will mangle the
* name of strtod. If it's included after strtod is redefined as
* gay_strtod, it will mangle the name of gay_strtod, which is
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 5c0aa0cf..90f9ddab 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -201,6 +201,11 @@ DEFINE_bool(canonicalize_object_literal_maps, true,
DEFINE_bool(use_big_map_space, true,
"Use big map space, but don't compact if it grew too big.")
+DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
+ "Maximum number of pages in map space which still allows to encode "
+ "forwarding pointers. That's actually a constant, but it's useful "
+ "to control it with a flag for better testing.")
+
// mksnapshot.cc
DEFINE_bool(h, false, "print this message")
DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
diff --git a/src/frames.h b/src/frames.h
index 024065ab..19860adb 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -607,11 +607,12 @@ class SafeStackFrameIterator BASE_EMBEDDED {
void Advance();
void Reset();
- private:
static bool IsWithinBounds(
Address low_bound, Address high_bound, Address addr) {
return low_bound <= addr && addr <= high_bound;
}
+
+ private:
bool IsValidStackAddress(Address addr) const {
return IsWithinBounds(low_bound_, high_bound_, addr);
}
diff --git a/src/fast-codegen.cc b/src/full-codegen.cc
index e90a44e2..0477ab90 100644
--- a/src/fast-codegen.cc
+++ b/src/full-codegen.cc
@@ -29,22 +29,441 @@
#include "codegen-inl.h"
#include "compiler.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
#include "stub-cache.h"
#include "debug.h"
namespace v8 {
namespace internal {
+#define BAILOUT(reason) \
+ do { \
+ if (FLAG_trace_bailout) { \
+ PrintF("%s\n", reason); \
+ } \
+ has_supported_syntax_ = false; \
+ return; \
+ } while (false)
+
+
+#define CHECK_BAILOUT \
+ do { \
+ if (!has_supported_syntax_) return; \
+ } while (false)
+
+
+void FullCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
+ Scope* scope = fun->scope();
+
+ if (scope->num_heap_slots() > 0) {
+ // We support functions with a local context if they do not have
+ // parameters that need to be copied into the context.
+ for (int i = 0, len = scope->num_parameters(); i < len; i++) {
+ Slot* slot = scope->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ BAILOUT("Function has context-allocated parameters.");
+ }
+ }
+ }
+
+ VisitDeclarations(scope->declarations());
+ CHECK_BAILOUT;
+
+ VisitStatements(fun->body());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDeclarations(
+ ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); i++) {
+ Visit(decls->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0, len = stmts->length(); i < len; i++) {
+ Visit(stmts->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
+ Property* prop = decl->proxy()->AsProperty();
+ if (prop != NULL) {
+ Visit(prop->obj());
+ Visit(prop->key());
+ }
+
+ if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
+ Visit(stmt->condition());
+ CHECK_BAILOUT;
+ Visit(stmt->then_statement());
+ CHECK_BAILOUT;
+ Visit(stmt->else_statement());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitWithEnterStatement(
+ WithEnterStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
+ BAILOUT("SwitchStatement");
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ Visit(stmt->cond());
+ CHECK_BAILOUT;
+ Visit(stmt->body());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
+ Visit(stmt->cond());
+ CHECK_BAILOUT;
+ Visit(stmt->body());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
+ if (!FLAG_always_fast_compiler) BAILOUT("ForStatement");
+ if (stmt->init() != NULL) {
+ Visit(stmt->init());
+ CHECK_BAILOUT;
+ }
+ if (stmt->cond() != NULL) {
+ Visit(stmt->cond());
+ CHECK_BAILOUT;
+ }
+ Visit(stmt->body());
+ if (stmt->next() != NULL) {
+ CHECK_BAILOUT;
+ Visit(stmt->next());
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
+ BAILOUT("ForInStatement");
+}
+
+
+void FullCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ Visit(stmt->try_block());
+ CHECK_BAILOUT;
+ Visit(stmt->catch_block());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
+ Visit(stmt->try_block());
+ CHECK_BAILOUT;
+ Visit(stmt->finally_block());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDebuggerStatement(
+ DebuggerStatement* stmt) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ BAILOUT("FunctionBoilerplateLiteral");
+}
+
+
+void FullCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
+ Visit(expr->condition());
+ CHECK_BAILOUT;
+ Visit(expr->then_expression());
+ CHECK_BAILOUT;
+ Visit(expr->else_expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void FullCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
+ // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
+ ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+
+ for (int i = 0, len = properties->length(); i < len; i++) {
+ ObjectLiteral::Property* property = properties->at(i);
+ if (property->IsCompileTimeValue()) continue;
+ Visit(property->key());
+ CHECK_BAILOUT;
+ Visit(property->value());
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
+ ZoneList<Expression*>* subexprs = expr->values();
+ for (int i = 0, len = subexprs->length(); i < len; i++) {
+ Expression* subexpr = subexprs->at(i);
+ if (subexpr->AsLiteral() != NULL) continue;
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+ Visit(subexpr);
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+ Visit(expr->key());
+ CHECK_BAILOUT;
+ Visit(expr->value());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
+ Token::Value op = expr->op();
+ if (op == Token::INIT_CONST) BAILOUT("initialize constant");
+
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ if (var->mode() == Variable::CONST) BAILOUT("Assignment to const");
+ // All other variables are supported.
+ } else if (prop != NULL) {
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+ // This is a throw reference error.
+ BAILOUT("non-variable/non-property assignment");
+ }
+
+ Visit(expr->value());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
+ Visit(expr->exception());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitProperty(Property* expr) {
+ Visit(expr->obj());
+ CHECK_BAILOUT;
+ Visit(expr->key());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCall(Call* expr) {
+ Expression* fun = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ // Check for supported calls
+ if (var != NULL && var->is_possibly_eval()) {
+ BAILOUT("call to the identifier 'eval'");
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Calls to global variables are supported.
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ BAILOUT("call to a lookup slot");
+ } else if (fun->AsProperty() != NULL) {
+ Property* prop = fun->AsProperty();
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+ // Otherwise the call is supported if the function expression is.
+ Visit(fun);
+ }
+ // Check all arguments to the call.
+ for (int i = 0; i < args->length(); i++) {
+ Visit(args->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
+ Visit(expr->expression());
+ CHECK_BAILOUT;
+ ZoneList<Expression*>* args = expr->arguments();
+ // Check all arguments to the call
+ for (int i = 0; i < args->length(); i++) {
+ Visit(args->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
+ // Check for inline runtime call
+ if (expr->name()->Get(0) == '_' &&
+ CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
+ BAILOUT("inlined runtime call");
+ }
+ // Check all arguments to the call. (Relies on TEMP meaning STACK.)
+ for (int i = 0; i < expr->arguments()->length(); i++) {
+ Visit(expr->arguments()->at(i));
+ CHECK_BAILOUT;
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::ADD:
+ case Token::NOT:
+ case Token::TYPEOF:
+ case Token::VOID:
+ Visit(expr->expression());
+ break;
+ case Token::BIT_NOT:
+ BAILOUT("UnaryOperation: BIT_NOT");
+ case Token::DELETE:
+ BAILOUT("UnaryOperation: DELETE");
+ case Token::SUB:
+ BAILOUT("UnaryOperation: SUB");
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->expression()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ // All global variables are supported.
+ if (!var->is_global()) {
+ ASSERT(var->slot() != NULL);
+ Slot::Type type = var->slot()->type();
+ if (type == Slot::LOOKUP) {
+ BAILOUT("CountOperation with lookup slot");
+ }
+ }
+ } else if (prop != NULL) {
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+ // This is a throw reference error.
+ BAILOUT("CountOperation non-variable/non-property expression");
+ }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
+ Visit(expr->left());
+ CHECK_BAILOUT;
+ Visit(expr->right());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
+ Visit(expr->left());
+ CHECK_BAILOUT;
+ Visit(expr->right());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
+ // Supported.
+}
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+
+
#define __ ACCESS_MASM(masm())
-Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
+Handle<Code> FullCodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
bool is_eval) {
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ int len = String::cast(script->source())->length();
+ Counters::total_full_codegen_source_size.Increment(len);
+ }
CodeGenerator::MakeCodePrologue(fun);
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
- FastCodeGenerator cgen(&masm, script, is_eval);
+ FullCodeGenerator cgen(&masm, script, is_eval);
cgen.Generate(fun);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
@@ -55,7 +474,7 @@ Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
}
-int FastCodeGenerator::SlotOffset(Slot* slot) {
+int FullCodeGenerator::SlotOffset(Slot* slot) {
ASSERT(slot != NULL);
// Offset is negative because higher indexes are at lower addresses.
int offset = -slot->index() * kPointerSize;
@@ -75,7 +494,7 @@ int FastCodeGenerator::SlotOffset(Slot* slot) {
}
-void FastCodeGenerator::VisitDeclarations(
+void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
int length = declarations->length();
int globals = 0;
@@ -129,42 +548,42 @@ void FastCodeGenerator::VisitDeclarations(
}
-void FastCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
+void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
if (FLAG_debug_info) {
CodeGenerator::RecordPositions(masm_, fun->start_position());
}
}
-void FastCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
+void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
if (FLAG_debug_info) {
CodeGenerator::RecordPositions(masm_, fun->end_position());
}
}
-void FastCodeGenerator::SetStatementPosition(Statement* stmt) {
+void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
if (FLAG_debug_info) {
CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
}
}
-void FastCodeGenerator::SetStatementPosition(int pos) {
+void FullCodeGenerator::SetStatementPosition(int pos) {
if (FLAG_debug_info) {
CodeGenerator::RecordPositions(masm_, pos);
}
}
-void FastCodeGenerator::SetSourcePosition(int pos) {
+void FullCodeGenerator::SetSourcePosition(int pos) {
if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
masm_->RecordPosition(pos);
}
}
-void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
+void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
Label eval_right, done;
// Set up the appropriate context for the left subexpression based
@@ -232,7 +651,7 @@ void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
}
-void FastCodeGenerator::VisitBlock(Block* stmt) {
+void FullCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
@@ -241,20 +660,20 @@ void FastCodeGenerator::VisitBlock(Block* stmt) {
}
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Comment cmnt(masm_, "[ ExpressionStatement");
SetStatementPosition(stmt);
VisitForEffect(stmt->expression());
}
-void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+void FullCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
Comment cmnt(masm_, "[ EmptyStatement");
SetStatementPosition(stmt);
}
-void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
+void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
Comment cmnt(masm_, "[ IfStatement");
SetStatementPosition(stmt);
Label then_part, else_part, done;
@@ -273,7 +692,7 @@ void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
}
-void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
Comment cmnt(masm_, "[ ContinueStatement");
SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
@@ -289,7 +708,7 @@ void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
}
-void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
Comment cmnt(masm_, "[ BreakStatement");
SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
@@ -305,7 +724,7 @@ void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
}
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
SetStatementPosition(stmt);
Expression* expr = stmt->expression();
@@ -324,7 +743,7 @@ void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
}
-void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
+void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
Comment cmnt(masm_, "[ WithEnterStatement");
SetStatementPosition(stmt);
@@ -342,7 +761,7 @@ void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
}
-void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
+void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
Comment cmnt(masm_, "[ WithExitStatement");
SetStatementPosition(stmt);
@@ -353,12 +772,12 @@ void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
}
-void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
-void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
+void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
Label body, stack_limit_hit, stack_check_success;
@@ -388,7 +807,7 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
}
-void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Comment cmnt(masm_, "[ WhileStatement");
SetStatementPosition(stmt);
Label body, stack_limit_hit, stack_check_success;
@@ -419,17 +838,58 @@ void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
}
-void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
+void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
+ Comment cmnt(masm_, "[ ForStatement");
+ SetStatementPosition(stmt);
+ Label test, body, stack_limit_hit, stack_check_success;
+
+ Iteration loop_statement(this, stmt);
+ if (stmt->init() != NULL) {
+ Visit(stmt->init());
+ }
+
+ increment_loop_depth();
+ // Emit the test at the bottom of the loop (even if empty).
+ __ jmp(&test);
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ __ bind(loop_statement.continue_target());
+
+ SetStatementPosition(stmt);
+ if (stmt->next() != NULL) {
+ Visit(stmt->next());
+ }
+
+ __ bind(&test);
+
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ if (stmt->cond() != NULL) {
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+ } else {
+ __ jmp(&body);
+ }
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(loop_statement.break_target());
+ decrement_loop_depth();
}
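For readers unfamiliar with the bottom-of-loop layout used above, the control flow VisitForStatement emits corresponds roughly to the following standalone C++ sketch (toy code, not part of this patch); the goto labels mirror the body, the continue target, and the test, with the stack-limit check sitting on the back edge:

#include <cstdio>

int main() {
  int i = 0;               // stmt->init()
  int sum = 0;
  goto test;               // jump to the test emitted at the bottom

 body:
  sum += i;                // stmt->body()
  // loop_statement.continue_target(): the update expression runs here.
  ++i;                     // stmt->next()

 test:
  // __ StackLimitCheck(...) would go here; on overflow the StackCheckStub
  // is called and control resumes at this point.
  if (i < 10) goto body;   // stmt->cond(): VisitForControl branches to body

  // loop_statement.break_target()
  std::printf("%d\n", sum);  // prints 45
  return 0;
}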
-void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
UNREACHABLE();
}
-void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Comment cmnt(masm_, "[ TryCatchStatement");
SetStatementPosition(stmt);
// The try block adds a handler to the exception handler chain
@@ -472,7 +932,7 @@ void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
}
-void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
Comment cmnt(masm_, "[ TryFinallyStatement");
SetStatementPosition(stmt);
// Try finally is compiled by setting up a try-handler on the stack while
@@ -536,7 +996,7 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
}
-void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
+void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
#ifdef ENABLE_DEBUGGER_SUPPORT
Comment cmnt(masm_, "[ DebuggerStatement");
SetStatementPosition(stmt);
@@ -546,13 +1006,13 @@ void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
-void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
+void FullCodeGenerator::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* expr) {
UNREACHABLE();
}
-void FastCodeGenerator::VisitConditional(Conditional* expr) {
+void FullCodeGenerator::VisitConditional(Conditional* expr) {
Comment cmnt(masm_, "[ Conditional");
Label true_case, false_case, done;
VisitForControl(expr->condition(), &true_case, &false_case);
@@ -573,20 +1033,21 @@ void FastCodeGenerator::VisitConditional(Conditional* expr) {
}
-void FastCodeGenerator::VisitSlot(Slot* expr) {
+void FullCodeGenerator::VisitSlot(Slot* expr) {
// Slots do not appear directly in the AST.
UNREACHABLE();
}
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
+void FullCodeGenerator::VisitLiteral(Literal* expr) {
Comment cmnt(masm_, "[ Literal");
Apply(context_, expr);
}
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
+ ASSERT(expr->op() != Token::INIT_CONST);
// Left-hand side can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -637,7 +1098,7 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
Expression* rhs = expr->value();
VisitForValue(rhs, kAccumulator);
- // If we have a compount assignment: Apply operator.
+ // If we have a compound assignment: Apply operator.
if (expr->is_compound()) {
Location saved_location = location_;
location_ = kAccumulator;
@@ -664,7 +1125,7 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
}
-void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
@@ -676,7 +1137,7 @@ void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
}
-void FastCodeGenerator::VisitThrow(Throw* expr) {
+void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForValue(expr->exception(), kStack);
__ CallRuntime(Runtime::kThrow, 1);
@@ -684,7 +1145,7 @@ void FastCodeGenerator::VisitThrow(Throw* expr) {
}
-int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
+int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
// The macros used here must preserve the result register.
__ Drop(stack_depth);
__ PopTryHandler();
@@ -693,7 +1154,7 @@ int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
}
-int FastCodeGenerator::TryCatch::Exit(int stack_depth) {
+int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
// The macros used here must preserve the result register.
__ Drop(stack_depth);
__ PopTryHandler();
diff --git a/src/fast-codegen.h b/src/full-codegen.h
index c26e0f30..35ed25f1 100644
--- a/src/fast-codegen.h
+++ b/src/full-codegen.h
@@ -35,12 +35,35 @@
namespace v8 {
namespace internal {
+class FullCodeGenSyntaxChecker: public AstVisitor {
+ public:
+ FullCodeGenSyntaxChecker() : has_supported_syntax_(true) {}
+
+ void Check(FunctionLiteral* fun);
+
+ bool has_supported_syntax() { return has_supported_syntax_; }
+
+ private:
+ void VisitDeclarations(ZoneList<Declaration*>* decls);
+ void VisitStatements(ZoneList<Statement*>* stmts);
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ bool has_supported_syntax_;
+
+ DISALLOW_COPY_AND_ASSIGN(FullCodeGenSyntaxChecker);
+};
+
+
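As a rough illustration of how this checker is meant to be used (a minimal, self-contained sketch with invented node types, not V8 code): the visitor starts out optimistic and clears its flag as soon as it meets a construct the full code generator does not handle yet, and the caller then decides which backend to use.

#include <cstdio>
#include <vector>

enum NodeKind { kLiteral, kForLoop, kSwitchStatement /* unsupported here */ };

struct Node {
  NodeKind kind;
  std::vector<Node> children;
};

class SyntaxChecker {
 public:
  SyntaxChecker() : has_supported_syntax_(true) {}

  void Check(const Node& node) {
    // Clear the flag on the first unsupported construct; keep walking.
    if (node.kind == kSwitchStatement) has_supported_syntax_ = false;
    for (size_t i = 0; i < node.children.size(); i++) Check(node.children[i]);
  }

  bool has_supported_syntax() const { return has_supported_syntax_; }

 private:
  bool has_supported_syntax_;
};

int main() {
  Node fun;
  fun.kind = kForLoop;
  Node sw;
  sw.kind = kSwitchStatement;
  fun.children.push_back(sw);

  SyntaxChecker checker;
  checker.Check(fun);
  // A caller would pick the full code generator only if this prints 1.
  std::printf("%d\n", checker.has_supported_syntax() ? 1 : 0);  // prints 0
  return 0;
}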
// -----------------------------------------------------------------------------
-// Fast code generator.
+// Full code generator.
-class FastCodeGenerator: public AstVisitor {
+class FullCodeGenerator: public AstVisitor {
public:
- FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+ FullCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
: masm_(masm),
function_(NULL),
script_(script),
@@ -68,7 +91,7 @@ class FastCodeGenerator: public AstVisitor {
class NestedStatement BASE_EMBEDDED {
public:
- explicit NestedStatement(FastCodeGenerator* codegen) : codegen_(codegen) {
+ explicit NestedStatement(FullCodeGenerator* codegen) : codegen_(codegen) {
// Link into codegen's nesting stack.
previous_ = codegen->nesting_stack_;
codegen->nesting_stack_ = this;
@@ -106,14 +129,14 @@ class FastCodeGenerator: public AstVisitor {
protected:
MacroAssembler* masm() { return codegen_->masm(); }
private:
- FastCodeGenerator* codegen_;
+ FullCodeGenerator* codegen_;
NestedStatement* previous_;
DISALLOW_COPY_AND_ASSIGN(NestedStatement);
};
class Breakable : public NestedStatement {
public:
- Breakable(FastCodeGenerator* codegen,
+ Breakable(FullCodeGenerator* codegen,
BreakableStatement* break_target)
: NestedStatement(codegen),
target_(break_target) {}
@@ -132,7 +155,7 @@ class FastCodeGenerator: public AstVisitor {
class Iteration : public Breakable {
public:
- Iteration(FastCodeGenerator* codegen,
+ Iteration(FullCodeGenerator* codegen,
IterationStatement* iteration_statement)
: Breakable(codegen, iteration_statement) {}
virtual ~Iteration() {}
@@ -149,7 +172,7 @@ class FastCodeGenerator: public AstVisitor {
// The environment inside the try block of a try/catch statement.
class TryCatch : public NestedStatement {
public:
- explicit TryCatch(FastCodeGenerator* codegen, Label* catch_entry)
+ explicit TryCatch(FullCodeGenerator* codegen, Label* catch_entry)
: NestedStatement(codegen), catch_entry_(catch_entry) { }
virtual ~TryCatch() {}
virtual TryCatch* AsTryCatch() { return this; }
@@ -163,7 +186,7 @@ class FastCodeGenerator: public AstVisitor {
// The environment inside the try block of a try/finally statement.
class TryFinally : public NestedStatement {
public:
- explicit TryFinally(FastCodeGenerator* codegen, Label* finally_entry)
+ explicit TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
: NestedStatement(codegen), finally_entry_(finally_entry) { }
virtual ~TryFinally() {}
virtual TryFinally* AsTryFinally() { return this; }
@@ -179,7 +202,7 @@ class FastCodeGenerator: public AstVisitor {
// the block's parameters from the stack.
class Finally : public NestedStatement {
public:
- explicit Finally(FastCodeGenerator* codegen) : NestedStatement(codegen) { }
+ explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
virtual ~Finally() {}
virtual Finally* AsFinally() { return this; }
virtual int Exit(int stack_depth) {
@@ -196,7 +219,7 @@ class FastCodeGenerator: public AstVisitor {
// the block's temporary storage from the stack.
class ForIn : public Iteration {
public:
- ForIn(FastCodeGenerator* codegen,
+ ForIn(FullCodeGenerator* codegen,
ForInStatement* statement)
: Iteration(codegen, statement) { }
virtual ~ForIn() {}
@@ -222,7 +245,10 @@ class FastCodeGenerator: public AstVisitor {
// or on top of the stack) into the result expected according to an
// expression context.
void Apply(Expression::Context context, Register reg);
+
+ // Slot cannot have type Slot::LOOKUP.
void Apply(Expression::Context context, Slot* slot);
+
void Apply(Expression::Context context, Literal* lit);
void ApplyTOS(Expression::Context context);
@@ -410,7 +436,7 @@ class FastCodeGenerator: public AstVisitor {
friend class NestedStatement;
- DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
+ DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
};
diff --git a/src/handles.cc b/src/handles.cc
index d551e21c..3156670d 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -681,14 +681,18 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
// Compile the source information to a code object.
Handle<SharedFunctionInfo> shared(function->shared());
- return CompileLazyShared(shared, flag, 0);
+ bool result = CompileLazyShared(shared, flag, 0);
+ LOG(FunctionCreateEvent(*function));
+ return result;
}
bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) {
// Compile the source information to a code object.
Handle<SharedFunctionInfo> shared(function->shared());
- return CompileLazyShared(shared, flag, 1);
+ bool result = CompileLazyShared(shared, flag, 1);
+ LOG(FunctionCreateEvent(*function));
+ return result;
}
OptimizedObjectForAddingMultipleProperties::
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index b6150556..3cb65eee 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -625,8 +625,7 @@ void HeapProfiler::WriteSample() {
ConstructorHeapProfile js_cons_profile;
RetainerHeapProfile js_retainer_profile;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CollectStats(obj, info);
js_cons_profile.CollectStats(obj);
js_retainer_profile.CollectStats(obj);
diff --git a/src/heap.cc b/src/heap.cc
index fba2e87c..6be1dafe 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -76,8 +76,8 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
-int Heap::max_semispace_size_ = 512*KB;
-int Heap::max_old_generation_size_ = 128*MB;
+int Heap::max_semispace_size_ = 2*MB;
+int Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
size_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
@@ -327,7 +327,7 @@ void Heap::GarbageCollectionPrologue() {
int Heap::SizeOfObjects() {
int total = 0;
AllSpaces spaces;
- while (Space* space = spaces.next()) {
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->Size();
}
return total;
@@ -732,13 +732,14 @@ static void VerifyNonPointerSpacePointers() {
// do not expect them.
VerifyNonPointerSpacePointersVisitor v;
HeapObjectIterator code_it(Heap::code_space());
- while (code_it.has_next()) {
- HeapObject* object = code_it.next();
+ for (HeapObject* object = code_it.next();
+ object != NULL; object = code_it.next())
object->Iterate(&v);
- }
HeapObjectIterator data_it(Heap::old_data_space());
- while (data_it.has_next()) data_it.next()->Iterate(&v);
+ for (HeapObject* object = data_it.next();
+ object != NULL; object = data_it.next())
+ object->Iterate(&v);
}
#endif
@@ -804,8 +805,8 @@ void Heap::Scavenge() {
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
- while (cell_iterator.has_next()) {
- HeapObject* cell = cell_iterator.next();
+ for (HeapObject* cell = cell_iterator.next();
+ cell != NULL; cell = cell_iterator.next()) {
if (cell->IsJSGlobalPropertyCell()) {
Address value_address =
reinterpret_cast<Address>(cell) +
@@ -1013,13 +1014,15 @@ void Heap::RebuildRSets() {
void Heap::RebuildRSets(PagedSpace* space) {
HeapObjectIterator it(space);
- while (it.has_next()) Heap::UpdateRSet(it.next());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ Heap::UpdateRSet(obj);
}
void Heap::RebuildRSets(LargeObjectSpace* space) {
LargeObjectIterator it(space);
- while (it.has_next()) Heap::UpdateRSet(it.next());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ Heap::UpdateRSet(obj);
}
@@ -1203,7 +1206,7 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
map->set_code_cache(empty_fixed_array());
map->set_unused_property_fields(0);
map->set_bit_field(0);
- map->set_bit_field2(0);
+ map->set_bit_field2(1 << Map::kIsExtensible);
// If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -3106,7 +3109,8 @@ void Heap::Print() {
if (!HasBeenSetup()) return;
Top::PrintStack();
AllSpaces spaces;
- while (Space* space = spaces.next()) space->Print();
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+ space->Print();
}
@@ -3340,6 +3344,11 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
+ IterateWeakRoots(v, mode);
+}
+
+
+void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
if (mode != VISIT_ALL_IN_SCAVENGE) {
@@ -3394,6 +3403,20 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over pointers being held by inactive threads.
ThreadManager::Iterate(v);
v->Synchronize("threadmanager");
+
+ // Iterate over the pointers the Serialization/Deserialization code is
+ // holding.
+ // During garbage collection this keeps the partial snapshot cache alive.
+ // During deserialization of the startup snapshot this creates the partial
+ // snapshot cache and deserializes the objects it refers to. During
+ // serialization this does nothing, since the partial snapshot cache is
+ // empty. However the next thing we do is create the partial snapshot,
+ // filling up the partial snapshot cache with objects it needs as we go.
+ SerializerDeserializer::Iterate(v);
+ // We don't do a v->Synchronize call here, because in debug mode that will
+ // output a flag to the snapshot. However at this point the serializer and
+ // deserializer are deliberately a little unsynchronized (see above) so the
+ // checking of the sync flag in the snapshot would fail.
}
@@ -3544,7 +3567,8 @@ bool Heap::Setup(bool create_heap_objects) {
// Initialize map space.
map_space_ = new MapSpace(FLAG_use_big_map_space
? max_old_generation_size_
- : (MapSpace::kMaxMapPageIndex + 1) * Page::kPageSize,
+ : MapSpace::kMaxMapPageIndex * Page::kPageSize,
+ FLAG_max_map_space_pages,
MAP_SPACE);
if (map_space_ == NULL) return false;
if (!map_space_->Setup(NULL, 0)) return false;
@@ -3647,7 +3671,8 @@ void Heap::TearDown() {
void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
- while (PagedSpace* space = spaces.next()) space->Shrink();
+ for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+ space->Shrink();
}
@@ -3656,7 +3681,8 @@ void Heap::Shrink() {
void Heap::Protect() {
if (HasBeenSetup()) {
AllSpaces spaces;
- while (Space* space = spaces.next()) space->Protect();
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+ space->Protect();
}
}
@@ -3664,7 +3690,8 @@ void Heap::Protect() {
void Heap::Unprotect() {
if (HasBeenSetup()) {
AllSpaces spaces;
- while (Space* space = spaces.next()) space->Unprotect();
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+ space->Unprotect();
}
}
@@ -3836,34 +3863,25 @@ void HeapIterator::Shutdown() {
}
-bool HeapIterator::has_next() {
+HeapObject* HeapIterator::next() {
// No iterator means we are done.
- if (object_iterator_ == NULL) return false;
+ if (object_iterator_ == NULL) return NULL;
- if (object_iterator_->has_next_object()) {
+ if (HeapObject* obj = object_iterator_->next_object()) {
// If the current iterator has more objects we are fine.
- return true;
+ return obj;
} else {
// Go though the spaces looking for one that has objects.
while (space_iterator_->has_next()) {
object_iterator_ = space_iterator_->next();
- if (object_iterator_->has_next_object()) {
- return true;
+ if (HeapObject* obj = object_iterator_->next_object()) {
+ return obj;
}
}
}
// Done with the last space.
object_iterator_ = NULL;
- return false;
-}
-
-
-HeapObject* HeapIterator::next() {
- if (has_next()) {
- return object_iterator_->next_object();
- } else {
- return NULL;
- }
+ return NULL;
}
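The same has_next()/next() to NULL-terminated next() conversion recurs throughout this patch; here is a minimal standalone sketch of the new protocol and the for-loop shape callers now use (toy types, not V8's):

#include <cstdio>

struct Obj { int size; };

class ObjIterator {
 public:
  ObjIterator(Obj* objs, int count) : objs_(objs), count_(count), pos_(0) {}
  // Returns the next object, or NULL once the space is exhausted.
  Obj* next() { return pos_ < count_ ? &objs_[pos_++] : NULL; }
 private:
  Obj* objs_;
  int count_;
  int pos_;
};

int main() {
  Obj heap[] = { {8}, {16}, {24} };
  ObjIterator it(heap, 3);
  int total = 0;
  // Same loop shape as the rewritten callers in heap.cc:
  for (Obj* obj = it.next(); obj != NULL; obj = it.next())
    total += obj->size;
  std::printf("%d\n", total);  // prints 48
  return 0;
}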
diff --git a/src/heap.h b/src/heap.h
index 1f044441..0dd20c08 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -690,6 +690,8 @@ class Heap : public AllStatic {
static void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over all the other roots in the heap.
+ static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
@@ -1290,7 +1292,6 @@ class HeapIterator BASE_EMBEDDED {
explicit HeapIterator();
virtual ~HeapIterator();
- bool has_next();
HeapObject* next();
void reset();
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 2cf469ae..dc017ae3 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -860,6 +860,24 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) {
}
+void Assembler::cmpb(const Operand& dst, Register src) {
+ ASSERT(src.is_byte_register());
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x38);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::cmpb(Register dst, const Operand& src) {
+ ASSERT(dst.is_byte_register());
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x3A);
+ emit_operand(dst, src);
+}
+
+
void Assembler::cmpw(const Operand& op, Immediate imm16) {
ASSERT(imm16.is_int16());
EnsureSpace ensure_space(this);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index d675ecf5..da27fd09 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -559,6 +559,8 @@ class Assembler : public Malloced {
void and_(const Operand& dst, const Immediate& x);
void cmpb(const Operand& op, int8_t imm8);
+  void cmpb(Register dst, const Operand& src);
+ void cmpb(const Operand& dst, Register src);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
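For reference, the two overloads added above emit the 0x38 (CMP r/m8, r8) and 0x3A (CMP r8, r/m8) opcodes; a small standalone sketch of the resulting byte encodings for the register-register case follows (toy encoder, not the Assembler API):

#include <cstdint>
#include <cstdio>
#include <vector>

enum Reg8 { AL = 0, CL = 1, DL = 2, BL = 3 };

// ModRM byte for register-direct operands: mod = 11, then reg, then rm.
static uint8_t ModRM(Reg8 reg, Reg8 rm) {
  return static_cast<uint8_t>(0xC0 | (reg << 3) | rm);
}

// Mirrors Assembler::cmpb(const Operand& dst, Register src): CMP r/m8, r8.
static void cmpb_rm_r(std::vector<uint8_t>* code, Reg8 rm, Reg8 reg) {
  code->push_back(0x38);
  code->push_back(ModRM(reg, rm));
}

// Mirrors Assembler::cmpb(Register dst, const Operand& src): CMP r8, r/m8.
static void cmpb_r_rm(std::vector<uint8_t>* code, Reg8 reg, Reg8 rm) {
  code->push_back(0x3A);
  code->push_back(ModRM(reg, rm));
}

int main() {
  std::vector<uint8_t> code;
  cmpb_rm_r(&code, AL, CL);  // cmp al, cl  -> 38 C8
  cmpb_r_rm(&code, AL, CL);  // cmp al, cl  -> 3A C1
  for (size_t i = 0; i < code.size(); i++) std::printf("%02X ", code[i]);
  std::printf("\n");  // prints: 38 C8 3A C1
  return 0;
}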
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 240f4da3..121e1552 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -639,15 +639,22 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
return frame_->Pop();
}
+//------------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
- : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- cgen_->UnloadReference(this);
+ ASSERT(is_unloaded() || is_illegal());
}
@@ -697,6 +704,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
+ ref->set_unloaded();
}
@@ -743,6 +751,12 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
class FloatingPointHelper : public AllStatic {
public:
+
+ enum ArgLocation {
+ ARGS_ON_STACK,
+ ARGS_IN_REGISTERS
+ };
+
// Code pattern for loading a floating point value. Input value must
// be either a smi or a heap number object (fp value). Requirements:
// operand in register number. Returns operand as floating point number
@@ -750,9 +764,16 @@ class FloatingPointHelper : public AllStatic {
static void LoadFloatOperand(MacroAssembler* masm, Register number);
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
- // floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
+ // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
+ // Returns operands as floating point numbers on FPU stack.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location = ARGS_ON_STACK);
+
+  // Similar to LoadFloatOperands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
+
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -768,7 +789,11 @@ class FloatingPointHelper : public AllStatic {
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
- static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
+ static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
+
+ // Similar to LoadSSE2Operands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
};
@@ -913,31 +938,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
return;
}
- // Set the flags based on the operation, type and loop nesting level.
- GenericBinaryFlags flags;
- switch (op) {
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- flags = (loop_nesting() > 0)
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
-
- default:
- // By default only inline the Smi check code for likely smis if this
- // operation is part of a loop.
- flags = ((loop_nesting() > 0) && type->IsLikelySmi())
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
- }
-
Result right = frame_->Pop();
Result left = frame_->Pop();
@@ -971,7 +971,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
- bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
if (left_is_smi && right_is_smi) {
// Compute the constant result at compile time, and leave it on the frame.
@@ -980,33 +979,31 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
if (FoldConstantSmis(op, left_int, right_int)) return;
}
+ Result answer;
if (left_is_non_smi || right_is_non_smi) {
- // Set flag so that we go straight to the slow case, with no smi code.
- generate_no_smi_code = true;
+ // Go straight to the slow case, with no smi code.
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
} else if (right_is_smi) {
- ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode);
- return;
+ answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
} else if (left_is_smi) {
- ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode);
- return;
- }
-
- if (((flags & NO_SMI_CODE_IN_STUB) != 0) && !generate_no_smi_code) {
- LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
} else {
- frame_->Push(&left);
- frame_->Push(&right);
- // If we know the arguments aren't smis, use the binary operation stub
- // that does not check for the fast smi case.
- if (generate_no_smi_code) {
- flags = NO_SMI_CODE_IN_STUB;
- }
- GenericBinaryOpStub stub(op, overwrite_mode, flags);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
+ // Set the flags based on the operation, type and loop nesting level.
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ // For all other operations only inline the Smi check code for likely smis
+ // if the operation is part of a loop.
+ if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
+ }
}
+ frame_->Push(&answer);
}
@@ -1093,10 +1090,11 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
-void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ Result answer;
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
// We need eax as the quotient register, edx as the remainder
@@ -1218,7 +1216,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&quotient);
+ answer = quotient;
} else {
ASSERT(op == Token::MOD);
// Check for a negative zero result. If the result is zero, and
@@ -1234,9 +1232,10 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&remainder);
+ answer = remainder;
}
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Special handling of shift operations because they use fixed
@@ -1257,7 +1256,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
frame_->Spill(ecx);
// Use a fresh answer register to avoid spilling the left operand.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
@@ -1321,8 +1320,8 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Handle the other binary operations.
@@ -1331,7 +1330,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Perform the smi tag check.
@@ -1353,12 +1352,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
__ mov(answer.reg(), left->reg());
switch (op) {
case Token::ADD:
- __ add(answer.reg(), Operand(right->reg())); // Add optimistically.
+ __ add(answer.reg(), Operand(right->reg()));
deferred->Branch(overflow);
break;
case Token::SUB:
- __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically.
+ __ sub(answer.reg(), Operand(right->reg()));
deferred->Branch(overflow);
break;
@@ -1406,7 +1405,8 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
+ ASSERT(answer.is_valid());
+ return answer;
}
@@ -1575,36 +1575,34 @@ void DeferredInlineSmiSub::Generate() {
}
-void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> value,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
// Consumes the argument "operand".
-
// TODO(199): Optimize some special cases of operations involving a
// smi literal (multiply by 2, shift by 0, etc.).
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &unsafe_operand, operand,
- overwrite_mode);
+ return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &unsafe_operand,
- overwrite_mode);
+ return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ overwrite_mode);
}
- ASSERT(!operand->is_valid());
- return;
}
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
+ Result answer;
switch (op) {
case Token::ADD: {
operand->ToRegister();
@@ -1627,13 +1625,12 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
case Token::SUB: {
DeferredCode* deferred = NULL;
- Result answer; // Only allocate a new register if reversed.
if (reversed) {
// The reversed case is only hit when the right operand is not a
// constant.
@@ -1661,15 +1658,14 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->Branch(not_zero);
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
break;
}
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1689,21 +1685,21 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ and_(operand->reg(), ~kSmiTagMask);
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
}
break;
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
operand->ToRegister();
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -1724,7 +1720,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->Unuse();
__ SmiTag(answer.reg());
deferred->BindExit();
- frame_->Push(&answer);
}
break;
@@ -1749,7 +1744,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
operand->Unuse();
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
DeferredInlineSmiOperationReversed* deferred =
new DeferredInlineSmiOperationReversed(op,
answer.reg(),
@@ -1765,7 +1760,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ SmiTag(answer.reg());
deferred->BindExit();
- frame_->Push(&answer);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1783,10 +1777,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
} else {
// Use a fresh temporary for nonzero shift values.
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -1808,7 +1802,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->Branch(overflow);
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
}
}
break;
@@ -1847,7 +1840,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
@@ -1873,7 +1866,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
// Fall through if we did not find a power of 2 on the right hand side!
@@ -1881,16 +1874,17 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
default: {
Result constant_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
}
break;
}
}
- ASSERT(!operand->is_valid());
+ ASSERT(answer.is_valid());
+ return answer;
}
@@ -2311,20 +2305,29 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
}
-void CodeGenerator::CallApplyLazy(Property* apply,
+void CodeGenerator::CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position) {
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments).
+ // If the arguments object of the scope has not been allocated,
+ // and x.apply is Function.prototype.apply, this optimization
+ // just copies y and the arguments of the current function on the
+ // stack, as receiver and arguments, and calls x.
+ // In the implementation comments, we call x the applicand
+ // and y the receiver.
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
- JumpTarget slow, done;
-
- // Load the apply function onto the stack. This will usually
+ // Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- Reference ref(this, apply);
- ref.GetValue();
- ASSERT(ref.type() == Reference::NAMED);
+ Load(applicand);
+ Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ frame()->Push(name);
+ Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+ __ nop();
+ frame()->Push(&answer);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -2334,6 +2337,11 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
+ // Contents of frame at this point:
+ // Frame[0]: arguments object of the current function or the hole.
+ // Frame[1]: receiver
+ // Frame[2]: applicand.apply
+ // Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -2341,143 +2349,151 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
- probe.Unuse();
- slow.Branch(not_equal);
- }
-
- if (try_lazy) {
- JumpTarget build_args;
-
- // Get rid of the arguments object probe.
- frame_->Drop();
+ { VirtualFrame::SpilledScope spilled_scope;
+ Label slow, done;
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ probe.Unuse();
+ __ j(not_equal, &slow);
+ }
- // Before messing with the execution stack, we sync all
- // elements. This is bound to happen anyway because we're
- // about to call a function.
- frame_->SyncRange(0, frame_->element_count() - 1);
+ if (try_lazy) {
+ Label build_args;
+ // Get rid of the arguments object probe.
+ frame_->Drop(); // Can be called on a spilled frame.
+ // Stack now has 3 elements on it.
+ // Contents of stack at this point:
+ // esp[0]: receiver
+ // esp[1]: applicand.apply
+ // esp[2]: applicand.
- // Check that the receiver really is a JavaScript object.
- { frame_->PushElementAt(0);
- Result receiver = frame_->Pop();
- receiver.ToRegister();
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- build_args.Branch(zero);
- Result tmp = allocator_->Allocate();
+ // Check that the receiver really is a JavaScript object.
+ __ mov(eax, Operand(esp, 0));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
- build_args.Branch(less);
- }
-
- // Verify that we're invoking Function.prototype.apply.
- { frame_->PushElementAt(1);
- Result apply = frame_->Pop();
- apply.ToRegister();
- __ test(apply.reg(), Immediate(kSmiTagMask));
- build_args.Branch(zero);
- Result tmp = allocator_->Allocate();
- __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
- build_args.Branch(not_equal);
- __ mov(tmp.reg(),
- FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &build_args);
+
+ // Check that applicand.apply is Function.prototype.apply.
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &build_args);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &build_args);
+ __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+ __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
Immediate(apply_code));
- build_args.Branch(not_equal);
- }
-
- // Get the function receiver from the stack. Check that it
- // really is a function.
- __ mov(edi, Operand(esp, 2 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- build_args.Branch(zero);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- build_args.Branch(not_equal);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(eax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(eax);
- __ mov(ecx, Operand(eax));
- __ cmp(eax, kArgumentsLimit);
- build_args.Branch(above);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- __ bind(&loop);
- __ test(ecx, Operand(ecx));
- __ j(zero, &invoke);
- __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
- __ dec(ecx);
- __ jmp(&loop);
-
- // Invoke the function. The virtual frame knows about the receiver
- // so make sure to forget that explicitly.
- __ bind(&invoke);
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
- frame_->Forget(1);
- Result result = allocator()->Allocate(eax);
- frame_->SetElementAt(0, &result);
- done.Jump();
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // Function.prototype.apply.
- build_args.Bind();
- Result arguments_object = StoreArgumentsObject(false);
- frame_->Push(&arguments_object);
- slow.Bind();
- }
+ __ j(not_equal, &build_args);
+
+ // Check that applicand is a function.
+ __ mov(edi, Operand(esp, 2 * kPointerSize));
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &build_args);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &build_args);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ mov(eax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
- // Flip the apply function and the function to call on the stack, so
- // the function looks like the receiver of the apply call. This way,
- // the generic Function.prototype.apply implementation can deal with
- // the call like it usually does.
- Result a2 = frame_->Pop();
- Result a1 = frame_->Pop();
- Result ap = frame_->Pop();
- Result fn = frame_->Pop();
- frame_->Push(&ap);
- frame_->Push(&fn);
- frame_->Push(&a1);
- frame_->Push(&a2);
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- frame_->Push(&res);
-
- // All done. Restore context register after call.
- if (try_lazy) done.Bind();
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(eax);
+ __ mov(ecx, Operand(eax));
+ __ cmp(eax, kArgumentsLimit);
+ __ j(above, &build_args);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ // ecx is a small non-negative integer, due to the test above.
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ // Drop applicand.apply and applicand from the stack, and push
+ // the result of the function call, but leave the spilled frame
+ // unchanged, with 3 elements, so it is correct when we compile the
+ // slow-case code.
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(eax);
+ // Stack now has 1 element:
+ // esp[0]: result
+ __ jmp(&done);
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // applicand.apply.
+ __ bind(&build_args);
+      // Stack now has 3 elements, because we jumped here from a point where:
+ // esp[0]: receiver
+ // esp[1]: applicand.apply
+ // esp[2]: applicand.
+
+ // StoreArgumentsObject requires a correct frame, and may modify it.
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->SpillAll();
+ arguments_object.ToRegister();
+ frame_->EmitPush(arguments_object.reg());
+ arguments_object.Unuse();
+ // Stack and frame now have 4 elements.
+ __ bind(&slow);
+ }
+
+ // Generic computation of x.apply(y, args) with no special optimization.
+ // Flip applicand.apply and applicand on the stack, so
+ // applicand looks like the receiver of the applicand.apply call.
+ // Then process it as a normal function call.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ __ mov(Operand(esp, 3 * kPointerSize), ebx);
+
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+ Result res = frame_->CallStub(&call_function, 3);
+ // The function and its two arguments have been dropped.
+ frame_->Drop(1); // Drop the receiver as well.
+ res.ToRegister();
+ frame_->EmitPush(res.reg());
+ // Stack now has 1 element:
+ // esp[0]: result
+ if (try_lazy) __ bind(&done);
+ } // End of spilled scope.
+ // Restore the context register after a call.
frame_->RestoreContextRegister();
}
@@ -3517,17 +3533,13 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2);
+ } else {
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (eg, ebx pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
frame_->Drop();
}
}
@@ -3535,10 +3547,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -4588,9 +4596,12 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target());
+ { Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -4612,12 +4623,27 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
+ if (node->ends_initialization_block()) {
+ // Add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ if (target.type() == Reference::NAMED) {
+ frame_->Dup();
+ // Dup target receiver on stack.
+ } else {
+ ASSERT(target.type() == Reference::KEYED);
+ Result temp = frame_->Pop();
+ frame_->Dup();
+ frame_->Push(&temp);
+ }
+ }
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
- } else {
+ } else { // Assignment is a compound assignment.
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -4643,6 +4669,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
+ UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -4654,17 +4681,20 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
+ ASSERT(target.type() == Reference::UNLOADED);
// End of initialization block. Revert to fast case. The
- // argument to the runtime call is the receiver, which is the
- // first value pushed as part of the reference, which is below
- // the lhs value.
- frame_->PushElementAt(target.size());
+ // argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment.
+ // Swap the receiver and the value of the assignment expression.
+ Result lhs = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&lhs);
+ frame_->Push(&receiver);
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
}
+ ASSERT(frame_->height() == original_height + 1);
}
@@ -4827,7 +4857,7 @@ void CodeGenerator::VisitCall(Call* node) {
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
- CallApplyLazy(property,
+ CallApplyLazy(property->obj(),
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
@@ -4860,16 +4890,21 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// Load the function to call from the property through a reference.
- Reference ref(this, property);
- ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
+ Reference ref(this, property);
+ ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
- // The reference's size is non-negative.
- frame_->PushElementAt(ref.size());
+ Load(property->obj());
+ Load(property->key());
+ Result function = EmitKeyedLoad(false);
+ frame_->Drop(); // Key.
+ Result receiver = frame_->Pop();
+ frame_->Push(&function);
+ frame_->Push(&receiver);
}
// Call the function.
@@ -5183,6 +5218,26 @@ void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ __ test(obj.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(),
+ FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(),
+ FieldOperand(temp.reg(), Map::kBitFieldOffset));
+ __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ obj.Unuse();
+ temp.Unuse();
+ destination()->Split(not_zero);
+}
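A standalone sketch of the check this intrinsic emits (the tagging scheme and bit position below are stand-ins, not V8's actual constants): smis are never undetectable; otherwise the map's bit field is loaded and the undetectable bit is tested.

#include <cstdint>
#include <cstdio>

static const uintptr_t kHeapObjectTag = 1;  // stand-in: heap objects have bit 0 set
static const int kIsUndetectable = 4;       // stand-in for Map::kIsUndetectable

struct Map { uint8_t bit_field; };
struct HeapObject { Map* map; };

static bool IsUndetectableObject(uintptr_t value) {
  if ((value & kHeapObjectTag) == 0) return false;  // smi: take the false target
  HeapObject* obj = reinterpret_cast<HeapObject*>(value & ~kHeapObjectTag);
  return (obj->map->bit_field & (1 << kIsUndetectable)) != 0;
}

int main() {
  Map undetectable_map = { static_cast<uint8_t>(1 << kIsUndetectable) };
  HeapObject obj = { &undetectable_map };
  uintptr_t tagged = reinterpret_cast<uintptr_t>(&obj) | kHeapObjectTag;
  std::printf("%d %d\n",
              IsUndetectableObject(2 << 1),   // a "smi": prints 0
              IsUndetectableObject(tagged));  // prints 1
  return 0;
}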
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -5760,7 +5815,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
- { Reference target(this, node->expression());
+ // A constant reference is not saved to, so a constant reference is not a
+ // compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -6363,6 +6420,114 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
+Result CodeGenerator::EmitKeyedLoad(bool is_global) {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+ if (loop_nesting() > 0) {
+ Comment cmnt(masm_, "[ Inlined load from keyed Property");
+
+ Result key = frame_->Pop();
+ Result receiver = frame_->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching.
+ masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::null_value()));
+ deferred->Branch(not_equal);
+
+ // Check that the key is a smi.
+ __ test(key.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ mov(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ mov(index.reg(), key.reg());
+ __ SmiUntag(index.reg());
+ __ cmp(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+    // one is eax, then we can reuse that one because the value
+ // coming from the deferred code will be in eax.
+ Result value = index;
+ __ mov(value.reg(), Operand(elements.reg(),
+ index.reg(),
+ times_4,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ frame_->Push(&receiver);
+ frame_->Push(&key);
+ return value;
+ } else {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = frame_->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ return answer;
+ }
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -6475,121 +6640,21 @@ void Reference::GetValue() {
}
case KEYED: {
- Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
-
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (cgen_->loop_nesting() > 0) {
- Comment cmnt(masm, "[ Inlined load from keyed Property");
-
- Result key = cgen_->frame()->Pop();
- Result receiver = cgen_->frame()->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = cgen_->allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- // Use a fresh temporary for the index and later the loaded
- // value.
- Result index = cgen_->allocator()->Allocate();
- ASSERT(index.is_valid());
-
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(index.reg(),
- receiver.reg(),
- key.reg(),
- is_global);
-
- // Check that the receiver is not a smi (only needed if this
- // is not a load from the global context) and that it has the
- // expected map.
- if (!is_global) {
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
- }
-
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
- deferred->Branch(not_equal);
-
- // Check that the key is a smi.
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
-
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ mov(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ mov(index.reg(), key.reg());
- __ SmiUntag(index.reg());
- __ cmp(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is eax, the we can reuse that one because the value
- // coming from the deferred code will be in eax.
- Result value = index;
- __ mov(value.reg(), Operand(elements.reg(),
- index.reg(),
- times_4,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
- index.Unuse();
- __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- // Restore the receiver and key to the frame and push the
- // result on top of it.
- cgen_->frame()->Push(&receiver);
- cgen_->frame()->Push(&key);
- cgen_->frame()->Push(&value);
-
- } else {
- Comment cmnt(masm, "[ Load from keyed Property");
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- cgen_->frame()->Push(&answer);
- }
+ Result value = cgen_->EmitKeyedLoad(is_global);
+ cgen_->frame()->Push(&value);
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ cgen_->UnloadReference(this);
+ }
}
@@ -6623,6 +6688,9 @@ void Reference::TakeValue() {
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
+
+ ASSERT(persist_after_get_);
+ // Do not unload the reference, because it is used in SetValue.
}
@@ -6752,6 +6820,7 @@ void Reference::SetValue(InitState init_state) {
default:
UNREACHABLE();
}
+ cgen_->UnloadReference(this);
}
@@ -7062,143 +7131,335 @@ void GenericBinaryOpStub::GenerateCall(
}
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // Perform fast-case smi code for the operation (eax <op> ebx) and
- // leave result in register eax.
+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right) {
+ if (ArgsInRegistersSupported()) {
+ SetArgsInRegisters();
+ return frame->CallStub(this, left, right);
+ } else {
+ frame->Push(left);
+ frame->Push(right);
+ return frame->CallStub(this, 2);
+ }
+}
- // Prepare the smi check of both operands by or'ing them together
- // before checking against the smi mask.
- __ mov(ecx, Operand(ebx));
- __ or_(ecx, Operand(eax));
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+ // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+ // dividend in eax and edx free for the division. Use eax, ebx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = edx;
+ Register right = eax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = eax;
+ right = ebx;
+ if (HasArgsInRegisters()) {
+ __ mov(ebx, eax);
+ __ mov(eax, edx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ mov(right, Operand(esp, 1 * kPointerSize));
+ __ mov(left, Operand(esp, 2 * kPointerSize));
+ }
+
+ // 2. Prepare the smi check of both operands by oring them together.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ Label not_smis;
+ Register combined = ecx;
+ ASSERT(!left.is(combined) && !right.is(combined));
switch (op_) {
- case Token::ADD:
- __ add(eax, Operand(ebx)); // add optimistically
- __ j(overflow, slow, not_taken);
+ case Token::BIT_OR:
+ // Perform the operation into eax and smi check the result. Preserve
+ // eax in case the result is not a smi.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left)); // Bitwise or is commutative.
+ combined = right;
break;
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::ADD:
case Token::SUB:
- __ sub(eax, Operand(ebx)); // subtract optimistically
- __ j(overflow, slow, not_taken);
- break;
-
+ case Token::MUL:
case Token::DIV:
case Token::MOD:
- // Sign extend eax into edx:eax.
- __ cdq();
- // Check for 0 divisor.
- __ test(ebx, Operand(ebx));
- __ j(zero, slow, not_taken);
+ __ mov(combined, right);
+ __ or_(combined, Operand(left));
+ break;
+
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Move the right operand into ecx for the shift operation, use eax
+ // for the smi check register.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left));
+ combined = right;
break;
default:
- // Fall-through to smi check.
break;
}
- // Perform the actual smi check.
- ASSERT(kSmiTag == 0); // adjust zero check if not the case
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, slow, not_taken);
+ // 3. Perform the smi check of the operands.
+ ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
+ __ test(combined, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis, not_taken);
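+ // Since kSmiTag == 0, a set low bit marks a heap object; that bit survives
+ // the bitwise or above, so one test covers both operands.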
+ // 4. Operands are both smis, perform the operation leaving the result in
+ // eax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
switch (op_) {
+ case Token::BIT_OR:
+ // Nothing to do.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(eax));
+ __ xor_(right, Operand(left)); // Bitwise xor is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(eax));
+ __ and_(right, Operand(left)); // Bitwise and is commutative.
+ break;
+
+ case Token::SHL:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shl_cl(left);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(left, 0xc0000000);
+ __ j(sign, &use_fp_on_smis, not_taken);
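+ // Comparing against 0xc0000000 sets the sign flag exactly when the 32-bit
+ // result lies outside the smi range [-2^30, 2^30 - 1].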
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SAR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ sar_cl(left);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SHR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shr_cl(left);
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when
+ // smi tagging. These two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi.
+ __ test(left, Immediate(0xc0000000));
+ __ j(not_zero, slow, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
case Token::ADD:
+ ASSERT(right.is(eax));
+ __ add(right, Operand(left)); // Addition is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ break;
+
case Token::SUB:
- // Do nothing here.
+ __ sub(left, Operand(right));
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ __ mov(eax, left);
break;
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // adjust code below if not the case
+ ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // We can't revert the multiplication if the result is not a smi
+ // so save the right operand.
+ __ mov(ebx, right);
// Remove tag from one of the operands (but keep sign).
- __ SmiUntag(eax);
+ __ SmiUntag(right);
// Do multiplication.
- __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax
- // Go slow on overflows.
- __ j(overflow, slow, not_taken);
- // Check for negative zero result.
- __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
+ __ imul(right, Operand(left)); // Multiplication is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(right, combined, &use_fp_on_smis);
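+ // A zero product with a negative operand has to become -0.0, which is not
+ // representable as a smi, so it is handled on the floating point path.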
break;
case Token::DIV:
- // Divide edx:eax by ebx.
- __ idiv(ebx);
- // Check for the corner case of dividing the most negative smi
- // by -1. We cannot use the overflow flag, since it is not set
- // by idiv instruction.
+ // We can't revert the division if the result is not a smi so
+ // save the left operand.
+ __ mov(edi, left);
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &use_fp_on_smis, not_taken);
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by idiv
+ // instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(eax, 0x40000000);
- __ j(equal, slow);
- // Check for negative zero result.
- __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
+ __ j(equal, &use_fp_on_smis);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
// Check that the remainder is zero.
__ test(edx, Operand(edx));
- __ j(not_zero, slow);
+ __ j(not_zero, &use_fp_on_smis);
// Tag the result and store it in register eax.
__ SmiTag(eax);
break;
case Token::MOD:
- // Divide edx:eax by ebx.
- __ idiv(ebx);
- // Check for negative zero result.
- __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &not_smis, not_taken);
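+ // There is no inline floating point fallback for MOD (see step 6 below),
+ // so a zero divisor falls out through the non-smi code instead.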
+
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(edx, combined, slow);
// Move remainder to register eax.
- __ mov(eax, Operand(edx));
+ __ mov(eax, edx);
break;
- case Token::BIT_OR:
- __ or_(eax, Operand(ebx));
- break;
+ default:
+ UNREACHABLE();
+ }
- case Token::BIT_AND:
- __ and_(eax, Operand(ebx));
- break;
+ // 5. Emit return of result in eax.
+ GenerateReturn(masm);
- case Token::BIT_XOR:
- __ xor_(eax, Operand(ebx));
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::SHL: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // The result we want is in left == edx, so we can put the allocated heap
+ // number in eax.
+ __ AllocateHeapNumber(eax, ecx, ebx, slow);
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(left));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ // It's OK to overwrite the right argument on the stack because we
+ // are about to return.
+ __ mov(Operand(esp, 1 * kPointerSize), left);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
break;
+ }
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Move the second operand into register ecx.
- __ mov(ecx, Operand(ebx));
- // Remove tags from operands (but keep sign).
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- // Perform the operation.
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Restore arguments to edx, eax.
switch (op_) {
- case Token::SAR:
- __ sar_cl(eax);
- // No checks of result necessary
+ case Token::ADD:
+ // Revert right = right + left.
+ __ sub(right, Operand(left));
break;
- case Token::SHR:
- __ shr_cl(eax);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
+ case Token::SUB:
+ // Revert left = left - right.
+ __ add(left, Operand(right));
break;
- case Token::SHL:
- __ shl_cl(eax);
- // Check that the *signed* result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(sign, slow, not_taken);
+ case Token::MUL:
+ // Right was clobbered but a copy is in ebx.
+ __ mov(right, ebx);
+ break;
+ case Token::DIV:
+ // Left was clobbered but a copy is in edi. Right is in ebx for
+ // division.
+ __ mov(edx, edi);
+ __ mov(eax, right);
+ break;
+ default: UNREACHABLE();
break;
- default:
- UNREACHABLE();
}
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
+ __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::LoadFloatSmis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, ecx);
+ GenerateReturn(masm);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // 7. Non-smi operands, fall out to the non-smi code with the operands in
+ // edx and eax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ switch (op_) {
+ case Token::BIT_OR:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Right operand is saved in ecx and eax was destroyed by the smi
+ // check.
+ __ mov(eax, ecx);
+ break;
+
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in eax, ebx at this point.
+ __ mov(edx, eax);
+ __ mov(eax, ebx);
break;
default:
- UNREACHABLE();
break;
}
}
@@ -7213,30 +7474,20 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// case smi code is not generated by the caller. Generating it here will speed
// up common operations.
if (HasSmiCodeInStub()) {
- Label slow;
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- GenerateSmiCode(masm, &slow);
- GenerateReturn(masm);
- // Too bad. The fast case smi code didn't succeed.
- __ bind(&slow);
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
+ GenerateLoadArguments(masm);
}
- // Make sure the arguments are in edx and eax.
- GenerateLoadArguments(masm);
-
// Floating point case.
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
- // eax: y
- // edx: x
-
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
+ FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -7245,59 +7496,15 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- // Allocate a heap number, if needed.
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- __ mov(eax, Operand(edx));
- // Fall through!
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE: {
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- }
- default: UNREACHABLE();
- }
+ GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- // Allocate a heap number, if needed.
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- __ mov(eax, Operand(edx));
- // Fall through!
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
switch (op_) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
@@ -7305,8 +7512,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::DIV: __ fdivp(1); break;
default: UNREACHABLE();
}
+ Label after_alloc_failure;
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
GenerateReturn(masm);
+ __ bind(&after_alloc_failure);
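+ // The result of the FP operation above is still on the FPU stack;
+ // drop it before falling back to the runtime.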
+ __ ffree();
+ __ jmp(&call_runtime);
}
}
case Token::MOD: {
@@ -7319,12 +7531,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- Label non_smi_result, skip_allocation;
- Label operand_conversion_failure;
- FloatingPointHelper::LoadAsIntegers(
- masm,
- use_sse3_,
- &operand_conversion_failure);
+ Label non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
switch (op_) {
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
@@ -7337,7 +7545,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
if (op_ == Token::SHR) {
// Check if result is non-negative and fits in a smi.
__ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &non_smi_result);
+ __ j(not_zero, &call_runtime);
} else {
// Check if result fits in a smi.
__ cmp(eax, 0xc0000000);
@@ -7352,6 +7560,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ mov(ebx, Operand(eax)); // ebx: result
+ Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
@@ -7380,15 +7589,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
GenerateReturn(masm);
}
-
- // Go to runtime for non-number inputs.
- __ bind(&operand_conversion_failure);
- // SHR should return uint32 - go to runtime for non-smi/negative result.
- if (op_ == Token::SHR) {
- __ bind(&non_smi_result);
- }
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
break;
}
default: UNREACHABLE(); break;
@@ -7398,9 +7598,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// result. If arguments was passed in registers now place them on the
// stack in the correct order below the return address.
__ bind(&call_runtime);
- if (HasArgumentsInRegisters()) {
+ if (HasArgsInRegisters()) {
__ pop(ecx);
- if (HasArgumentsReversed()) {
+ if (HasArgsReversed()) {
__ push(eax);
__ push(edx);
} else {
@@ -7414,17 +7614,15 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Test for string arguments before calling runtime.
Label not_strings, not_string1, string1;
Result answer;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
- __ test(eax, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &not_string1);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &not_string1);
- // First argument is a a string, test second.
- __ test(edx, Immediate(kSmiTagMask));
+ // First argument is a string, test second.
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &string1);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &string1);
// First and second argument are strings. Jump to the string add stub.
@@ -7433,17 +7631,25 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Only first argument is a string.
__ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_RIGHT :
+ Builtins::STRING_ADD_LEFT,
+ JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(&not_string1);
- __ test(edx, Immediate(kSmiTagMask));
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &not_strings);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &not_strings);
// Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_LEFT :
+ Builtins::STRING_ADD_RIGHT,
+ JUMP_FUNCTION);
__ bind(&not_strings);
// Neither argument is a string.
@@ -7455,7 +7661,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
+ break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
@@ -7486,9 +7692,57 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
+void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure) {
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
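+ // If the stub was called with its arguments reversed, left and right have
+ // swapped places in edx and eax, so the requested overwrite mode is
+ // mirrored to overwrite the operand the caller intended.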
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in edx is already an object, we skip the
+ // allocation of a heap number.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now edx can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(edx, Operand(ebx));
+ __ bind(&skip_allocation);
+ // Use the object in edx as the result holder.
+ __ mov(eax, Operand(edx));
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now eax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(eax, ebx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+}
+
+
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
// If arguments are not passed in registers read them from the stack.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
}
@@ -7498,7 +7752,7 @@ void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
// If arguments are not passed in registers remove them from the stack before
// returning.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ ret(2 * kPointerSize); // Remove both operands
} else {
__ ret(0);
@@ -7514,6 +7768,7 @@ void IntegerConvert(MacroAssembler* masm,
Register source,
bool use_sse3,
Label* conversion_failure) {
+ ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
@@ -7716,7 +7971,7 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
// Load operand in edx into xmm0, or branch to not_numbers.
@@ -7748,16 +8003,40 @@ void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm0, Operand(scratch));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm1, Operand(scratch));
+}
+
+
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch) {
+ Register scratch,
+ ArgLocation arg_location) {
Label load_smi_1, load_smi_2, done_load_1, done;
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, edx);
+ } else {
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ }
__ test(scratch, Immediate(kSmiTagMask));
__ j(zero, &load_smi_1, not_taken);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ bind(&done_load_1);
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, eax);
+ } else {
+ __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ }
__ test(scratch, Immediate(kSmiTagMask));
__ j(zero, &load_smi_2, not_taken);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
@@ -7780,6 +8059,24 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ mov(Operand(esp, 0), scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+}
+
+
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -8114,10 +8411,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// esp[12]: subject string
// esp[16]: JSRegExp object
- Label runtime;
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &runtime, not_taken);
// Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
ASSERT_EQ(0, kSmiTag);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -8153,7 +8464,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the second argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
@@ -8165,7 +8476,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the third argument is a positive smi.
- __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ mov(eax, Operand(esp, kPreviousIndexOffset));
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
__ j(not_zero, &runtime);
// Check that it is not greater than the subject string length.
@@ -8176,7 +8487,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
@@ -8194,38 +8505,74 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(greater, &runtime);
// ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string (only support
- // flat ascii strings).
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ // Check the representation and encoding of the subject string.
+ Label seq_string, seq_two_byte_string, check_code;
+ const int kStringRepresentationEncodingMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
- __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
+ __ and_(ebx, kStringRepresentationEncodingMask);
+ // First check for sequential string.
+ ASSERT_EQ(0, kStringTag);
+ ASSERT_EQ(0, kSeqStringTag);
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string.
+ __ mov(edx, ebx);
+ __ and_(edx, kStringRepresentationMask);
+ __ cmp(edx, kConsStringTag);
__ j(not_equal, &runtime);
+ __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
+ __ cmp(Operand(edx), Immediate(Handle<String>(Heap::empty_string())));
+ __ j(not_equal, &runtime);
+ __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ and_(ebx, kStringRepresentationEncodingMask);
- // ecx: RegExp data (FixedArray)
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(eax, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(eax, Operand(eax));
- __ j(zero, &runtime, not_taken);
-
+ __ bind(&seq_string);
+ // eax: subject string (sequential, either ascii or two byte)
+ // ebx: subject string type & kStringRepresentationEncodingMask
// ecx: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
- // it has the field contains a code object otherwise it contains the hole.
+ // it has, the field contains a code object otherwise it contains the hole.
+ __ cmp(ebx, kStringTag | kSeqStringTag | kTwoByteStringTag);
+ __ j(equal, &seq_two_byte_string);
+#ifdef DEBUG
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ Check(equal, "Expected sequential ascii string");
+#endif
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(edi, Immediate(1)); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // eax: subject string
+ // ecx: RegExp data (FixedArray)
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(edi, Immediate(0)); // Type is two byte.
+
+ __ bind(&check_code);
+ // Check that the irregexp code has been generated. If it has, the field
+ // contains a code object; otherwise it contains the hole.
__ CmpObjectType(edx, CODE_TYPE, ebx);
__ j(not_equal, &runtime);
+ // eax: subject string
+ // edx: code
+ // edi: encoding of subject string (1 if ascii, 0 if two_byte)
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
- __ mov(eax, Operand(esp, 3 * kPointerSize)); // Subject string.
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // Previous index.
- __ mov(ecx, Operand(esp, 4 * kPointerSize)); // JSRegExp object.
- __ SmiUntag(ebx); // Previous index from sim.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ mov(ecx, Operand(esp, kJSRegExpOffset));
+ __ SmiUntag(ebx); // Previous index from smi.
// eax: subject string
// ebx: previous index
@@ -8233,37 +8580,40 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
- // Argument 8: Indicate that this is a direct call from JavaScript.
+ // Argument 7: Indicate that this is a direct call from JavaScript.
__ push(Immediate(1));
- // Argument 7: Start (high end) of backtracking stack memory area.
+ // Argument 6: Start (high end) of backtracking stack memory area.
__ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ push(ecx);
- // Argument 6: At start of string?
- __ xor_(Operand(ecx), ecx); // setcc only operated on cl (lower byte of ecx).
- __ test(ebx, Operand(ebx));
- __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
- __ push(ecx);
-
// Argument 5: static offsets vector buffer.
__ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
- // Argument 4: End of string data.
- __ mov(ecx, FieldOperand(eax, String::kLengthOffset));
- __ add(ecx, Operand(eax));
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ push(ecx);
-
- // Argument 3: Start of string data.
- __ mov(ecx, ebx);
- __ add(ebx, Operand(eax)); // String is ASCII.
- __ add(Operand(ebx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ push(ebx);
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label push_two_byte, push_rest;
+ __ test(edi, Operand(edi));
+ __ mov(edi, FieldOperand(eax, String::kLengthOffset));
+ __ j(zero, &push_two_byte);
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+ __ push(ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ push(ecx); // Argument 3.
+ __ jmp(&push_rest);
+
+ __ bind(&push_two_byte);
+ ASSERT(kShortSize == 2);
+ __ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
+ __ push(ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+ __ push(ecx); // Argument 3.
+
+ __ bind(&push_rest);
// Argument 2: Previous index.
- __ push(ecx);
+ __ push(ebx);
// Argument 1: Subject string.
__ push(eax);
@@ -8272,7 +8622,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(Operand(edx));
// Remove arguments.
- __ add(Operand(esp), Immediate(8 * kPointerSize));
+ __ add(Operand(esp), Immediate(7 * kPointerSize));
// Check the result.
Label success;
@@ -8299,7 +8649,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Load RegExp data.
__ bind(&success);
- __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
@@ -8307,7 +8657,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
// ebx: last_match_info backing store (FixedArray)
@@ -8317,11 +8667,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
__ SmiUntag(edx); // Number of capture registers back from smi.
// Store last subject and last input.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
__ mov(ecx, ebx);
__ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
__ mov(ecx, ebx);
__ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
@@ -8335,7 +8685,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: offsets vector
// edx: number of capture registers
Label next_capture, done;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // Read previous index.
+ __ mov(eax, Operand(esp, kPreviousIndexOffset));
// Capture register counter starts from number of capture registers and
// counts down until wraping after zero.
__ bind(&next_capture);
@@ -8362,7 +8712,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ ret(4 * kPointerSize);
// Do the runtime call to execute the regexp.
@@ -8520,7 +8870,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope use_sse2(SSE2);
CpuFeatures::Scope use_cmov(CMOV);
- FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
+ FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols);
__ comisd(xmm0, xmm1);
// Jump to builtin for NaN.
@@ -8582,30 +8932,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&check_for_strings);
- // Check that both objects are not smis.
- ASSERT_EQ(0, kSmiTag);
- __ mov(ebx, Operand(edx));
- __ and_(ebx, Operand(eax));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &call_builtin);
-
- // Load instance type for both objects.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- Label non_ascii_flat;
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiString =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- __ and_(ecx, kFlatAsciiString);
- __ cmp(ecx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &call_builtin);
- __ and_(ebx, kFlatAsciiString);
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &call_builtin);
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -9659,79 +9986,76 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3) {
- Label compare_lengths, compare_lengths_1;
-
- // Find minimum length. If either length is zero just compare lengths.
+ Label result_not_equal;
+ Label result_greater;
+ Label compare_lengths;
+ // Find minimum length.
+ Label left_shorter;
__ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ test(scratch1, Operand(scratch1));
- __ j(zero, &compare_lengths_1);
- __ mov(scratch2, FieldOperand(right, String::kLengthOffset));
- __ test(scratch2, Operand(scratch2));
- __ j(zero, &compare_lengths_1);
- __ cmp(scratch1, Operand(scratch2));
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope use_cmov(CMOV);
- __ cmov(greater, scratch1, Operand(scratch2));
- } else {
- Label l;
- __ j(less, &l);
- __ mov(scratch1, scratch2);
- __ bind(&l);
+ __ mov(scratch3, scratch1);
+ __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+ Register length_delta = scratch3;
+
+ __ j(less_equal, &left_shorter);
+ // Right string is shorter. Change scratch1 to be length of right string.
+ __ sub(scratch1, Operand(length_delta));
+ __ bind(&left_shorter);
+
+ Register min_length = scratch1;
+
+ // If either length is zero, just compare lengths.
+ __ test(min_length, Operand(min_length));
+ __ j(zero, &compare_lengths);
+
+ // Change index to run from -min_length to -1 by adding min_length
+ // to string start. This means that the loop ends when index reaches zero,
+ // which doesn't need an additional compare.
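+ // For example, with min_length == 3 the loop reads the characters at
+ // offsets -3, -2 and -1 from the adjusted string pointers.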
+ __ lea(left,
+ FieldOperand(left,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ neg(min_length);
+
+ Register index = min_length; // index = -min_length;
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ mov_b(scratch2, Operand(left, index, times_1, 0));
+ __ cmpb(scratch2, Operand(right, index, times_1, 0));
+ __ j(not_equal, &result_not_equal);
+ __ add(Operand(index), Immediate(1));
+ __ j(not_zero, &loop);
}
- Label result_greater, result_less;
- Label loop;
- // Compare next character.
- __ mov(scratch3, Immediate(-1)); // Index into strings.
- __ bind(&loop);
- // Compare characters.
- Label character_compare_done;
- __ add(Operand(scratch3), Immediate(1));
- __ mov_b(scratch2, Operand(left,
- scratch3,
- times_1,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ subb(scratch2, Operand(right,
- scratch3,
- times_1,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ j(not_equal, &character_compare_done);
- __ sub(Operand(scratch1), Immediate(1));
- __ j(not_zero, &loop);
- // If min length characters match compare lengths otherwise last character
- // compare is the result.
- __ bind(&character_compare_done);
- __ j(equal, &compare_lengths);
- __ j(less, &result_less);
- __ jmp(&result_greater);
-
- // Compare lengths.
- Label result_not_equal;
+ // Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ bind(&compare_lengths_1);
- __ sub(scratch1, FieldOperand(right, String::kLengthOffset));
+ __ test(length_delta, Operand(length_delta));
__ j(not_zero, &result_not_equal);
// Result is EQUAL.
ASSERT_EQ(0, EQUAL);
ASSERT_EQ(0, kSmiTag);
- __ xor_(eax, Operand(eax));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(2 * kPointerSize);
+
__ bind(&result_not_equal);
__ j(greater, &result_greater);
// Result is LESS.
- __ bind(&result_less);
- __ mov(eax, Immediate(Smi::FromInt(LESS)->value()));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ Set(eax, Immediate(Smi::FromInt(LESS)));
__ ret(2 * kPointerSize);
// Result is GREATER.
__ bind(&result_greater);
- __ mov(eax, Immediate(Smi::FromInt(GREATER)->value()));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ Set(eax, Immediate(Smi::FromInt(GREATER)));
__ ret(2 * kPointerSize);
}
@@ -9752,41 +10076,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &not_same);
ASSERT_EQ(0, EQUAL);
ASSERT_EQ(0, kSmiTag);
- __ xor_(eax, Operand(eax));
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
__ bind(&not_same);
- // Check that both objects are not smis.
- ASSERT_EQ(0, kSmiTag);
- __ mov(ebx, Operand(edx));
- __ and_(ebx, Operand(eax));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
-
- // Load instance type for both strings.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- Label non_ascii_flat;
- __ and_(ecx, kStringRepresentationMask | kStringEncodingMask);
- __ cmp(ecx, kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &non_ascii_flat);
- const int kFlatAsciiString =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- __ and_(ebx, kFlatAsciiString);
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &non_ascii_flat);
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
// Compare flat ascii strings.
+ __ IncrementCounter(&Counters::string_compare_native, 1);
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
- __ bind(&non_ascii_flat);
-
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 56cf978d..a81a7d1d 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,57 +43,70 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame. The reference may be consumed
+// by GetValue, TakeValue, SetValue, and CodeGenerator::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
- enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen, Expression* expression);
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
+ ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
// The size the reference takes up on the stack.
- int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is left in place with its value above it.
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
- // being read from again. Thae value of the reference may be invalidated,
+ // being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The stored value is left in place (with the
- // reference intact below it) to support chained assignments.
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
+ // Keep the reference on the stack after get, so it can be used by set later.
+ bool persist_after_get_;
};
@@ -420,6 +433,11 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Load a property of an object, returning it in a Result.
+ // The object and the property name are passed on the stack, and
+ // not changed.
+ Result EmitKeyedLoad(bool is_global);
+
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -444,20 +462,20 @@ class CodeGenerator: public AstVisitor {
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
- void ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> constant_operand,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode);
+ Result ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> constant_operand,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
- void LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
+ Result LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode);
void Comparison(AstNode* node,
Condition cc,
@@ -479,10 +497,10 @@ class CodeGenerator: public AstVisitor {
CallFunctionFlags flags,
int position);
- // Use an optimized version of Function.prototype.apply that avoid
- // allocating the arguments object and just copies the arguments
- // from the stack.
- void CallApplyLazy(Property* apply,
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments). We call x the applicand and y the receiver.
+ // The optimization avoids allocating an arguments object if possible.
+ void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
@@ -517,6 +535,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -613,8 +632,8 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class Result;
- friend class FastCodeGenerator;
- friend class CodeGenSelector;
+ friend class FullCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -651,6 +670,11 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+ Result GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right);
+
private:
Token::Value op_;
OverwriteMode mode_;
@@ -697,11 +721,11 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
+ void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
bool ArgsInRegistersSupported() {
- return ((op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV))
- && flags_ != NO_SMI_CODE_IN_STUB;
+ return op_ == Token::ADD || op_ == Token::SUB
+ || op_ == Token::MUL || op_ == Token::DIV;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -710,8 +734,8 @@ class GenericBinaryOpStub: public CodeStub {
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgumentsInRegisters() { return args_in_registers_; }
- bool HasArgumentsReversed() { return args_reversed_; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
};
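
The hunks above change ConstantSmiBinaryOperation and LikelySmiBinaryOperation from void functions that consume their Result arguments into functions that hand a fresh Result back to the caller. A minimal, self-contained sketch of that API shape, using hypothetical Result/Register stand-ins rather than V8's real classes:

#include <cassert>

struct Register { int code; };

// Hypothetical value-carrying Result, only to illustrate the signature change.
class Result {
 public:
  Result() : has_value_(false), reg_() {}
  explicit Result(Register r) : has_value_(true), reg_(r) {}
  bool is_valid() const { return has_value_; }
  Register reg() const { assert(has_value_); return reg_; }
 private:
  bool has_value_;
  Register reg_;
};

// New style, as in ConstantSmiBinaryOperation above: consume *operand and
// return the answer instead of reporting it through a side effect.
Result EmitConstantSmiAdd(Result* operand /*, constant, overwrite mode, ... */) {
  Register target = operand->reg();
  *operand = Result();  // the operand Result is consumed
  // ... code emission into 'target' would go here ...
  return Result(target);
}

int main() {
  Result left(Register{3});
  Result answer = EmitConstantSmiAdd(&left);
  assert(!left.is_valid());
  assert(answer.is_valid() && answer.reg().code == 3);
  return 0;
}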
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 1fbaa3ce..cb500d56 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -53,23 +53,25 @@ struct ByteMnemonic {
static ByteMnemonic two_operands_instr[] = {
{0x03, "add", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
{0x09, "or", OPER_REG_OP_ORDER},
{0x0B, "or", REG_OPER_OP_ORDER},
{0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER},
+ {0x23, "and", REG_OPER_OP_ORDER},
{0x29, "sub", OPER_REG_OP_ORDER},
{0x2A, "subb", REG_OPER_OP_ORDER},
{0x2B, "sub", REG_OPER_OP_ORDER},
- {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
{0x31, "xor", OPER_REG_OP_ORDER},
{0x33, "xor", REG_OPER_OP_ORDER},
+ {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER},
+ {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER},
{0x87, "xchg", REG_OPER_OP_ORDER},
{0x8A, "mov_b", REG_OPER_OP_ORDER},
{0x8B, "mov", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
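
The table above is re-sorted by opcode and gains cmpb entries (0x38, 0x3A), with lea moved to its sorted position at 0x8D. A cut-down sketch of the same table shape and its terminator-based lookup; the {-1, ""} sentinel and the sorted layout are taken from the patch, the lookup code itself is only illustrative:

#include <cstdio>

struct ByteMnemonic {
  int b;              // -1 terminates the table
  const char* mnem;
};

static const ByteMnemonic kTwoOperandInstr[] = {
  {0x03, "add"},  {0x09, "or"},     {0x0B, "or"},
  {0x1B, "sbb"},  {0x21, "and"},    {0x23, "and"},
  {0x29, "sub"},  {0x2B, "sub"},    {0x31, "xor"},
  {0x33, "xor"},  {0x38, "cmpb"},   {0x3A, "cmpb"},
  {0x3B, "cmp"},  {0x84, "test_b"}, {0x85, "test"},
  {0x87, "xchg"}, {0x8A, "mov_b"},  {0x8B, "mov"},
  {0x8D, "lea"},
  {-1, ""}
};

// Linear scan, stopping at the terminator entry.
const char* LookupMnemonic(int opcode) {
  for (const ByteMnemonic* entry = kTwoOperandInstr; entry->b != -1; ++entry) {
    if (entry->b == opcode) return entry->mnem;
  }
  return nullptr;
}

int main() {
  std::printf("0x38 -> %s\n", LookupMnemonic(0x38));  // cmpb (newly added)
  std::printf("0x8D -> %s\n", LookupMnemonic(0x8D));  // lea
  return 0;
}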
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index f485d9e2..fbeec3f5 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -30,7 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
#include "parser.h"
namespace v8 {
@@ -51,7 +51,7 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-ia32.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
SetFunctionPosition(fun);
@@ -160,7 +160,7 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
}
-void FastCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence(int position) {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -193,7 +193,7 @@ void FastCodeGenerator::EmitReturnSequence(int position) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -236,7 +236,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -279,7 +279,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -320,7 +320,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
}
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -361,7 +361,7 @@ void FastCodeGenerator::ApplyTOS(Expression::Context context) {
}
-void FastCodeGenerator::DropAndApply(int count,
+void FullCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
ASSERT(count > 0);
@@ -413,7 +413,7 @@ void FastCodeGenerator::DropAndApply(int count,
}
-void FastCodeGenerator::Apply(Expression::Context context,
+void FullCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
switch (context) {
@@ -478,7 +478,7 @@ void FastCodeGenerator::Apply(Expression::Context context,
}
-void FastCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::DoTest(Expression::Context context) {
// The value to test is in the accumulator. If the value might be needed
// on the stack (value/test and test/value contexts with a stack location
// desired), then the value is already duplicated on the stack.
@@ -612,7 +612,7 @@ void FastCodeGenerator::DoTest(Expression::Context context) {
}
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -631,13 +631,13 @@ MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
}
-void FastCodeGenerator::Move(Register destination, Slot* source) {
+void FullCodeGenerator::Move(Register destination, Slot* source) {
MemOperand location = EmitSlotSearch(source, destination);
__ mov(destination, location);
}
-void FastCodeGenerator::Move(Slot* dst,
+void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
@@ -653,7 +653,7 @@ void FastCodeGenerator::Move(Slot* dst,
}
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
@@ -751,7 +751,7 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
}
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
__ push(Immediate(pairs));
@@ -761,7 +761,7 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -779,17 +779,21 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), context_);
}
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
- Expression* rewrite = var->rewrite();
- if (rewrite == NULL) {
- ASSERT(var->is_global());
+ // Four cases: non-this global variables, lookup slots, all other
+ // types of slots, and parameters that rewrite to explicit property
+ // accesses on the arguments object.
+ Slot* slot = var->slot();
+ Property* property = var->AsProperty();
+
+ if (var->is_global() && !var->is_this()) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
@@ -803,34 +807,24 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// (eg, push/pop elimination).
__ nop();
DropAndApply(1, context, eax);
- } else if (rewrite->AsSlot() != NULL) {
- Slot* slot = rewrite->AsSlot();
- if (FLAG_debug_code) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL: {
- Comment cmnt(masm_, "Stack slot");
- break;
- }
- case Slot::CONTEXT: {
- Comment cmnt(masm_, "Context slot");
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- break;
- }
- }
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ Comment cmnt(masm_, "Lookup slot");
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ Apply(context, eax);
+
+ } else if (slot != NULL) {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
Apply(context, slot);
+
} else {
- Comment cmnt(masm_, "Variable rewritten to property");
- // A variable has been rewritten into an explicit access to an object
- // property.
- Property* property = rewrite->AsProperty();
+ Comment cmnt(masm_, "Rewritten parameter");
ASSERT_NOT_NULL(property);
-
- // The only property expressions that can occur are of the form
- // "slot[literal]".
+ // Rewritten parameter accesses are of the form "slot[literal]".
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -862,7 +856,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
}
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
@@ -889,7 +883,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -958,7 +952,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
@@ -1008,7 +1002,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(ecx, Immediate(key->handle()));
@@ -1018,7 +1012,7 @@ void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
@@ -1026,7 +1020,7 @@ void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
-void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
__ push(result_register());
GenericBinaryOpStub stub(op,
@@ -1037,11 +1031,17 @@ void FastCodeGenerator::EmitBinaryOp(Token::Value op,
}
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
+ // Three main cases: global variables, lookup slots, and all other
+ // types of slots. Left-hand-side parameters that rewrite to
+ // explicit property accesses do not reach here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
+
+ Slot* slot = var->slot();
if (var->is_global()) {
+ ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in eax, variable name in
// ecx, and the global object on the stack.
@@ -1053,8 +1053,14 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
// Overwrite the receiver on the stack with the result if needed.
DropAndApply(1, context, eax);
- } else if (var->slot() != NULL) {
- Slot* slot = var->slot();
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ __ push(result_register()); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ Apply(context, eax);
+
+ } else if (slot != NULL) {
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER:
@@ -1086,7 +1092,7 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
}
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
@@ -1121,7 +1127,7 @@ void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// If the assignment starts a block of assignments to the same object,
@@ -1157,7 +1163,7 @@ void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
-void FastCodeGenerator::VisitProperty(Property* expr) {
+void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
@@ -1177,7 +1183,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
@@ -1198,7 +1204,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
}
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1215,7 +1221,7 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
}
-void FastCodeGenerator::VisitCall(Call* expr) {
+void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1286,7 +1292,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
}
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -1321,7 +1327,7 @@ void FastCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1353,7 +1359,7 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1457,13 +1463,26 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForValue(expr->expression(), kAccumulator);
+ Label no_conversion;
+ __ test(result_register(), Immediate(kSmiTagMask));
+ __ j(zero, &no_conversion);
+ __ push(result_register());
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
+ Apply(context_, result_register());
+ break;
+ }
+
default:
UNREACHABLE();
}
}
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
// Expression can only be a property, a global or a (parameter or local)
@@ -1482,7 +1501,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
@@ -1498,11 +1517,15 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForValue(prop->key(), kStack);
EmitKeyedPropertyLoad(prop);
}
- __ push(eax);
}
- // Convert to number.
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &no_conversion);
+ __ push(eax);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -1534,13 +1557,33 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ if (loop_depth() > 0) {
+ if (expr->op() == Token::INC) {
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ } else {
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ }
+ __ j(overflow, &stub_call);
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+ __ bind(&stub_call);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ }
+ }
// Call stub for +1/-1.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(1)));
GenericBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
+ stub.GenerateCall(masm(), eax, Smi::FromInt(1));
+ __ bind(&done);
// Store the value returned in eax.
switch (assign_type) {
@@ -1595,7 +1638,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
@@ -1630,7 +1673,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
}
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
@@ -1745,25 +1788,25 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, eax);
}
-Register FastCodeGenerator::result_register() { return eax; }
+Register FullCodeGenerator::result_register() { return eax; }
-Register FastCodeGenerator::context_register() { return esi; }
+Register FullCodeGenerator::context_register() { return esi; }
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
}
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
__ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
}
@@ -1771,7 +1814,7 @@ void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
// ----------------------------------------------------------------------------
// Non-local control flow support.
-void FastCodeGenerator::EnterFinallyBlock() {
+void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ mov(edx, Operand(esp, 0));
@@ -1785,7 +1828,7 @@ void FastCodeGenerator::EnterFinallyBlock() {
}
-void FastCodeGenerator::ExitFinallyBlock() {
+void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
// Restore result register from stack.
__ pop(result_register());
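
The count-operation hunk above adds an inline fast path: inside a loop, add or subtract the tagged constant Smi::FromInt(1) directly, and fall back to the stub only on overflow or when the operand turns out not to be a smi. The trick works because smi arithmetic can be done on the tagged representation. A minimal sketch of that tagging identity, with illustrative helpers rather than V8's real Smi class:

#include <cassert>
#include <cstdint>

// ia32-style smis: tag bit 0 == 0, payload shifted left by one.
const int32_t kSmiTagMask = 1;
bool IsSmi(int32_t v) { return (v & kSmiTagMask) == 0; }
int32_t SmiFromInt(int32_t i) { return i << 1; }
int32_t SmiToInt(int32_t v) { return v >> 1; }

int main() {
  int32_t a = SmiFromInt(41);
  // Adding the *tagged* constant is the same as tagging 41 + 1, which is what
  // "__ add(Operand(eax), Immediate(Smi::FromInt(1)))" relies on; only an
  // overflow check and a tag re-check are needed before the stub fallback.
  int32_t sum = a + SmiFromInt(1);
  assert(IsSmi(sum) && SmiToInt(sum) == 42);

  // A value with the tag bit set stays tagged after the add, so the
  // "test(eax, Immediate(kSmiTagMask))" that follows still routes non-smis
  // to the stub call.
  int32_t not_a_smi = a | 1;
  assert(!IsSmi(not_a_smi + SmiFromInt(1)));
  return 0;
}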
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 5658605a..ebc2cfa9 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -244,11 +244,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Get the map of the receiver.
__ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
+
+ // Check bit field.
__ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test(ebx, Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index d7c7d3a2..a16c1033 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1454,6 +1454,36 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
+void MacroAssembler::IncrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ IncrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ DecrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (FLAG_debug_code) Check(cc, msg);
}
@@ -1495,6 +1525,38 @@ void MacroAssembler::Abort(const char* msg) {
}
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that both objects are not smis.
+ ASSERT_EQ(0, kSmiTag);
+ mov(scratch1, Operand(object1));
+ and_(scratch1, Operand(object2));
+ test(scratch1, Immediate(kSmiTagMask));
+ j(zero, failure);
+
+ // Load instance type for both strings.
+ mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
+ mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
+ movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ // Interleave bits from both instance types and compare them in one check.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ and_(scratch1, kFlatAsciiStringMask);
+ and_(scratch2, kFlatAsciiStringMask);
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+ j(not_equal, failure);
+}
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
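
JumpIfNotBothSequentialAsciiStrings above masks both instance types and packs them with lea (scale 8, i.e. type1 | type2 << 3) so a single compare checks both strings; the ASSERT_EQ guarantees the shifted field cannot collide with the unshifted one. A standalone sketch of the packing trick, with assumed constants rather than V8's real instance-type encoding:

#include <cassert>
#include <cstdint>

const uint32_t kFlatAsciiStringMask = 0x7;   // assumed 3-bit wide field
const uint32_t kFlatAsciiStringTag  = 0x4;   // assumed "flat ASCII" value

bool BothFlatAscii(uint32_t type1, uint32_t type2) {
  // Mirrors: and_(scratch1, mask); and_(scratch2, mask);
  //          lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  //          cmp(scratch1, tag | (tag << 3));
  uint32_t combined = (type1 & kFlatAsciiStringMask) |
                      ((type2 & kFlatAsciiStringMask) << 3);
  return combined == (kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
}

int main() {
  // The packing is only sound if the shifted field cannot clobber the low
  // one, which is what ASSERT_EQ(0, mask & (mask << 3)) checks in the patch.
  static_assert((kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)) == 0,
                "fields must not overlap");
  assert(BothFlatAscii(0x4, 0x4));
  assert(!BothFlatAscii(0x4, 0x5));
  assert(!BothFlatAscii(0x6, 0x4));
  return 0;
}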
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index ceecebf7..3f000ee4 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -392,6 +392,8 @@ class MacroAssembler: public Assembler {
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
+ void IncrementCounter(Condition cc, StatsCounter* counter, int value);
+ void DecrementCounter(Condition cc, StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
@@ -413,6 +415,17 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ // ---------------------------------------------------------------------------
+ // String utilities.
+
+ // Checks if both objects are sequential ASCII strings, and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label *on_not_flat_ascii_strings);
+
private:
List<Unresolved> unresolved_;
bool generating_stub_;
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 4af59dd6..f6da6937 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -59,8 +59,6 @@ namespace internal {
* call through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
@@ -74,6 +72,8 @@ namespace internal {
* - backup of caller ebx
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
+ * - Boolean at start (if 1, we are starting at the start of the string,
+ * otherwise 0)
* - register 0 ebp[-4] (Only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@@ -625,6 +625,7 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -667,6 +668,15 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Store this value in a local variable, for use when clearing
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
+
+ // Determine whether the start index is zero, that is, whether we are at the
+ // start of the string, and store that value in a local variable.
+ __ mov(ebx, Operand(ebp, kStartIndex));
+ __ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx).
+ __ test(ebx, Operand(ebx));
+ __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
+ __ mov(Operand(ebp, kAtStart), ecx);
+
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 8e7a6a5d..d9866b72 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -123,8 +123,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- static const int kAtStart = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
@@ -133,8 +132,9 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
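
Taken together, the .cc and .h hunks above move the "at start" flag from a caller-passed argument above the saved frame pointer to a frame local computed in the prologue: clear a register, test the start index, and setcc writes 1 exactly when the index is zero; the register area then starts one slot lower. A sketch of the equivalent flag computation and offset chaining, with illustrative offsets rather than the real frame layout:

#include <cassert>

// Branch-free equivalent of the added prologue sequence:
//   xor_(ecx, ecx);     // clear ecx, because setcc only writes cl
//   test(ebx, ebx);     // ebx holds the start index
//   setcc(zero, ecx);   // ecx = 1 iff start index == 0
int ComputeAtStart(int start_index) {
  return start_index == 0 ? 1 : 0;
}

// Locals chain downward from ebp one pointer at a time, as in the
// kInputStartMinusOne / kAtStart / kRegisterZero constants (values assumed).
const int kPointerSize = 4;
const int kInputStartMinusOne = -16;                      // assumed offset
const int kAtStart = kInputStartMinusOne - kPointerSize;  // new local slot
const int kRegisterZero = kAtStart - kPointerSize;        // shifted down

int main() {
  assert(ComputeAtStart(0) == 1);
  assert(ComputeAtStart(5) == 0);
  // The register area now begins one slot lower than before the patch.
  assert(kRegisterZero == kInputStartMinusOne - 2 * kPointerSize);
  return 0;
}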
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index 3ebd2e61..94ef7bff 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -53,8 +53,8 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/src/ic.h b/src/ic.h
index be7f956b..8f0eb376 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -295,6 +295,13 @@ class KeyedLoadIC: public IC {
static void ClearInlinedVersion(Address address);
private:
+ // Bit mask to be tested against the map's bit field for the cases in which
+ // the generic stub should go to the slow case.
+ // The access check bit must be tested explicitly, since the generic stub
+ // does not perform map checks.
+ static const int kSlowCaseBitFieldMask =
+ (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
+
static void Generate(MacroAssembler* masm, const ExternalReference& f);
// Update the inline cache.
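
The new kSlowCaseBitFieldMask folds two map bit-field conditions (access check needed, has indexed interceptor) into one mask, so the single "__ test(ebx, Immediate(kSlowCaseBitFieldMask))" in the ic-ia32.cc hunk covers both slow-case checks. A standalone sketch with assumed bit positions (the real values live in Map::kIsAccessCheckNeeded and Map::kHasIndexedInterceptor):

#include <cassert>

const int kIsAccessCheckNeeded   = 3;  // assumed bit position
const int kHasIndexedInterceptor = 5;  // assumed bit position

const int kSlowCaseBitFieldMask =
    (1 << kIsAccessCheckNeeded) | (1 << kHasIndexedInterceptor);

// One test against the combined mask replaces two separate bit tests.
bool NeedsSlowCase(int bit_field) {
  return (bit_field & kSlowCaseBitFieldMask) != 0;
}

int main() {
  assert(!NeedsSlowCase(0));
  assert(NeedsSlowCase(1 << kIsAccessCheckNeeded));
  assert(NeedsSlowCase(1 << kHasIndexedInterceptor));
  assert(!NeedsSlowCase(1 << 0));  // unrelated bits do not trigger it
  return 0;
}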
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 8af472d3..505cf03e 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -4462,10 +4462,13 @@ void CharacterRange::Merge(ZoneList<CharacterRange>* first_set,
while (i1 < n1 || i2 < n2) {
CharacterRange next_range;
int range_source;
- if (i2 == n2 || first_set->at(i1).from() < second_set->at(i2).from()) {
+ if (i2 == n2 ||
+ (i1 < n1 && first_set->at(i1).from() < second_set->at(i2).from())) {
+ // Next smallest element is in first set.
next_range = first_set->at(i1++);
range_source = kInsideFirst;
} else {
+ // Next smallest element is in second set.
next_range = second_set->at(i2++);
range_source = kInsideSecond;
}
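
The CharacterRange::Merge fix above guards the first_set->at(i1) access with "i1 < n1": once the first list is exhausted but the second is not, the old condition read one element past the end of the first list. A minimal sketch of the same two-way merge over plain ints, showing where the guard matters:

#include <cassert>
#include <cstddef>
#include <vector>

std::vector<int> Merge(const std::vector<int>& first,
                       const std::vector<int>& second) {
  std::vector<int> out;
  size_t i1 = 0, i2 = 0;
  const size_t n1 = first.size(), n2 = second.size();
  while (i1 < n1 || i2 < n2) {
    // Without the "i1 < n1 &&" guard, exhausting 'first' while 'second'
    // still has elements would index past the end of 'first'.
    if (i2 == n2 || (i1 < n1 && first[i1] < second[i2])) {
      out.push_back(first[i1++]);   // next smallest element is in first set
    } else {
      out.push_back(second[i2++]);  // next smallest element is in second set
    }
  }
  return out;
}

int main() {
  std::vector<int> a = {1, 4};
  std::vector<int> b = {2, 3, 5, 6};  // longer, so 'a' runs out first
  std::vector<int> m = Merge(a, b);
  assert((m == std::vector<int>{1, 2, 3, 4, 5, 6}));
  return 0;
}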
diff --git a/src/list.h b/src/list.h
index aff63c38..d3c2767a 100644
--- a/src/list.h
+++ b/src/list.h
@@ -68,7 +68,8 @@ class List {
// not safe to use after operations that can change the list's
// backing store (eg, Add).
inline T& operator[](int i) const {
- ASSERT(0 <= i && i < length_);
+ ASSERT(0 <= i);
+ ASSERT(i < length_);
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
diff --git a/src/log.cc b/src/log.cc
index 98dd5621..5de7429e 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -155,6 +155,13 @@ void StackTracer::Trace(TickSample* sample) {
return;
}
+ const Address functionAddr =
+ sample->fp + JavaScriptFrameConstants::kFunctionOffset;
+ if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp,
+ functionAddr)) {
+ sample->function = Memory::Address_at(functionAddr) - kHeapObjectTag;
+ }
+
int i = 0;
const Address callback = Logger::current_state_ != NULL ?
Logger::current_state_->external_callback() : NULL;
@@ -162,11 +169,8 @@ void StackTracer::Trace(TickSample* sample) {
sample->stack[i++] = callback;
}
- SafeStackTraceFrameIterator it(
- reinterpret_cast<Address>(sample->fp),
- reinterpret_cast<Address>(sample->sp),
- reinterpret_cast<Address>(sample->sp),
- js_entry_sp);
+ SafeStackTraceFrameIterator it(sample->fp, sample->sp,
+ sample->sp, js_entry_sp);
while (!it.done() && i < TickSample::kMaxFramesCount) {
sample->stack[i++] = it.frame()->pc();
it.Advance();
@@ -837,14 +841,45 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- static Address prev_to_ = NULL;
+ MoveEventInternal(CODE_MOVE_EVENT, from, to);
+#endif
+}
+
+
+void Logger::CodeDeleteEvent(Address from) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ DeleteEventInternal(CODE_DELETE_EVENT, from);
+#endif
+}
+
+
+void Logger::SnapshotPositionEvent(Address addr, int pos) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_snapshot_positions) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,", log_events_[SNAPSHOT_POSITION_EVENT]);
+ msg.AppendAddress(addr);
+ msg.Append(",%d", pos);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::FunctionCreateEvent(JSFunction* function) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ static Address prev_code = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[CODE_MOVE_EVENT]);
- msg.AppendAddress(from);
+ msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
+ msg.AppendAddress(function->address());
msg.Append(',');
- msg.AppendAddress(to, prev_to_);
- prev_to_ = to;
+ msg.AppendAddress(function->code()->address(), prev_code);
+ prev_code = function->code()->address();
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
@@ -855,37 +890,56 @@ void Logger::CodeMoveEvent(Address from, Address to) {
}
-void Logger::CodeDeleteEvent(Address from) {
+void Logger::FunctionMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
+ MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);
+#endif
+}
+
+
+void Logger::FunctionDeleteEvent(Address from) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ DeleteEventInternal(FUNCTION_DELETE_EVENT, from);
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::MoveEventInternal(LogEventsAndTags event,
+ Address from,
+ Address to) {
+ static Address prev_to_ = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[CODE_DELETE_EVENT]);
+ msg.Append("%s,", log_events_[event]);
msg.AppendAddress(from);
+ msg.Append(',');
+ msg.AppendAddress(to, prev_to_);
+ prev_to_ = to;
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
+#endif
-void Logger::SnapshotPositionEvent(Address addr, int pos) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!Log::IsEnabled() || !FLAG_log_snapshot_positions) return;
+void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- msg.Append("%s,", log_events_[SNAPSHOT_POSITION_EVENT]);
- msg.AppendAddress(addr);
- msg.Append(",%d", pos);
+ msg.Append("%s,", log_events_[event]);
+ msg.AppendAddress(from);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
+#endif
void Logger::ResourceEvent(const char* name, const char* tag) {
@@ -1069,13 +1123,17 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!Log::IsEnabled() || !FLAG_prof) return;
static Address prev_sp = NULL;
+ static Address prev_function = NULL;
LogMessageBuilder msg;
msg.Append("%s,", log_events_[TICK_EVENT]);
- Address prev_addr = reinterpret_cast<Address>(sample->pc);
+ Address prev_addr = sample->pc;
msg.AppendAddress(prev_addr);
msg.Append(',');
- msg.AppendAddress(reinterpret_cast<Address>(sample->sp), prev_sp);
- prev_sp = reinterpret_cast<Address>(sample->sp);
+ msg.AppendAddress(sample->sp, prev_sp);
+ prev_sp = sample->sp;
+ msg.Append(',');
+ msg.AppendAddress(sample->function, prev_function);
+ prev_function = sample->function;
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
@@ -1144,6 +1202,7 @@ void Logger::ResumeProfiler(int flags) {
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
LogCompiledFunctions();
+ LogFunctionObjects();
LogAccessorCallbacks();
if (!FLAG_sliding_state_window) ticker_->Start();
}
@@ -1178,9 +1237,7 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis) {
AssertNoAllocation no_alloc;
int compiled_funcs_count = 0;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- ASSERT(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsSharedFunctionInfo()) continue;
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
if (sfi->is_compiled()
@@ -1290,12 +1347,22 @@ void Logger::LogCompiledFunctions() {
}
+void Logger::LogFunctionObjects() {
+ AssertNoAllocation no_alloc;
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ if (!obj->IsJSFunction()) continue;
+ JSFunction* jsf = JSFunction::cast(obj);
+ if (!jsf->is_compiled()) continue;
+ LOG(FunctionCreateEvent(jsf));
+ }
+}
+
+
void Logger::LogAccessorCallbacks() {
AssertNoAllocation no_alloc;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- ASSERT(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsAccessorInfo()) continue;
AccessorInfo* ai = AccessorInfo::cast(obj);
if (!ai->name()->IsString()) continue;
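
The log.cc changes above fold the code-move/code-delete and the new function-move/function-delete events into shared MoveEventInternal and DeleteEventInternal helpers parameterized by the event tag. A compact sketch of that consolidation with a stand-in logger; the names follow the patch, but the Address typedef and the formatting are only illustrative:

#include <cstdio>

enum LogEventsAndTags {
  CODE_MOVE_EVENT,
  CODE_DELETE_EVENT,
  FUNCTION_MOVE_EVENT,
  FUNCTION_DELETE_EVENT
};

static const char* const kLogEvents[] = {
  "code-move", "code-delete", "function-move", "function-delete"
};

typedef void* Address;  // stand-in for V8's byte* Address

// One internal helper per message shape, instead of one copy per event kind.
void MoveEventInternal(LogEventsAndTags event, Address from, Address to) {
  std::printf("%s,%p,%p\n", kLogEvents[event], from, to);
}

void DeleteEventInternal(LogEventsAndTags event, Address from) {
  std::printf("%s,%p\n", kLogEvents[event], from);
}

// The public events become thin wrappers, as in the patch.
void CodeMoveEvent(Address from, Address to) {
  MoveEventInternal(CODE_MOVE_EVENT, from, to);
}
void FunctionDeleteEvent(Address from) {
  DeleteEventInternal(FUNCTION_DELETE_EVENT, from);
}

int main() {
  int a = 0, b = 0;
  CodeMoveEvent(&a, &b);
  FunctionDeleteEvent(&a);
  return 0;
}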
diff --git a/src/log.h b/src/log.h
index e21df033..1f6e60e1 100644
--- a/src/log.h
+++ b/src/log.h
@@ -116,6 +116,9 @@ class VMState BASE_EMBEDDED {
V(CODE_CREATION_EVENT, "code-creation", "cc") \
V(CODE_MOVE_EVENT, "code-move", "cm") \
V(CODE_DELETE_EVENT, "code-delete", "cd") \
+ V(FUNCTION_CREATION_EVENT, "function-creation", "fc") \
+ V(FUNCTION_MOVE_EVENT, "function-move", "fm") \
+ V(FUNCTION_DELETE_EVENT, "function-delete", "fd") \
V(SNAPSHOT_POSITION_EVENT, "snapshot-pos", "sp") \
V(TICK_EVENT, "tick", "t") \
V(REPEAT_META_EVENT, "repeat", "r") \
@@ -224,6 +227,12 @@ class Logger {
static void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
static void CodeDeleteEvent(Address from);
+ // Emits a function object create event.
+ static void FunctionCreateEvent(JSFunction* function);
+ // Emits a function move event.
+ static void FunctionMoveEvent(Address from, Address to);
+ // Emits a function delete event.
+ static void FunctionDeleteEvent(Address from);
static void SnapshotPositionEvent(Address addr, int pos);
@@ -278,6 +287,8 @@ class Logger {
// Logs all compiled functions found in the heap.
static void LogCompiledFunctions();
+ // Logs all compiled JSFunction objects found in the heap.
+ static void LogFunctionObjects();
// Logs all accessor callbacks found in the heap.
static void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
@@ -299,6 +310,15 @@ class Logger {
const char* name,
Address entry_point);
+ // Internal configurable move event.
+ static void MoveEventInternal(LogEventsAndTags event,
+ Address from,
+ Address to);
+
+ // Internal configurable delete event.
+ static void DeleteEventInternal(LogEventsAndTags event,
+ Address from);
+
// Emits aliases for compressed messages.
static void LogAliases();
diff --git a/src/macros.py b/src/macros.py
index 1e436a0a..c160b491 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -92,6 +92,7 @@ macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
+macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e284b426..1f2c37d3 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -129,7 +129,8 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
#endif
PagedSpaces spaces;
- while (PagedSpace* space = spaces.next()) {
+ for (PagedSpace* space = spaces.next();
+ space != NULL; space = spaces.next()) {
space->PrepareForMarkCompact(compacting_collection_);
}
@@ -172,7 +173,7 @@ void MarkCompactCollector::Finish() {
int old_gen_used = 0;
OldSpaces spaces;
- while (OldSpace* space = spaces.next()) {
+ for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
old_gen_recoverable += space->Waste() + space->AvailableFree();
old_gen_used += space->Size();
}
@@ -475,8 +476,8 @@ void MarkCompactCollector::MarkDescriptorArray(
void MarkCompactCollector::CreateBackPointers() {
HeapObjectIterator iterator(Heap::map_space());
- while (iterator.has_next()) {
- Object* next_object = iterator.next();
+ for (HeapObject* next_object = iterator.next();
+ next_object != NULL; next_object = iterator.next()) {
if (next_object->IsMap()) { // Could also be ByteArray on free list.
Map* map = Map::cast(next_object);
if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
@@ -509,8 +510,7 @@ static void ScanOverflowedObjects(T* it) {
// so that we don't waste effort pointlessly scanning for objects.
ASSERT(!marking_stack.is_full());
- while (it->has_next()) {
- HeapObject* object = it->next();
+ for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
if (object->IsOverflowed()) {
object->ClearOverflow();
ASSERT(object->IsMarked());
@@ -793,8 +793,9 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// scan the descriptor arrays of those maps, not all maps.
// All of these actions are carried out only on maps of JSObjects
// and related subtypes.
- while (map_iterator.has_next()) {
- Map* map = reinterpret_cast<Map*>(map_iterator.next());
+ for (HeapObject* obj = map_iterator.next();
+ obj != NULL; obj = map_iterator.next()) {
+ Map* map = reinterpret_cast<Map*>(obj);
if (!map->IsMarked() && map->IsByteArray()) continue;
ASSERT(SafeIsMap(map));
@@ -969,12 +970,6 @@ inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
inline void IgnoreNonLiveObject(HeapObject* object) {}
-// A code deletion event is logged for non-live code objects.
-inline void LogNonLiveCodeObject(HeapObject* object) {
- if (object->IsCode()) LOG(CodeDeleteEvent(object->address()));
-}
-
-
// Function template that, given a range of addresses (eg, a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
@@ -1122,10 +1117,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
is_previous_alive = true;
}
} else {
- if (object->IsCode()) {
- // Notify the logger that compiled code has been collected.
- LOG(CodeDeleteEvent(Code::cast(object)->address()));
- }
+ MarkCompactCollector::ReportDeleteIfNeeded(object);
if (is_previous_alive) { // Transition from live to free.
free_start = current;
is_previous_alive = false;
@@ -1204,7 +1196,7 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
// Compute the forwarding pointers in each space.
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
- IgnoreNonLiveObject>(
+ ReportDeleteIfNeeded>(
Heap::old_pointer_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
@@ -1212,7 +1204,7 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
Heap::old_data_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
- LogNonLiveCodeObject>(
+ ReportDeleteIfNeeded>(
Heap::code_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
@@ -1291,6 +1283,7 @@ class MapCompact {
MapIterator it;
HeapObject* o = it.next();
for (; o != first_map_to_evacuate_; o = it.next()) {
+ ASSERT(o != NULL);
Map* map = reinterpret_cast<Map*>(o);
ASSERT(!map->IsMarked());
ASSERT(!map->IsOverflowed());
@@ -1316,10 +1309,8 @@ class MapCompact {
void UpdateMapPointersInLargeObjectSpace() {
LargeObjectIterator it(Heap::lo_space());
- while (true) {
- if (!it.has_next()) break;
- UpdateMapPointersInObject(it.next());
- }
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ UpdateMapPointersInObject(obj);
}
void Finish() {
@@ -1362,8 +1353,8 @@ class MapCompact {
static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
while (true) {
- ASSERT(it->has_next());
HeapObject* next = it->next();
+ ASSERT(next != NULL);
if (next == last)
return NULL;
ASSERT(!next->IsOverflowed());
@@ -1452,8 +1443,9 @@ class MapCompact {
if (!FLAG_enable_slow_asserts)
return;
- while (map_to_evacuate_it_.has_next())
- ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
+ for (HeapObject* obj = map_to_evacuate_it_.next();
+ obj != NULL; obj = map_to_evacuate_it_.next())
+ ASSERT(FreeListNode::IsFreeListNode(obj));
}
#endif
};
@@ -1486,7 +1478,8 @@ void MarkCompactCollector::SweepSpaces() {
map_compact.FinishMapSpace();
PagedSpaces spaces;
- while (PagedSpace* space = spaces.next()) {
+ for (PagedSpace* space = spaces.next();
+ space != NULL; space = spaces.next()) {
if (space == Heap::map_space()) continue;
map_compact.UpdateMapPointersInPagedSpace(space);
}
@@ -1661,7 +1654,8 @@ void MarkCompactCollector::UpdatePointers() {
// Large objects do not move, the map word can be updated directly.
LargeObjectIterator it(Heap::lo_space());
- while (it.has_next()) UpdatePointersInNewObject(it.next());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ UpdatePointersInNewObject(obj);
USE(live_maps);
USE(live_pointer_olds);
@@ -1825,7 +1819,8 @@ void MarkCompactCollector::RelocateObjects() {
Page::set_rset_state(Page::IN_USE);
#endif
PagedSpaces spaces;
- while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
+ for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+ space->MCCommitRelocationInfo();
}
@@ -1906,6 +1901,11 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
+ HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+ if (copied_to->IsJSFunction()) {
+ LOG(FunctionMoveEvent(old_addr, new_addr));
+ }
+
return obj_size;
}
@@ -1986,6 +1986,11 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
}
#endif
+ HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+ if (copied_to->IsJSFunction()) {
+ LOG(FunctionMoveEvent(old_addr, new_addr));
+ }
+
return obj_size;
}
@@ -2001,4 +2006,15 @@ void MarkCompactCollector::RebuildRSets() {
Heap::RebuildRSets();
}
+
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (obj->IsCode()) {
+ LOG(CodeDeleteEvent(obj->address()));
+ } else if (obj->IsJSFunction()) {
+ LOG(FunctionDeleteEvent(obj->address()));
+ }
+#endif
+}
+
} } // namespace v8::internal
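
Several hunks above, in both log.cc and mark-compact.cc, replace the has_next()/next() pattern with a for loop over a next() that returns NULL once the iteration is done. A toy iterator with that contract and the loop shape used throughout the patch; the class itself is only illustrative, not V8's HeapIterator:

#include <cassert>
#include <cstddef>
#include <vector>

class ObjectIterator {
 public:
  explicit ObjectIterator(const std::vector<int>* objects)
      : objects_(objects), index_(0) {}
  // Hands out elements until exhausted, then returns NULL, so callers no
  // longer need a separate has_next() check.
  const int* next() {
    if (index_ >= objects_->size()) return NULL;
    return &(*objects_)[index_++];
  }
 private:
  const std::vector<int>* objects_;
  size_t index_;
};

int main() {
  std::vector<int> heap = {1, 2, 3};
  int sum = 0;
  ObjectIterator it(&heap);
  // The loop shape adopted by the patch:
  for (const int* obj = it.next(); obj != NULL; obj = it.next()) {
    sum += *obj;
  }
  assert(sum == 6);
  return 0;
}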
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 02aedb3a..ab572f69 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -115,6 +115,9 @@ class MarkCompactCollector: public AllStatic {
static bool in_use() { return state_ > PREPARE_GC; }
#endif
+ // Determine type of object and emit deletion log event.
+ static void ReportDeleteIfNeeded(HeapObject* obj);
+
private:
#ifdef DEBUG
enum CollectorState {
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 10138d91..6457ae74 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -164,10 +164,10 @@ int main(int argc, char** argv) {
}
context.Dispose();
CppByteSink sink(argv[1]);
- i::Serializer ser(&sink);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
i::Heap::CollectAllGarbage(true);
+ i::StartupSerializer ser(&sink);
ser.Serialize();
return 0;
}
diff --git a/src/objects.cc b/src/objects.cc
index 118c4891..c76fc833 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2839,7 +2839,11 @@ Object* JSObject::DefineGetterSetter(String* name,
if (result.IsReadOnly()) return Heap::undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
- if (obj->IsFixedArray()) return obj;
+ if (obj->IsFixedArray()) {
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+ SetNormalizedProperty(name, obj, details);
+ return obj;
+ }
}
}
}
diff --git a/src/objects.h b/src/objects.h
index 40be0df2..8730f913 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3649,6 +3649,8 @@ class JSRegExp: public JSObject {
FixedArray::kHeaderSize + kTagIndex * kPointerSize;
static const int kDataAsciiCodeOffset =
FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
+ static const int kDataUC16CodeOffset =
+ FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
static const int kIrregexpCaptureCountOffset =
FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
};
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 353d1654..ff757768 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -95,6 +95,24 @@ int OS::ActivationFrameAlignment() {
}
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
@@ -555,17 +573,17 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
- sample.pc = mcontext.mc_eip;
- sample.sp = mcontext.mc_esp;
- sample.fp = mcontext.mc_ebp;
+ sample.pc = reinterpret_cast<Address>(mcontext.mc_eip);
+ sample.sp = reinterpret_cast<Address>(mcontext.mc_esp);
+ sample.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
- sample.pc = mcontext.mc_rip;
- sample.sp = mcontext.mc_rsp;
- sample.fp = mcontext.mc_rbp;
+ sample.pc = reinterpret_cast<Address>(mcontext.mc_rip);
+ sample.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+ sample.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
- sample.pc = mcontext.mc_r15;
- sample.sp = mcontext.mc_r13;
- sample.fp = mcontext.mc_r11;
+ sample.pc = reinterpret_cast<Address>(mcontext.mc_r15);
+ sample.sp = reinterpret_cast<Address>(mcontext.mc_r13);
+ sample.fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif
active_sampler_->SampleStack(&sample);
}
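
The LocalTimezone/LocalTimeOffset bodies added here (and repeated verbatim for Linux, Mac OS X and OpenBSD below) compute the standard-time UTC offset from tm_gmtoff and then strip the one-hour DST component when tm_isdst is set; they move out of platform-posix.cc presumably because tm_gmtoff is a BSD/glibc extension that not every POSIX platform provides. A standalone sketch of the same computation; the msPerSecond constant and the NULL guard are assumptions of this sketch:

#include <cstdio>
#include <ctime>

const int msPerSecond = 1000;

// tm_gmtoff is the *current* offset from UTC, so when DST is in effect the
// one-hour DST component is subtracted to get the standard-time offset.
double LocalTimeOffsetMs() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  if (t == NULL) return 0;  // extra guard; the patch assumes localtime succeeds
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}

int main() {
  std::printf("standard-time offset: %.0f ms\n", LocalTimeOffsetMs());
  return 0;
}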
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index bfcd8fba..005b1deb 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -159,6 +159,24 @@ int OS::ActivationFrameAlignment() {
}
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
@@ -707,23 +725,23 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
- sample.pc = mcontext.gregs[REG_EIP];
- sample.sp = mcontext.gregs[REG_ESP];
- sample.fp = mcontext.gregs[REG_EBP];
+ sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+ sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+ sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
- sample.pc = mcontext.gregs[REG_RIP];
- sample.sp = mcontext.gregs[REG_RSP];
- sample.fp = mcontext.gregs[REG_RBP];
+ sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+ sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+ sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
- sample.pc = mcontext.gregs[R15];
- sample.sp = mcontext.gregs[R13];
- sample.fp = mcontext.gregs[R11];
+ sample.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+ sample.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+ sample.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
- sample.pc = mcontext.arm_pc;
- sample.sp = mcontext.arm_sp;
- sample.fp = mcontext.arm_fp;
+ sample.pc = reinterpret_cast<Address>(mcontext.arm_pc);
+ sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
+ sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#endif
if (IsVmThread())
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 0d5be45e..e379ae22 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -259,6 +259,24 @@ int OS::ActivationFrameAlignment() {
}
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
int OS::StackWalk(Vector<StackFrame> frames) {
// If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
if (backtrace == NULL)
@@ -559,9 +577,9 @@ class Sampler::PlatformData : public Malloced {
flavor,
reinterpret_cast<natural_t*>(&state),
&count) == KERN_SUCCESS) {
- sample.pc = state.REGISTER_FIELD(ip);
- sample.sp = state.REGISTER_FIELD(sp);
- sample.fp = state.REGISTER_FIELD(bp);
+ sample.pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+ sample.sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+ sample.fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
sampler_->SampleStack(&sample);
}
thread_resume(profiled_thread_);
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 6d273047..62e60044 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -94,6 +94,24 @@ int OS::ActivationFrameAlignment() {
}
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+ time_t tv = time(NULL);
+ struct tm* t = localtime(&tv);
+ // tm_gmtoff includes any daylight savings offset, so subtract it.
+ return static_cast<double>(t->tm_gmtoff * msPerSecond -
+ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 41e0e64f..89f4d983 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -99,15 +99,6 @@ int64_t OS::Ticks() {
}
-const char* OS::LocalTimezone(double time) {
- if (isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
- if (NULL == t) return "";
- return t->tm_zone;
-}
-
-
double OS::DaylightSavingsOffset(double time) {
if (isnan(time)) return nan_value();
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -117,15 +108,6 @@ double OS::DaylightSavingsOffset(double time) {
}
-double OS::LocalTimeOffset() {
- time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
- // tm_gmtoff includes any daylight savings offset, so subtract it.
- return static_cast<double>(t->tm_gmtoff * msPerSecond -
- (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
// ----------------------------------------------------------------------------
// POSIX stdio support.
//
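
The three hunks above move LocalTimezone() and LocalTimeOffset() out of the shared POSIX file and give each platform its own copy, because not every platform's struct tm carries the tm_gmtoff extension. Below is a minimal standalone sketch (not part of the patch) of the tm_gmtoff variant that the Mac OS X and OpenBSD hunks add; main() and the printf are illustrative scaffolding only.

// Standalone sketch, assuming a platform whose struct tm provides the
// BSD/glibc tm_gmtoff extension (as on Mac OS X and OpenBSD).
#include <cstdio>
#include <ctime>

int main() {
  const double msPerSecond = 1000;
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  if (t == NULL) return 1;
  // tm_gmtoff already folds in any daylight saving shift, so subtract an hour
  // while DST is in effect to get the standard-time offset from UTC (in ms).
  double offset = t->tm_gmtoff * msPerSecond -
                  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0);
  printf("local standard-time offset: %.0f ms\n", offset);
  return 0;
}
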
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
new file mode 100644
index 00000000..85c2c54c
--- /dev/null
+++ b/src/platform-solaris.cc
@@ -0,0 +1,607 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#ifdef __sparc
+# error "V8 does not support the SPARC CPU architecture."
+#endif
+
+#include <sys/stack.h> // for stack alignment
+#include <unistd.h> // getpagesize(), usleep()
+#include <sys/mman.h> // mmap()
+#include <execinfo.h> // backtrace(), backtrace_symbols()
+#include <pthread.h>
+#include <sched.h> // for sched_yield
+#include <semaphore.h>
+#include <time.h>
+#include <sys/time.h> // gettimeofday(), timeradd()
+#include <errno.h>
+#include <ieeefp.h> // finite()
+#include <signal.h> // sigemptyset(), etc
+
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// 0 is never a valid thread id on Solaris since the main thread is 1 and
+// subsequent threads have their ids incremented from there.
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+ return ceil(x);
+}
+
+
+void OS::Setup() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly will cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srandom(static_cast<unsigned int>(seed));
+}
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0; // Solaris runs on a lot of things.
+}
+
+
+int OS::ActivationFrameAlignment() {
+ return STACK_ALIGN;
+}
+
+
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return tzname[0]; // The location of the timezone string on Solaris.
+}
+
+
+double OS::LocalTimeOffset() {
+ // On Solaris, struct tm does not contain a tm_gmtoff field.
+ time_t utc = time(NULL);
+ ASSERT(utc != -1);
+ struct tm* loc = localtime(&utc);
+ ASSERT(loc != NULL);
+ return static_cast<double>((mktime(loc) - utc) * msPerSecond);
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification). The estimate is conservative, ie, not all addresses in
+// 'allocated' space are actually allocated to our heap. The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ lowest_ever_allocated = Min(lowest_ever_allocated, address);
+ highest_ever_allocated =
+ Max(highest_ever_allocated,
+ reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+ return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+ return static_cast<size_t>(getpagesize());
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+ if (mbase == MAP_FAILED) {
+ LOG(StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ UpdateAllocatedSpaceLimits(mbase, msize);
+ return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+ // TODO(1240712): munmap has a return value which is ignored here.
+ int result = munmap(address, size);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+ // TODO(1240712): mprotect has a return value which is ignored here.
+ mprotect(address, size, PROT_READ);
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+ // TODO(1240712): mprotect has a return value which is ignored here.
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ mprotect(address, size, prot);
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+ useconds_t ms = static_cast<useconds_t>(milliseconds);
+ usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+ // Redirect to std abort to signal abnormal program termination.
+ abort();
+}
+
+
+void OS::DebugBreak() {
+ asm("int $3");
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+}
+
+
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+ int frames_size = frames.length();
+ void** addresses = NewArray<void*>(frames_size);
+
+ int frames_count = backtrace(addresses, frames_size);
+
+ char** symbols;
+ symbols = backtrace_symbols(addresses, frames_count);
+ if (symbols == NULL) {
+ DeleteArray(addresses);
+ return kStackWalkError;
+ }
+
+ for (int i = 0; i < frames_count; i++) {
+ frames[i].address = addresses[i];
+ // Format a text representation of the frame based on the information
+ // available.
+ SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+ "%s",
+ symbols[i]);
+ // Make sure line termination is in place.
+ frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+ }
+
+ DeleteArray(addresses);
+ free(symbols);
+
+ return frames_count;
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = mmap(NULL, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
+ size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != MAP_FAILED;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd, kMmapFdOffset)) {
+ return false;
+ }
+
+ UpdateAllocatedSpaceLimits(address, size);
+ return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+ explicit PlatformData(ThreadHandle::Kind kind) {
+ Initialize(kind);
+ }
+
+ void Initialize(ThreadHandle::Kind kind) {
+ switch (kind) {
+ case ThreadHandle::SELF: thread_ = pthread_self(); break;
+ case ThreadHandle::INVALID: thread_ = kNoThread; break;
+ }
+ }
+
+ pthread_t thread_; // Thread handle for pthread.
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+ data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+ data_->Initialize(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+ delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+ return pthread_equal(data_->thread_, pthread_self());
+}
+
+
+bool ThreadHandle::IsValid() const {
+ return data_->thread_ != kNoThread;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+}
+
+
+Thread::~Thread() {
+}
+
+
+static void* ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ // This is also initialized by the first argument to pthread_create() but we
+ // don't know which thread will run first (the original thread or the new
+ // one) so we initialize it here too.
+ thread->thread_handle_data()->thread_ = pthread_self();
+ ASSERT(thread->IsValid());
+ thread->Run();
+ return NULL;
+}
+
+
+void Thread::Start() {
+ pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+ ASSERT(IsValid());
+}
+
+
+void Thread::Join() {
+ pthread_join(thread_handle_data()->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+ pthread_key_t key;
+ int result = pthread_key_create(&key, NULL);
+ USE(result);
+ ASSERT(result == 0);
+ return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ int result = pthread_key_delete(pthread_key);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+ sched_yield();
+}
+
+
+class SolarisMutex : public Mutex {
+ public:
+
+ SolarisMutex() {
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ pthread_mutex_init(&mutex_, &attr);
+ }
+
+ ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
+
+ int Lock() { return pthread_mutex_lock(&mutex_); }
+
+ int Unlock() { return pthread_mutex_unlock(&mutex_); }
+
+ private:
+ pthread_mutex_t mutex_;
+};
+
+
+Mutex* OS::CreateMutex() {
+ return new SolarisMutex();
+}
+
+
+class SolarisSemaphore : public Semaphore {
+ public:
+ explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
+ virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
+
+ virtual void Wait();
+ virtual bool Wait(int timeout);
+ virtual void Signal() { sem_post(&sem_); }
+ private:
+ sem_t sem_;
+};
+
+
+void SolarisSemaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&sem_);
+ if (result == 0) return; // Successfully got semaphore.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
+ (ts)->tv_sec = (tv)->tv_sec; \
+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \
+} while (false)
+#endif
+
+
+#ifndef timeradd
+#define timeradd(a, b, result) \
+ do { \
+ (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
+ (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
+ if ((result)->tv_usec >= 1000000) { \
+ ++(result)->tv_sec; \
+ (result)->tv_usec -= 1000000; \
+ } \
+ } while (0)
+#endif
+
+
+bool SolarisSemaphore::Wait(int timeout) {
+ const long kOneSecondMicros = 1000000; // NOLINT
+
+ // Split timeout into second and nanosecond parts.
+ struct timeval delta;
+ delta.tv_usec = timeout % kOneSecondMicros;
+ delta.tv_sec = timeout / kOneSecondMicros;
+
+ struct timeval current_time;
+ // Get the current time.
+ if (gettimeofday(&current_time, NULL) == -1) {
+ return false;
+ }
+
+ // Calculate time for end of timeout.
+ struct timeval end_time;
+ timeradd(&current_time, &delta, &end_time);
+
+ struct timespec ts;
+ TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+ // Wait for semaphore signalled or timeout.
+ while (true) {
+ int result = sem_timedwait(&sem_, &ts);
+ if (result == 0) return true; // Successfully got semaphore.
+ if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+ return new SolarisSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Sampler* active_sampler_ = NULL;
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+ USE(info);
+ if (signal != SIGPROF) return;
+ if (active_sampler_ == NULL) return;
+
+ TickSample sample;
+ sample.pc = 0;
+ sample.sp = 0;
+ sample.fp = 0;
+
+ // We always sample the VM state.
+ sample.state = Logger::state();
+
+ active_sampler_->Tick(&sample);
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() {
+ signal_handler_installed_ = false;
+ }
+
+ bool signal_handler_installed_;
+ struct sigaction old_signal_handler_;
+ struct itimerval old_timer_value_;
+};
+
+
+Sampler::Sampler(int interval, bool profiling)
+ : interval_(interval), profiling_(profiling), active_(false) {
+ data_ = new PlatformData();
+}
+
+
+Sampler::~Sampler() {
+ delete data_;
+}
+
+
+void Sampler::Start() {
+ // There can only be one active sampler at a time on POSIX
+ // platforms.
+ if (active_sampler_ != NULL) return;
+
+ // Request profiling signals.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
+ data_->signal_handler_installed_ = true;
+
+ // Set the itimer to generate a tick for each interval.
+ itimerval itimer;
+ itimer.it_interval.tv_sec = interval_ / 1000;
+ itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
+ itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
+ itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
+ setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
+
+ // Set this sampler as the active sampler.
+ active_sampler_ = this;
+ active_ = true;
+}
+
+
+void Sampler::Stop() {
+ // Restore old signal handler
+ if (data_->signal_handler_installed_) {
+ setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
+ sigaction(SIGPROF, &data_->old_signal_handler_, 0);
+ data_->signal_handler_installed_ = false;
+ }
+
+ // This sampler is no longer the active sampler.
+ active_sampler_ = NULL;
+ active_ = false;
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
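
SolarisSemaphore::Wait(int timeout) above has to build the absolute deadline that sem_timedwait() expects from a relative timeout given in microseconds. The following standalone sketch walks through the same conversion; it assumes a POSIX system that provides sem_timedwait() and the timeradd() macro (the file above defines a fallback for the latter), and the TimedWait name is illustrative only.

// Standalone sketch of the relative-timeout to absolute-deadline conversion
// used by SolarisSemaphore::Wait(int timeout).
#include <semaphore.h>
#include <sys/time.h>
#include <cerrno>

static bool TimedWait(sem_t* sem, int timeout_us) {
  const long kOneSecondMicros = 1000000;

  // Split the relative timeout into whole seconds and leftover microseconds.
  struct timeval delta;
  delta.tv_sec = timeout_us / kOneSecondMicros;
  delta.tv_usec = timeout_us % kOneSecondMicros;

  // Deadline = current wall-clock time + delta.
  struct timeval now;
  if (gettimeofday(&now, NULL) == -1) return false;
  struct timeval end_time;
  timeradd(&now, &delta, &end_time);

  // sem_timedwait() takes a timespec, so convert microseconds to nanoseconds.
  struct timespec ts;
  ts.tv_sec = end_time.tv_sec;
  ts.tv_nsec = end_time.tv_usec * 1000;

  while (true) {
    int result = sem_timedwait(sem, &ts);
    if (result == 0) return true;                          // acquired
    if (result == -1 && errno == ETIMEDOUT) return false;  // deadline passed
    if (result == -1 && errno == EINTR) continue;          // spurious wakeup
    return false;                                          // unexpected error
  }
}
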
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 1be4b77f..81b0d4c1 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1813,13 +1813,13 @@ class Sampler::PlatformData : public Malloced {
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(profiled_thread_, &context) != 0) {
#if V8_HOST_ARCH_X64
- sample.pc = context.Rip;
- sample.sp = context.Rsp;
- sample.fp = context.Rbp;
+ sample.pc = reinterpret_cast<Address>(context.Rip);
+ sample.sp = reinterpret_cast<Address>(context.Rsp);
+ sample.fp = reinterpret_cast<Address>(context.Rbp);
#else
- sample.pc = context.Eip;
- sample.sp = context.Esp;
- sample.fp = context.Ebp;
+ sample.pc = reinterpret_cast<Address>(context.Eip);
+ sample.sp = reinterpret_cast<Address>(context.Esp);
+ sample.fp = reinterpret_cast<Address>(context.Ebp);
#endif
sampler_->SampleStack(&sample);
}
diff --git a/src/platform.h b/src/platform.h
index 75e557cb..bc2e9d64 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -44,6 +44,12 @@
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
+#ifdef __sun
+// On Solaris, to get isinf, INFINITY, fpclassify and other macros one needs
+// to define this symbol.
+#define __C99FEATURES__ 1
+#endif
+
#define V8_INFINITY INFINITY
// Windows specific stuff.
@@ -506,11 +512,18 @@ class Socket {
// TickSample captures the information collected for each sample.
class TickSample {
public:
- TickSample() : pc(0), sp(0), fp(0), state(OTHER), frames_count(0) {}
- uintptr_t pc; // Instruction pointer.
- uintptr_t sp; // Stack pointer.
- uintptr_t fp; // Frame pointer.
- StateTag state; // The state of the VM.
+ TickSample()
+ : pc(NULL),
+ sp(NULL),
+ fp(NULL),
+ function(NULL),
+ state(OTHER),
+ frames_count(0) {}
+ Address pc; // Instruction pointer.
+ Address sp; // Stack pointer.
+ Address fp; // Frame pointer.
+ Address function; // The last called JS function.
+ StateTag state; // The state of the VM.
static const int kMaxFramesCount = 100;
EmbeddedVector<Address, kMaxFramesCount> stack; // Call stack.
int frames_count; // Number of captured frames.
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 3685fcd3..0fcfc33d 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -122,7 +122,10 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
bool is_ascii = subject->IsAsciiRepresentation();
+ // The string has been flattened, so if it is a cons string it contains the
+ // full string in the first part.
if (StringShape(subject_ptr).IsCons()) {
+ ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
subject_ptr = ConsString::cast(subject_ptr)->first();
}
// Ensure that an underlying string has the same ascii-ness.
@@ -141,8 +144,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
start_offset,
input_start,
input_end,
- offsets_vector,
- previous_index == 0);
+ offsets_vector);
return res;
}
@@ -153,14 +155,11 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output,
- bool at_start) {
+ int* output) {
typedef int (*matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int);
+ const byte*, int*, Address, int);
matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
- int at_start_val = at_start ? 1 : 0;
-
// Ensure that the minimum stack has been allocated.
RegExpStack stack;
Address stack_base = RegExpStack::stack_base();
@@ -172,7 +171,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
input_start,
input_end,
output,
- at_start_val,
stack_base,
direct_call);
ASSERT(result <= SUCCESS);
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 2e619bd1..105d8cc4 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -218,8 +218,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
int start_offset,
const byte* input_start,
const byte* input_end,
- int* output,
- bool at_start);
+ int* output);
};
#endif // V8_NATIVE_REGEXP
diff --git a/src/runtime.cc b/src/runtime.cc
index b6da528e..51c1ba23 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -4782,7 +4782,7 @@ static Code* ComputeConstructStub(Handle<SharedFunctionInfo> shared) {
return Code::cast(code);
}
- return Builtins::builtin(Builtins::JSConstructStubGeneric);
+ return shared->construct_stub();
}
@@ -4830,6 +4830,7 @@ static Object* Runtime_NewObject(Arguments args) {
CompileLazyShared(Handle<SharedFunctionInfo>(function->shared()),
CLEAR_EXCEPTION,
0);
+ LOG(FunctionCreateEvent(*function));
}
bool first_allocation = !function->has_initial_map();
@@ -7211,9 +7212,8 @@ Object* Runtime::FindSharedFunctionInfoInScript(Handle<Script> script,
Handle<SharedFunctionInfo> last;
while (!done) {
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- ASSERT(obj != NULL);
+ for (HeapObject* obj = iterator.next();
+ obj != NULL; obj = iterator.next()) {
if (obj->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
if (shared->script() == *script) {
@@ -7669,10 +7669,10 @@ static int DebugReferencedBy(JSObject* target,
int count = 0;
JSObject* last = NULL;
HeapIterator iterator;
- while (iterator.has_next() &&
+ HeapObject* heap_obj = NULL;
+ while (((heap_obj = iterator.next()) != NULL) &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
- HeapObject* heap_obj = iterator.next();
if (heap_obj->IsJSObject()) {
// Skip context extension objects and argument arrays as these are
// checked in the context of functions using them.
@@ -7782,10 +7782,10 @@ static int DebugConstructedBy(JSFunction* constructor, int max_references,
// Iterate the heap.
int count = 0;
HeapIterator iterator;
- while (iterator.has_next() &&
+ HeapObject* heap_obj = NULL;
+ while (((heap_obj = iterator.next()) != NULL) &&
(max_references == 0 || count < max_references)) {
// Only look at all JSObjects.
- HeapObject* heap_obj = iterator.next();
if (heap_obj->IsJSObject()) {
JSObject* obj = JSObject::cast(heap_obj);
if (obj->map()->constructor() == constructor) {
@@ -7933,8 +7933,8 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
// script data.
Handle<Script> script;
HeapIterator iterator;
- while (script.is_null() && iterator.has_next()) {
- HeapObject* obj = iterator.next();
+ HeapObject* obj = NULL;
+ while (script.is_null() && ((obj = iterator.next()) != NULL)) {
// If a script is found check if it has the script data requested.
if (obj->IsScript()) {
if (Script::cast(obj)->name()->IsString()) {
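
The runtime.cc hunks above are part of a patch-wide switch from the has_next()/next() pair to a next() that returns NULL once the iteration is exhausted (the iterator changes themselves appear in the spaces.* hunks further down). A minimal fragment of the new caller idiom, using only calls that appear in the hunks; the CountSharedFunctionInfos helper is illustrative, not part of the patch.

// Illustrative fragment: next() returning NULL terminates the loop, so no
// separate has_next() test is needed.
static int CountSharedFunctionInfos() {
  int count = 0;
  HeapIterator iterator;
  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
    if (obj->IsSharedFunctionInfo()) count++;
  }
  return count;
}
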
diff --git a/src/runtime.js b/src/runtime.js
index ce2f197f..c4c855eb 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -541,7 +541,9 @@ function ToObject(x) {
if (IS_STRING(x)) return new $String(x);
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
- if (x == null) throw %MakeTypeError('null_to_object', []);
+ if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
+ throw %MakeTypeError('null_to_object', []);
+ }
return x;
}
diff --git a/src/serialize.cc b/src/serialize.cc
index ec3a967b..6b858939 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -44,67 +44,6 @@
namespace v8 {
namespace internal {
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class SerializationAddressMapper {
- public:
- static bool IsMapped(HeapObject* obj) {
- EnsureMapExists();
- return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
- }
-
- static int MappedTo(HeapObject* obj) {
- ASSERT(IsMapped(obj));
- return static_cast<int>(reinterpret_cast<intptr_t>(
- serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
- }
-
- static void Map(HeapObject* obj, int to) {
- EnsureMapExists();
- ASSERT(!IsMapped(obj));
- HashMap::Entry* entry =
- serialization_map_->Lookup(Key(obj), Hash(obj), true);
- entry->value = Value(to);
- }
-
- static void Zap() {
- if (serialization_map_ != NULL) {
- delete serialization_map_;
- }
- serialization_map_ = NULL;
- }
-
- private:
- static bool SerializationMatchFun(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
- }
-
- static void* Key(HeapObject* obj) {
- return reinterpret_cast<void*>(obj->address());
- }
-
- static void* Value(int v) {
- return reinterpret_cast<void*>(v);
- }
-
- static void EnsureMapExists() {
- if (serialization_map_ == NULL) {
- serialization_map_ = new HashMap(&SerializationMatchFun);
- }
- }
-
- static HashMap* serialization_map_;
-};
-
-
-HashMap* SerializationAddressMapper::serialization_map_ = NULL;
-
-
-
// -----------------------------------------------------------------------------
// Coding of external references.
@@ -647,10 +586,13 @@ void Deserializer::Deserialize() {
ASSERT_EQ(NULL, ThreadState::FirstInUse());
// No active handles.
ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+ // Make sure the entire partial snapshot cache is traversed, filling it with
+ // valid object pointers.
+ partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder();
- Heap::IterateRoots(this, VISIT_ONLY_STRONG);
- ASSERT(source_->AtEOF());
+ Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ Heap::IterateWeakRoots(this, VISIT_ALL);
}
@@ -666,7 +608,8 @@ void Deserializer::DeserializePartial(Object** root) {
}
-void Deserializer::TearDown() {
+Deserializer::~Deserializer() {
+ ASSERT(source_->AtEOF());
if (external_reference_decoder_ != NULL) {
delete external_reference_decoder_;
external_reference_decoder_ = NULL;
@@ -891,6 +834,16 @@ void Deserializer::ReadChunk(Object** current,
*current++ = Heap::roots_address()[root_id];
break;
}
+ case PARTIAL_SNAPSHOT_CACHE_ENTRY: {
+ int cache_index = source_->GetInt();
+ *current++ = partial_snapshot_cache_[cache_index];
+ break;
+ }
+ case SYNCHRONIZE: {
+ // If we get here it indicates a mismatch between the number of GC roots
+ // used when serializing and when deserializing.
+ UNREACHABLE();
+ }
default:
UNREACHABLE();
}
@@ -944,7 +897,6 @@ Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),
current_root_index_(0),
external_reference_encoder_(NULL),
- partial_(false),
large_object_total_(0) {
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
@@ -952,7 +904,7 @@ Serializer::Serializer(SnapshotByteSink* sink)
}
-void Serializer::Serialize() {
+void StartupSerializer::SerializeStrongReferences() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
// No active or weak handles.
@@ -966,20 +918,30 @@ void Serializer::Serialize() {
CHECK_NE(v8::INSTALLED, ext->state());
}
external_reference_encoder_ = new ExternalReferenceEncoder();
- Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+ Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
- SerializationAddressMapper::Zap();
}
-void Serializer::SerializePartial(Object** object) {
- partial_ = true;
+void PartialSerializer::Serialize(Object** object) {
external_reference_encoder_ = new ExternalReferenceEncoder();
this->VisitPointer(object);
+
+ // After we have done the partial serialization the partial snapshot cache
+ // will contain some references needed to decode the partial snapshot. We
+ // fill it up with undefineds so that it has a predictable length and the
+ // deserialization code doesn't need to know the length.
+ for (int index = partial_snapshot_cache_length_;
+ index < kPartialSnapshotCacheCapacity;
+ index++) {
+ partial_snapshot_cache_[index] = Heap::undefined_value();
+ startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
+ }
+ partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
+
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
- SerializationAddressMapper::Zap();
}
@@ -998,7 +960,54 @@ void Serializer::VisitPointers(Object** start, Object** end) {
}
-int Serializer::RootIndex(HeapObject* heap_object) {
+Object* SerializerDeserializer::partial_snapshot_cache_[
+ kPartialSnapshotCacheCapacity];
+int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
+
+
+// This ensures that the partial snapshot cache keeps things alive during GC and
+// tracks their movement. When it is called during serialization of the startup
+// snapshot the partial snapshot is empty, so nothing happens. When the partial
+// (context) snapshot is created, this array is populated with the pointers that
+// the partial snapshot will need. As that happens we emit serialized objects to
+// the startup snapshot that correspond to the elements of this cache array. On
+// deserialization we therefore need to visit the cache array. This fills it up
+// with pointers to deserialized objects.
+void SerializerDeserializer::Iterate(ObjectVisitor *visitor) {
+ visitor->VisitPointers(
+ &partial_snapshot_cache_[0],
+ &partial_snapshot_cache_[partial_snapshot_cache_length_]);
+}
+
+
+// When deserializing we need to set the size of the snapshot cache. This means
+// the root iteration code (above) will iterate over array elements, writing the
+// references to deserialized objects in them.
+void SerializerDeserializer::SetSnapshotCacheSize(int size) {
+ partial_snapshot_cache_length_ = size;
+}
+
+
+int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
+ for (int i = 0; i < partial_snapshot_cache_length_; i++) {
+ Object* entry = partial_snapshot_cache_[i];
+ if (entry == heap_object) return i;
+ }
+ // We didn't find the object in the cache. So we add it to the cache and
+ // then visit the pointer so that it becomes part of the startup snapshot
+ // and we can refer to it from the partial snapshot.
+ int length = partial_snapshot_cache_length_;
+ CHECK(length < kPartialSnapshotCacheCapacity);
+ partial_snapshot_cache_[length] = heap_object;
+ startup_serializer_->VisitPointer(&partial_snapshot_cache_[length]);
+ // We don't recurse from the startup snapshot generator into the partial
+ // snapshot generator.
+ ASSERT(length == partial_snapshot_cache_length_);
+ return partial_snapshot_cache_length_++;
+}
+
+
+int PartialSerializer::RootIndex(HeapObject* heap_object) {
for (int i = 0; i < Heap::kRootListLength; i++) {
Object* root = Heap::roots_address()[i];
if (root == heap_object) return i;
@@ -1007,67 +1016,136 @@ int Serializer::RootIndex(HeapObject* heap_object) {
}
-void Serializer::SerializeObject(
- Object* o,
+// Encode the location of an already deserialized object in order to write its
+// location into a later object. We can encode the location as an offset from
+// the start of the deserialized objects or as an offset backwards from the
+// current allocation pointer.
+void Serializer::SerializeReferenceToPreviousObject(
+ int space,
+ int address,
ReferenceRepresentation reference_representation) {
- CHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
- if (partial_) {
- int root_index = RootIndex(heap_object);
- if (root_index != kInvalidRootIndex) {
- sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
- sink_->PutInt(root_index, "root_index");
- return;
+ int offset = CurrentAllocationAddress(space) - address;
+ bool from_start = true;
+ if (SpaceIsPaged(space)) {
+ // For paged space it is simple to encode back from current allocation if
+ // the object is on the same page as the current allocation pointer.
+ if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
+ (address >> kPageSizeBits)) {
+ from_start = false;
+ address = offset;
}
- // All the symbols that the snapshot needs should be in the root table.
- ASSERT(!heap_object->IsSymbol());
- }
- if (SerializationAddressMapper::IsMapped(heap_object)) {
- int space = SpaceOfAlreadySerializedObject(heap_object);
- int address = SerializationAddressMapper::MappedTo(heap_object);
- int offset = CurrentAllocationAddress(space) - address;
- bool from_start = true;
- if (SpaceIsPaged(space)) {
- if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
- (address >> kPageSizeBits)) {
- from_start = false;
- address = offset;
- }
- } else if (space == NEW_SPACE) {
- if (offset < address) {
- from_start = false;
- address = offset;
- }
+ } else if (space == NEW_SPACE) {
+ // For new space it is always simple to encode back from current allocation.
+ if (offset < address) {
+ from_start = false;
+ address = offset;
}
- // If we are actually dealing with real offsets (and not a numbering of
- // all objects) then we should shift out the bits that are always 0.
- if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
- if (reference_representation == CODE_TARGET_REPRESENTATION) {
- if (from_start) {
- sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
- sink_->PutInt(address, "address");
- } else {
- sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
- sink_->PutInt(address, "address");
- }
+ }
+ // If we are actually dealing with real offsets (and not a numbering of
+ // all objects) then we should shift out the bits that are always 0.
+ if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
+ // On some architectures references between code objects are encoded
+ // specially (as relative offsets). Such references have their own
+ // special tags to simplify the deserializer.
+ if (reference_representation == CODE_TARGET_REPRESENTATION) {
+ if (from_start) {
+ sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
+ sink_->PutInt(address, "address");
} else {
- CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
- if (from_start) {
-#define COMMON_REFS_CASE(tag, common_space, common_offset) \
- if (space == common_space && address == common_offset) { \
- sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
- } else /* NOLINT */
- COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+ sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
+ sink_->PutInt(address, "address");
+ }
+ } else {
+ // Regular absolute references.
+ CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
+ if (from_start) {
+ // There are some common offsets that have their own specialized encoding.
+#define COMMON_REFS_CASE(tag, common_space, common_offset) \
+ if (space == common_space && address == common_offset) { \
+ sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
+ } else /* NOLINT */
+ COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
#undef COMMON_REFS_CASE
- { /* NOLINT */
- sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
- sink_->PutInt(address, "address");
- }
- } else {
- sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ { /* NOLINT */
+ sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
sink_->PutInt(address, "address");
}
+ } else {
+ sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ sink_->PutInt(address, "address");
}
+ }
+}
+
+
+void StartupSerializer::SerializeObject(
+ Object* o,
+ ReferenceRepresentation reference_representation) {
+ CHECK(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+
+ if (address_mapper_.IsMapped(heap_object)) {
+ int space = SpaceOfAlreadySerializedObject(heap_object);
+ int address = address_mapper_.MappedTo(heap_object);
+ SerializeReferenceToPreviousObject(space,
+ address,
+ reference_representation);
+ } else {
+ // Object has not yet been serialized. Serialize it here.
+ ObjectSerializer object_serializer(this,
+ heap_object,
+ sink_,
+ reference_representation);
+ object_serializer.Serialize();
+ }
+}
+
+
+void StartupSerializer::SerializeWeakReferences() {
+ for (int i = partial_snapshot_cache_length_;
+ i < kPartialSnapshotCacheCapacity;
+ i++) {
+ sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
+ }
+ Heap::IterateWeakRoots(this, VISIT_ALL);
+}
+
+
+void PartialSerializer::SerializeObject(
+ Object* o,
+ ReferenceRepresentation reference_representation) {
+ CHECK(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+
+ int root_index;
+ if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+ sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->PutInt(root_index, "root_index");
+ return;
+ }
+
+ if (ShouldBeInThePartialSnapshotCache(heap_object)) {
+ int cache_index = PartialSnapshotCacheIndex(heap_object);
+ sink_->Put(PARTIAL_SNAPSHOT_CACHE_ENTRY, "PartialSnapshotCache");
+ sink_->PutInt(cache_index, "partial_snapshot_cache_index");
+ return;
+ }
+
+ // Pointers from the partial snapshot to the objects in the startup snapshot
+ // should go through the root array or through the partial snapshot cache.
+ // If this is not the case you may have to add something to the root array.
+ ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
+ // All the symbols that the partial snapshot needs should be either in the
+ // root table or in the partial snapshot cache.
+ ASSERT(!heap_object->IsSymbol());
+
+ if (address_mapper_.IsMapped(heap_object)) {
+ int space = SpaceOfAlreadySerializedObject(heap_object);
+ int address = address_mapper_.MappedTo(heap_object);
+ SerializeReferenceToPreviousObject(space,
+ address,
+ reference_representation);
} else {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this,
@@ -1079,7 +1157,6 @@ void Serializer::SerializeObject(
}
-
void Serializer::ObjectSerializer::Serialize() {
int space = Serializer::SpaceOfObject(object_);
int size = object_->Size();
@@ -1096,9 +1173,8 @@ void Serializer::ObjectSerializer::Serialize() {
// Mark this object as already serialized.
bool start_new_page;
- SerializationAddressMapper::Map(
- object_,
- serializer_->Allocate(space, size, &start_new_page));
+ int offset = serializer_->Allocate(space, size, &start_new_page);
+ serializer_->address_mapper()->AddMapping(object_, offset);
if (start_new_page) {
sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
sink_->PutSection(space, "NewPageSpace");
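
The static SerializationAddressMapper deleted at the top of this file comes back in serialize.h (below) as a per-serializer member, so each serializer instance tracks its own object-to-offset mapping. A tiny illustrative fragment (not part of the patch) of the lookup-or-record pattern that both SerializeObject() implementations above follow; the helper name and parameters are hypothetical.

// Illustrative only; 'mapper' stands in for a serializer's address_mapper().
static void RecordOrBackReference(SerializationAddressMapper* mapper,
                                  HeapObject* obj,
                                  int allocation_offset) {
  if (mapper->IsMapped(obj)) {
    // Already serialized: a back reference to its recorded offset is emitted.
    int previous_offset = mapper->MappedTo(obj);
    USE(previous_offset);
  } else {
    // First encounter: remember where the object will live after
    // deserialization so later references can point back at it.
    mapper->AddMapping(obj, allocation_offset);
  }
}
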
diff --git a/src/serialize.h b/src/serialize.h
index 8dd193f5..ce3b0061 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -147,7 +147,7 @@ class SnapshotByteSource {
return position_ == length_;
}
- const int position() { return position_; }
+ int position() { return position_; }
private:
const byte* data_;
@@ -185,9 +185,14 @@ class SnapshotByteSource {
f(14, 32) \
f(15, 36)
-// The SerDes class is a common superclass for Serializer and Deserializer
-// which is used to store common constants and methods used by both.
-class SerDes: public ObjectVisitor {
+// The Serializer/Deserializer class is a common superclass for Serializer and
+// Deserializer which is used to store common constants and methods used by
+// both.
+class SerializerDeserializer: public ObjectVisitor {
+ public:
+ static void Iterate(ObjectVisitor* visitor);
+ static void SetSnapshotCacheSize(int size);
+
protected:
enum DataType {
RAW_DATA_SERIALIZATION = 0,
@@ -202,7 +207,8 @@ class SerDes: public ObjectVisitor {
START_NEW_PAGE_SERIALIZATION = 37,
NATIVES_STRING_RESOURCE = 38,
ROOT_SERIALIZATION = 39,
- // Free: 40-47.
+ PARTIAL_SNAPSHOT_CACHE_ENTRY = 40,
+ // Free: 41-47.
BACKREF_SERIALIZATION = 48,
// One per space, must be kSpaceMask aligned.
// Free: 57-63.
@@ -227,17 +233,21 @@ class SerDes: public ObjectVisitor {
static inline bool SpaceIsPaged(int space) {
return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
}
+
+ static int partial_snapshot_cache_length_;
+ static const int kPartialSnapshotCacheCapacity = 1024;
+ static Object* partial_snapshot_cache_[];
};
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public SerDes {
+class Deserializer: public SerializerDeserializer {
public:
// Create a deserializer from a snapshot byte source.
explicit Deserializer(SnapshotByteSource* source);
- virtual ~Deserializer() { }
+ virtual ~Deserializer();
// Deserialize the snapshot into an empty heap.
void Deserialize();
@@ -249,8 +259,6 @@ class Deserializer: public SerDes {
virtual void Synchronize(const char* tag);
#endif
- static void TearDown();
-
private:
virtual void VisitPointers(Object** start, Object** end);
@@ -272,7 +280,7 @@ class Deserializer: public SerDes {
// (In large object space we are keeping track of individual objects
// rather than pages.) In new space we just need the address of the
// first object and the others will flow from that.
- List<Address> pages_[SerDes::kNumberOfSpaces];
+ List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
SnapshotByteSource* source_;
static ExternalReferenceDecoder* external_reference_decoder_;
@@ -300,13 +308,62 @@ class SnapshotByteSink {
};
-class Serializer : public SerDes {
+// Mapping objects to their location after deserialization.
+// This is used during building, but not at runtime by V8.
+class SerializationAddressMapper {
+ public:
+ SerializationAddressMapper()
+ : serialization_map_(new HashMap(&SerializationMatchFun)),
+ no_allocation_(new AssertNoAllocation()) { }
+
+ ~SerializationAddressMapper() {
+ delete serialization_map_;
+ delete no_allocation_;
+ }
+
+ bool IsMapped(HeapObject* obj) {
+ return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
+ }
+
+ int MappedTo(HeapObject* obj) {
+ ASSERT(IsMapped(obj));
+ return static_cast<int>(reinterpret_cast<intptr_t>(
+ serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
+ }
+
+ void AddMapping(HeapObject* obj, int to) {
+ ASSERT(!IsMapped(obj));
+ HashMap::Entry* entry =
+ serialization_map_->Lookup(Key(obj), Hash(obj), true);
+ entry->value = Value(to);
+ }
+
+ private:
+ static bool SerializationMatchFun(void* key1, void* key2) {
+ return key1 == key2;
+ }
+
+ static uint32_t Hash(HeapObject* obj) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+ }
+
+ static void* Key(HeapObject* obj) {
+ return reinterpret_cast<void*>(obj->address());
+ }
+
+ static void* Value(int v) {
+ return reinterpret_cast<void*>(v);
+ }
+
+ HashMap* serialization_map_;
+ AssertNoAllocation* no_allocation_;
+ DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
+};
+
+
+class Serializer : public SerializerDeserializer {
public:
explicit Serializer(SnapshotByteSink* sink);
- // Serialize the current state of the heap.
- void Serialize();
- // Serialize a single object and the objects reachable from it.
- void SerializePartial(Object** obj);
void VisitPointers(Object** start, Object** end);
// You can call this after serialization to find out how much space was used
// in each space.
@@ -327,15 +384,20 @@ class Serializer : public SerDes {
// going on.
static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
static bool enabled() { return serialization_enabled_; }
+ SerializationAddressMapper* address_mapper() { return &address_mapper_; }
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
- private:
+ protected:
enum ReferenceRepresentation {
TAGGED_REPRESENTATION, // A tagged object reference.
CODE_TARGET_REPRESENTATION // A reference to first instruction in target.
};
+ static const int kInvalidRootIndex = -1;
+ virtual int RootIndex(HeapObject* heap_object) = 0;
+ virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
+
class ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer* serializer,
@@ -371,7 +433,12 @@ class Serializer : public SerDes {
int bytes_processed_so_far_;
};
- void SerializeObject(Object* o, ReferenceRepresentation representation);
+ virtual void SerializeObject(Object* o,
+ ReferenceRepresentation representation) = 0;
+ void SerializeReferenceToPreviousObject(
+ int space,
+ int address,
+ ReferenceRepresentation reference_representation);
void InitializeAllocators();
// This will return the space for an object. If the object is in large
// object space it may return kLargeCode or kLargeFixedArray in order
@@ -386,8 +453,6 @@ class Serializer : public SerDes {
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
- int RootIndex(HeapObject* heap_object);
- static const int kInvalidRootIndex = -1;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
@@ -397,11 +462,11 @@ class Serializer : public SerDes {
SnapshotByteSink* sink_;
int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
- bool partial_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
static bool too_late_to_enable_now_;
int large_object_total_;
+ SerializationAddressMapper address_mapper_;
friend class ObjectSerializer;
friend class Deserializer;
@@ -409,6 +474,62 @@ class Serializer : public SerDes {
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
+
+class PartialSerializer : public Serializer {
+ public:
+ PartialSerializer(Serializer* startup_snapshot_serializer,
+ SnapshotByteSink* sink)
+ : Serializer(sink),
+ startup_serializer_(startup_snapshot_serializer) {
+ }
+
+ // Serialize the objects reachable from a single object pointer.
+ virtual void Serialize(Object** o);
+ virtual void SerializeObject(Object* o,
+ ReferenceRepresentation representation);
+
+ protected:
+ virtual int RootIndex(HeapObject* o);
+ virtual int PartialSnapshotCacheIndex(HeapObject* o);
+ virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+ return o->IsString() || o->IsSharedFunctionInfo();
+ }
+
+ private:
+ Serializer* startup_serializer_;
+ DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
+};
+
+
+class StartupSerializer : public Serializer {
+ public:
+ explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
+ // Clear the cache of objects used by the partial snapshot. After the
+ // strong roots have been serialized we can create a partial snapshot
+ // which will repopulate the cache with objects needed by that partial
+ // snapshot.
+ partial_snapshot_cache_length_ = 0;
+ }
+ // Serialize the current state of the heap. The order is:
+ // 1) Strong references.
+ // 2) Partial snapshot cache.
+ // 3) Weak references (eg the symbol table).
+ virtual void SerializeStrongReferences();
+ virtual void SerializeObject(Object* o,
+ ReferenceRepresentation representation);
+ void SerializeWeakReferences();
+ void Serialize() {
+ SerializeStrongReferences();
+ SerializeWeakReferences();
+ }
+
+ private:
+ virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
+ virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+ return false;
+ }
+};
+
} } // namespace v8::internal
#endif // V8_SERIALIZE_H_
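
The split into StartupSerializer and PartialSerializer implies a particular calling order, spelled out in the StartupSerializer comment above: strong roots first, then the partial (context) snapshot, which fills the partial snapshot cache, and finally the weak roots. A hedged sketch of that order, assuming the caller (mksnapshot, for instance) supplies two SnapshotByteSink implementations; WriteSnapshots and its parameters are illustrative, not part of the patch.

// Illustrative only; the call order follows the StartupSerializer comment.
static void WriteSnapshots(SnapshotByteSink* startup_sink,
                           SnapshotByteSink* partial_sink,
                           Object** context_root) {
  StartupSerializer startup_serializer(startup_sink);
  // 1) Strong references go into the startup snapshot.
  startup_serializer.SerializeStrongReferences();
  // 2) The partial snapshot pulls shared objects through the partial snapshot
  //    cache, emitting them into the startup snapshot as it goes.
  PartialSerializer partial_serializer(&startup_serializer, partial_sink);
  partial_serializer.Serialize(context_root);
  // 3) Weak references (e.g. the symbol table), plus padding for any unused
  //    tail of the partial snapshot cache.
  startup_serializer.SerializeWeakReferences();
}
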
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 448c3fd7..1e81b8ec 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -59,42 +59,4 @@ bool Snapshot::Initialize(const char* snapshot_file) {
return false;
}
-
-class FileByteSink : public SnapshotByteSink {
- public:
- explicit FileByteSink(const char* snapshot_file) {
- fp_ = OS::FOpen(snapshot_file, "wb");
- if (fp_ == NULL) {
- PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
- exit(1);
- }
- }
- virtual ~FileByteSink() {
- if (fp_ != NULL) {
- fclose(fp_);
- }
- }
- virtual void Put(int byte, const char* description) {
- if (fp_ != NULL) {
- fputc(byte, fp_);
- }
- }
- virtual int Position() {
- return ftell(fp_);
- }
-
- private:
- FILE* fp_;
-};
-
-
-bool Snapshot::WriteToFile(const char* snapshot_file) {
- FileByteSink file(snapshot_file);
- Serializer ser(&file);
- ser.Serialize();
- return true;
-}
-
-
-
} } // namespace v8::internal
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 847bb9ad..4fd8a6c8 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -36,32 +36,6 @@ namespace internal {
// -----------------------------------------------------------------------------
-// HeapObjectIterator
-
-bool HeapObjectIterator::has_next() {
- if (cur_addr_ < cur_limit_) {
- return true; // common case
- }
- ASSERT(cur_addr_ == cur_limit_);
- return HasNextInNextPage(); // slow path
-}
-
-
-HeapObject* HeapObjectIterator::next() {
- ASSERT(has_next());
-
- HeapObject* obj = HeapObject::FromAddress(cur_addr_);
- int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
- ASSERT_OBJECT_SIZE(obj_size);
-
- cur_addr_ += obj_size;
- ASSERT(cur_addr_ <= cur_limit_);
-
- return obj;
-}
-
-
-// -----------------------------------------------------------------------------
// PageIterator
bool PageIterator::has_next() {
diff --git a/src/spaces.cc b/src/spaces.cc
index cd093980..28509003 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -82,8 +82,8 @@ void HeapObjectIterator::Initialize(Address cur, Address end,
}
-bool HeapObjectIterator::HasNextInNextPage() {
- if (cur_addr_ == end_addr_) return false;
+HeapObject* HeapObjectIterator::FromNextPage() {
+ if (cur_addr_ == end_addr_) return NULL;
Page* cur_page = Page::FromAllocationTop(cur_addr_);
cur_page = cur_page->next_page();
@@ -92,12 +92,12 @@ bool HeapObjectIterator::HasNextInNextPage() {
cur_addr_ = cur_page->ObjectAreaStart();
cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
- if (cur_addr_ == end_addr_) return false;
+ if (cur_addr_ == end_addr_) return NULL;
ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
Verify();
#endif
- return true;
+ return FromCurrentPage();
}
@@ -1437,7 +1437,8 @@ void NewSpace::ClearHistograms() {
void NewSpace::CollectStatistics() {
ClearHistograms();
SemiSpaceIterator it(this);
- while (it.has_next()) RecordAllocation(it.next());
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+ RecordAllocation(obj);
}
@@ -2054,8 +2055,7 @@ static void CollectCommentStatistics(RelocIterator* it) {
// - by code comment
void PagedSpace::CollectCodeStatistics() {
HeapObjectIterator obj_it(this);
- while (obj_it.has_next()) {
- HeapObject* obj = obj_it.next();
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
code_kind_statistics[code->kind()] += code->Size();
@@ -2157,7 +2157,8 @@ void OldSpace::ReportStatistics() {
ClearHistograms();
HeapObjectIterator obj_it(this);
- while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+ CollectHistogramInfo(obj);
ReportHistogram(true);
}
@@ -2393,7 +2394,8 @@ void FixedSpace::ReportStatistics() {
ClearHistograms();
HeapObjectIterator obj_it(this);
- while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+ CollectHistogramInfo(obj);
ReportHistogram(false);
}
@@ -2462,7 +2464,8 @@ LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
HeapObject* LargeObjectIterator::next() {
- ASSERT(has_next());
+ if (current_ == NULL) return NULL;
+
HeapObject* object = current_->GetObject();
current_ = current_->next();
return object;
@@ -2639,8 +2642,7 @@ void LargeObjectSpace::ClearRSet() {
ASSERT(Page::is_rset_in_use());
LargeObjectIterator it(this);
- while (it.has_next()) {
- HeapObject* object = it.next();
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays need remembered set support.
if (object->IsFixedArray()) {
@@ -2668,11 +2670,10 @@ void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
30);
LargeObjectIterator it(this);
- while (it.has_next()) {
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays can possibly contain pointers to
// the young generation.
- HeapObject* object = it.next();
if (object->IsFixedArray()) {
// Iterate the normal page remembered set range.
Page* page = Page::FromAddress(object->address());
@@ -2718,9 +2719,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
// Free the chunk.
- if (object->IsCode()) {
- LOG(CodeDeleteEvent(object->address()));
- }
+ MarkCompactCollector::ReportDeleteIfNeeded(object);
size_ -= static_cast<int>(chunk_size);
page_count_--;
MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
@@ -2800,8 +2799,8 @@ void LargeObjectSpace::Verify() {
void LargeObjectSpace::Print() {
LargeObjectIterator it(this);
- while (it.has_next()) {
- it.next()->Print();
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ obj->Print();
}
}
@@ -2811,9 +2810,9 @@ void LargeObjectSpace::ReportStatistics() {
int num_objects = 0;
ClearHistograms();
LargeObjectIterator it(this);
- while (it.has_next()) {
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
num_objects++;
- CollectHistogramInfo(it.next());
+ CollectHistogramInfo(obj);
}
PrintF(" number of objects %d\n", num_objects);
@@ -2823,8 +2822,7 @@ void LargeObjectSpace::ReportStatistics() {
void LargeObjectSpace::CollectCodeStatistics() {
LargeObjectIterator obj_it(this);
- while (obj_it.has_next()) {
- HeapObject* obj = obj_it.next();
+ for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
code_kind_statistics[code->kind()] += code->Size();
@@ -2835,8 +2833,7 @@ void LargeObjectSpace::CollectCodeStatistics() {
void LargeObjectSpace::PrintRSet() {
LargeObjectIterator it(this);
- while (it.has_next()) {
- HeapObject* object = it.next();
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
if (object->IsFixedArray()) {
Page* page = Page::FromAddress(object->address());
diff --git a/src/spaces.h b/src/spaces.h
index 4786fb4d..f7a04398 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -597,15 +597,14 @@ class MemoryAllocator : public AllStatic {
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
-// NOTE: The space specific object iterators also implements the own has_next()
-// and next() methods which are used to avoid using virtual functions
+// NOTE: The space-specific object iterators also implement their own next()
+// method, which is used to avoid using virtual functions when
// iterating a specific space.
class ObjectIterator : public Malloced {
public:
virtual ~ObjectIterator() { }
- virtual bool has_next_object() = 0;
virtual HeapObject* next_object() = 0;
};
@@ -645,11 +644,11 @@ class HeapObjectIterator: public ObjectIterator {
Address start,
HeapObjectCallback size_func);
- inline bool has_next();
- inline HeapObject* next();
+ inline HeapObject* next() {
+ return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
+ }
// implementation of ObjectIterator.
- virtual bool has_next_object() { return has_next(); }
virtual HeapObject* next_object() { return next(); }
private:
@@ -659,9 +658,21 @@ class HeapObjectIterator: public ObjectIterator {
HeapObjectCallback size_func_; // size function
Page* end_page_; // caches the page of the end address
- // Slow path of has_next, checks whether there are more objects in
- // the next page.
- bool HasNextInNextPage();
+ HeapObject* FromCurrentPage() {
+ ASSERT(cur_addr_ < cur_limit_);
+
+ HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+ int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+ ASSERT_OBJECT_SIZE(obj_size);
+
+ cur_addr_ += obj_size;
+ ASSERT(cur_addr_ <= cur_limit_);
+
+ return obj;
+ }
+
+ // Slow path of next, goes into the next page.
+ HeapObject* FromNextPage();
// Initializes fields.
void Initialize(Address start, Address end, HeapObjectCallback size_func);
@@ -982,6 +993,18 @@ class PagedSpace : public Space {
return Page::FromAllocationTop(alloc_info.limit);
}
+ int CountPagesToTop() {
+ Page* p = Page::FromAllocationTop(allocation_info_.top);
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ int counter = 1;
+ while (it.has_next()) {
+ if (it.next() == p) return counter;
+ counter++;
+ }
+ UNREACHABLE();
+ return -1;
+ }
+
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS. Newly allocated
// pages are append to the last_page;
@@ -1194,10 +1217,8 @@ class SemiSpaceIterator : public ObjectIterator {
SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
SemiSpaceIterator(NewSpace* space, Address start);
- bool has_next() {return current_ < limit_; }
-
HeapObject* next() {
- ASSERT(has_next());
+ if (current_ == limit_) return NULL;
HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1207,7 +1228,6 @@ class SemiSpaceIterator : public ObjectIterator {
}
// Implementation of the ObjectIterator functions.
- virtual bool has_next_object() { return has_next(); }
virtual HeapObject* next_object() { return next(); }
private:
@@ -1753,8 +1773,11 @@ class FixedSpace : public PagedSpace {
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
- MapSpace(int max_capacity, AllocationSpace id)
- : FixedSpace(max_capacity, id, Map::kSize, "map") {}
+ MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
+ : FixedSpace(max_capacity, id, Map::kSize, "map"),
+ max_map_space_pages_(max_map_space_pages) {
+ ASSERT(max_map_space_pages < kMaxMapPageIndex);
+ }
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
@@ -1762,24 +1785,21 @@ class MapSpace : public FixedSpace {
// Given an index, returns the page address.
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
- // Constants.
- static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
+ static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
// Are map pointers encodable into map word?
bool MapPointersEncodable() {
if (!FLAG_use_big_map_space) {
- ASSERT(CountTotalPages() <= kMaxMapPageIndex);
+ ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
return true;
}
- int n_of_pages = Capacity() / Page::kObjectAreaSize;
- ASSERT(n_of_pages == CountTotalPages());
- return n_of_pages <= kMaxMapPageIndex;
+ return CountPagesToTop() <= max_map_space_pages_;
}
// Should be called after forced sweep to find out if map space needs
// compaction.
bool NeedsCompaction(int live_maps) {
- return !MapPointersEncodable() && live_maps <= kCompactionThreshold;
+ return !MapPointersEncodable() && live_maps <= CompactionThreshold();
}
Address TopAfterCompaction(int live_maps) {
@@ -1838,10 +1858,14 @@ class MapSpace : public FixedSpace {
static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
// Do map space compaction if there is a page gap.
- static const int kCompactionThreshold = kMapsPerPage * (kMaxMapPageIndex - 1);
+ int CompactionThreshold() {
+ return kMapsPerPage * (max_map_space_pages_ - 1);
+ }
+
+ const int max_map_space_pages_;
// An array of page start address in a map space.
- Address page_addresses_[kMaxMapPageIndex + 1];
+ Address page_addresses_[kMaxMapPageIndex];
public:
TRACK_MEMORY("MapSpace")
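To make the new threshold concrete, a small worked sketch with made-up numbers (both constants are illustrative stand-ins, not the real Page::kObjectAreaSize / Map::kSize ratio or the real constructor argument):

    const int kMapsPerPage = 64;           // hypothetical
    const int max_map_space_pages = 16;    // hypothetical constructor argument
    const int threshold = kMapsPerPage * (max_map_space_pages - 1);  // 960 maps

NeedsCompaction() then fires only when map pointers are not currently encodable and the number of live maps is at most that threshold, so compaction scales with the configured page budget instead of the fixed kMaxMapPageIndex.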
@@ -2036,11 +2060,9 @@ class LargeObjectIterator: public ObjectIterator {
explicit LargeObjectIterator(LargeObjectSpace* space);
LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
- bool has_next() { return current_ != NULL; }
HeapObject* next();
// implementation of ObjectIterator.
- virtual bool has_next_object() { return has_next(); }
virtual HeapObject* next_object() { return next(); }
private:
diff --git a/src/v8-counters.h b/src/v8-counters.h
index fb1e9265..7397c304 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -60,40 +60,44 @@ namespace internal {
// lines) rather than one macro (of length about 80 lines) to work around
// this problem. Please avoid using recursive macros of this length when
// possible.
-#define STATS_COUNTER_LIST_1(SC) \
- /* Global Handle Count*/ \
- SC(global_handles, V8.GlobalHandles) \
- /* Mallocs from PCRE */ \
- SC(pcre_mallocs, V8.PcreMallocCount) \
- /* OS Memory allocated */ \
- SC(memory_allocated, V8.OsMemoryAllocated) \
- SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
- SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
- SC(alive_after_last_gc, V8.AliveAfterLastGC) \
- SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
- SC(objs_since_last_full, V8.ObjsSinceLastFull) \
- SC(symbol_table_capacity, V8.SymbolTableCapacity) \
- SC(number_of_symbols, V8.NumberOfSymbols) \
- SC(script_wrappers, V8.ScriptWrappers) \
- SC(call_initialize_stubs, V8.CallInitializeStubs) \
- SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
- SC(call_normal_stubs, V8.CallNormalStubs) \
- SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
- SC(arguments_adaptors, V8.ArgumentsAdaptors) \
- SC(compilation_cache_hits, V8.CompilationCacheHits) \
- SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- SC(regexp_cache_hits, V8.RegExpCacheHits) \
- SC(regexp_cache_misses, V8.RegExpCacheMisses) \
- /* Amount of evaled source code. */ \
- SC(total_eval_size, V8.TotalEvalSize) \
- /* Amount of loaded source code. */ \
- SC(total_load_size, V8.TotalLoadSize) \
- /* Amount of parsed source code. */ \
- SC(total_parse_size, V8.TotalParseSize) \
- /* Amount of source code skipped over using preparsing. */ \
- SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
- /* Amount of compiled source code. */ \
- SC(total_compile_size, V8.TotalCompileSize)
+#define STATS_COUNTER_LIST_1(SC) \
+ /* Global Handle Count*/ \
+ SC(global_handles, V8.GlobalHandles) \
+ /* Mallocs from PCRE */ \
+ SC(pcre_mallocs, V8.PcreMallocCount) \
+ /* OS Memory allocated */ \
+ SC(memory_allocated, V8.OsMemoryAllocated) \
+ SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
+ SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
+ SC(alive_after_last_gc, V8.AliveAfterLastGC) \
+ SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
+ SC(objs_since_last_full, V8.ObjsSinceLastFull) \
+ SC(symbol_table_capacity, V8.SymbolTableCapacity) \
+ SC(number_of_symbols, V8.NumberOfSymbols) \
+ SC(script_wrappers, V8.ScriptWrappers) \
+ SC(call_initialize_stubs, V8.CallInitializeStubs) \
+ SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
+ SC(call_normal_stubs, V8.CallNormalStubs) \
+ SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
+ SC(arguments_adaptors, V8.ArgumentsAdaptors) \
+ SC(compilation_cache_hits, V8.CompilationCacheHits) \
+ SC(compilation_cache_misses, V8.CompilationCacheMisses) \
+ SC(regexp_cache_hits, V8.RegExpCacheHits) \
+ SC(regexp_cache_misses, V8.RegExpCacheMisses) \
+ /* Amount of evaled source code. */ \
+ SC(total_eval_size, V8.TotalEvalSize) \
+ /* Amount of loaded source code. */ \
+ SC(total_load_size, V8.TotalLoadSize) \
+ /* Amount of parsed source code. */ \
+ SC(total_parse_size, V8.TotalParseSize) \
+ /* Amount of source code skipped over using preparsing. */ \
+ SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
+ /* Amount of compiled source code. */ \
+ SC(total_compile_size, V8.TotalCompileSize) \
+ /* Amount of source code compiled with the old codegen. */ \
+ SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize) \
+ /* Amount of source code compiled with the full codegen. */ \
+ SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)
#define STATS_COUNTER_LIST_2(SC) \
diff --git a/src/v8.cc b/src/v8.cc
index db570a4f..3bec827a 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -146,7 +146,6 @@ void V8::TearDown() {
Heap::TearDown();
Logger::TearDown();
- Deserializer::TearDown();
is_running_ = false;
has_been_disposed_ = true;
diff --git a/src/v8natives.js b/src/v8natives.js
index 3dcf430b..74750653 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -197,7 +197,7 @@ $Object.prototype.constructor = $Object;
// ECMA-262 - 15.2.4.2
function ObjectToString() {
- return "[object " + %_ClassOf(this) + "]";
+ return "[object " + %_ClassOf(ToObject(this)) + "]";
}
@@ -209,7 +209,7 @@ function ObjectToLocaleString() {
// ECMA-262 - 15.2.4.4
function ObjectValueOf() {
- return this;
+ return ToObject(this);
}
diff --git a/src/version.cc b/src/version.cc
index 2724f6e4..ab2eab36 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
-#define MINOR_VERSION 0
-#define BUILD_NUMBER 7
+#define MINOR_VERSION 1
+#define BUILD_NUMBER 0
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 4ac39339..9cfe98ab 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1537,6 +1537,40 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
+void Assembler::repmovsb() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit(0xA4);
+}
+
+
+void Assembler::repmovsw() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66); // Operand size override.
+ emit(0xF3);
+ emit(0xA4);
+}
+
+
+void Assembler::repmovsl() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit(0xA5);
+}
+
+
+void Assembler::repmovsq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_rex_64();
+ emit(0xA5);
+}
+
+
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2079,6 +2113,16 @@ void Assembler::fisttp_s(const Operand& adr) {
}
+void Assembler::fisttp_d(const Operand& adr) {
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(adr);
+ emit(0xDD);
+ emit_operand(1, adr);
+}
+
+
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 1bddb2fb..5d17edf8 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -574,6 +574,13 @@ class Assembler : public Malloced {
void movzxwq(Register dst, const Operand& src);
void movzxwl(Register dst, const Operand& src);
+ // Repeated moves.
+
+ void repmovsb();
+ void repmovsw();
+ void repmovsl();
+ void repmovsq();
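+
+ // Usage sketch (illustrative; the register setup below is not part of this
+ // patch): REP MOVSQ copies rcx quadwords from [rsi] to [rdi], moving
+ // forward while the direction flag is clear:
+ //   __ movq(rsi, src);               // source address
+ //   __ movq(rdi, dst);               // destination address
+ //   __ movq(rcx, Immediate(count));  // number of 8-byte words to copy
+ //   __ repmovsq();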
+
// New x64 instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
@@ -1052,6 +1059,7 @@ class Assembler : public Malloced {
void fistp_d(const Operand& adr);
void fisttp_s(const Operand& adr);
+ void fisttp_d(const Operand& adr);
void fabs();
void fchs();
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 0cf68ebb..1a0138f9 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -224,20 +224,17 @@ class FloatingPointHelper : public AllStatic {
Register lhs,
Register rhs);
- // Code pattern for loading a floating point value and converting it
- // to a 32 bit integer. Input value must be either a smi or a heap number
- // object.
- // Returns operands as 32-bit sign extended integers in a general purpose
- // registers.
- static void LoadInt32Operand(MacroAssembler* masm,
- const Operand& src,
- Register dst);
-
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in rax, operand_2 in rdx; falls through on float or smi
// operands, jumps to the non_float label otherwise.
static void CheckNumberOperands(MacroAssembler* masm,
Label* non_float);
+
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* operand_conversion_failure);
};
@@ -654,20 +651,29 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
-void CodeGenerator::CallApplyLazy(Property* apply,
+void CodeGenerator::CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position) {
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments).
+ // If the arguments object of the scope has not been allocated,
+ // and x.apply is Function.prototype.apply, this optimization
+ // just copies y and the arguments of the current function onto the
+ // stack, as receiver and arguments, and calls x.
+ // In the implementation comments, we call x the applicand
+ // and y the receiver.
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
- JumpTarget slow, done;
-
- // Load the apply function onto the stack. This will usually
+ // Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- Reference ref(this, apply);
- ref.GetValue();
- ASSERT(ref.type() == Reference::NAMED);
+ Load(applicand);
+ Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ frame()->Push(name);
+ Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+ __ nop();
+ frame()->Push(&answer);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -677,6 +683,11 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
+ // Contents of frame at this point:
+ // Frame[0]: arguments object of the current function or the hole.
+ // Frame[1]: receiver
+ // Frame[2]: applicand.apply
+ // Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -684,143 +695,149 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
- } else {
- __ Cmp(probe.reg(), Factory::the_hole_value());
- probe.Unuse();
- slow.Branch(not_equal);
- }
-
- if (try_lazy) {
- JumpTarget build_args;
-
- // Get rid of the arguments object probe.
- frame_->Drop();
-
- // Before messing with the execution stack, we sync all
- // elements. This is bound to happen anyway because we're
- // about to call a function.
- frame_->SyncRange(0, frame_->element_count() - 1);
+ { VirtualFrame::SpilledScope spilled_scope;
+ Label slow, done;
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ probe.Unuse();
+ __ j(not_equal, &slow);
+ }
- // Check that the receiver really is a JavaScript object.
- {
- frame_->PushElementAt(0);
- Result receiver = frame_->Pop();
- receiver.ToRegister();
- Condition is_smi = masm_->CheckSmi(receiver.reg());
- build_args.Branch(is_smi);
+ if (try_lazy) {
+ Label build_args;
+ // Get rid of the arguments object probe.
+ frame_->Drop(); // Can be called on a spilled frame.
+ // Stack now has 3 elements on it.
+ // Contents of stack at this point:
+ // rsp[0]: receiver
+ // rsp[1]: applicand.apply
+ // rsp[2]: applicand.
+
+ // Check that the receiver really is a JavaScript object.
+ __ movq(rax, Operand(rsp, 0));
+ Condition is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
- build_args.Branch(below);
- }
-
- // Verify that we're invoking Function.prototype.apply.
- {
- frame_->PushElementAt(1);
- Result apply = frame_->Pop();
- apply.ToRegister();
- Condition is_smi = masm_->CheckSmi(apply.reg());
- build_args.Branch(is_smi);
- Result tmp = allocator_->Allocate();
- __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
- build_args.Branch(not_equal);
- __ movq(tmp.reg(),
- FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &build_args);
+
+ // Check that applicand.apply is Function.prototype.apply.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &build_args);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &build_args);
+ __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
- apply_code);
- build_args.Branch(not_equal);
- }
-
- // Get the function receiver from the stack. Check that it
- // really is a function.
- __ movq(rdi, Operand(rsp, 2 * kPointerSize));
- Condition is_smi = masm_->CheckSmi(rdi);
- build_args.Branch(is_smi);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- build_args.Branch(not_equal);
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ movq(rax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
+ __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
+ __ j(not_equal, &build_args);
+
+ // Check that applicand is a function.
+ __ movq(rdi, Operand(rsp, 2 * kPointerSize));
+ is_smi = masm_->CheckSmi(rdi);
+ __ j(is_smi, &build_args);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &build_args);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ movq(rax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger32(rax, rax);
+ __ movq(rcx, rax);
+ __ cmpq(rax, Immediate(kArgumentsLimit));
+ __ j(above, &build_args);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ // rcx is a small non-negative integer, due to the test above.
+ __ testl(rcx, rcx);
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
+ __ decl(rcx);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ // Drop applicand.apply and applicand from the stack, and push
+ // the result of the function call, but leave the spilled frame
+ // unchanged, with 3 elements, so it is correct when we compile the
+ // slow-case code.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ push(rax);
+ // Stack now has 1 element:
+ // rsp[0]: result
+ __ jmp(&done);
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // applicand.apply.
+ __ bind(&build_args);
+ // Stack now has 3 elements, because we jumped here from a point where:
+ // rsp[0]: receiver
+ // rsp[1]: applicand.apply
+ // rsp[2]: applicand.
+
+ // StoreArgumentsObject requires a correct frame, and may modify it.
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->SpillAll();
+ arguments_object.ToRegister();
+ frame_->EmitPush(arguments_object.reg());
+ arguments_object.Unuse();
+ // Stack and frame now have 4 elements.
+ __ bind(&slow);
}
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger32(rax, rax);
- __ movq(rcx, rax);
- __ cmpq(rax, Immediate(kArgumentsLimit));
- build_args.Branch(above);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- __ testl(rcx, rcx);
- __ j(zero, &invoke);
- __ bind(&loop);
- __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
- __ decl(rcx);
- __ j(not_zero, &loop);
-
- // Invoke the function. The virtual frame knows about the receiver
- // so make sure to forget that explicitly.
- __ bind(&invoke);
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
- frame_->Forget(1);
- Result result = allocator()->Allocate(rax);
- frame_->SetElementAt(0, &result);
- done.Jump();
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // Function.prototype.apply.
- build_args.Bind();
- Result arguments_object = StoreArgumentsObject(false);
- frame_->Push(&arguments_object);
- slow.Bind();
- }
- // Flip the apply function and the function to call on the stack, so
- // the function looks like the receiver of the apply call. This way,
- // the generic Function.prototype.apply implementation can deal with
- // the call like it usually does.
- Result a2 = frame_->Pop();
- Result a1 = frame_->Pop();
- Result ap = frame_->Pop();
- Result fn = frame_->Pop();
- frame_->Push(&ap);
- frame_->Push(&fn);
- frame_->Push(&a1);
- frame_->Push(&a2);
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- frame_->Push(&res);
-
- // All done. Restore context register after call.
- if (try_lazy) done.Bind();
+ // Generic computation of x.apply(y, args) with no special optimization.
+ // Flip applicand.apply and applicand on the stack, so
+ // applicand looks like the receiver of the applicand.apply call.
+ // Then process it as a normal function call.
+ __ movq(rax, Operand(rsp, 3 * kPointerSize));
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+ Result res = frame_->CallStub(&call_function, 3);
+ // The function and its two arguments have been dropped.
+ frame_->Drop(1); // Drop the receiver as well.
+ res.ToRegister();
+ frame_->EmitPush(res.reg());
+ // Stack now has 1 element:
+ // rsp[0]: result
+ if (try_lazy) __ bind(&done);
+ } // End of spilled scope.
+ // Restore the context register after a call.
frame_->RestoreContextRegister();
}
@@ -1817,28 +1834,20 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
- frame_->Drop();
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2); // Drop the original and the copy of the element.
+ } else {
+ // If the reference has size zero then we can use the value below
+ // the reference as if it were above the reference, instead of pushing
+ // a new copy of it above the reference.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(); // Drop the original of the element.
}
}
}
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2549,7 +2558,7 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target());
+ { Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2571,12 +2580,27 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
+ if (node->ends_initialization_block()) {
+ // Add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ if (target.type() == Reference::NAMED) {
+ frame_->Dup();
+ // Dup target receiver on stack.
+ } else {
+ ASSERT(target.type() == Reference::KEYED);
+ Result temp = frame_->Pop();
+ frame_->Dup();
+ frame_->Push(&temp);
+ }
+ }
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
- } else {
+ } else { // Assignment is a compound assignment.
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2602,6 +2626,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
+ UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -2613,13 +2638,15 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
+ ASSERT(target.type() == Reference::UNLOADED);
// End of initialization block. Revert to fast case. The
- // argument to the runtime call is the receiver, which is the
- // first value pushed as part of the reference, which is below
- // the lhs value.
- frame_->PushElementAt(target.size());
+ // argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment.
+ // Swap the receiver and the value of the assignment expression.
+ Result lhs = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&lhs);
+ frame_->Push(&receiver);
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
@@ -2787,7 +2814,7 @@ void CodeGenerator::VisitCall(Call* node) {
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
- CallApplyLazy(property,
+ CallApplyLazy(property->obj(),
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
@@ -2819,16 +2846,24 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// Load the function to call from the property through a reference.
- Reference ref(this, property);
- ref.GetValue();
-
- // Pass receiver to called function.
if (property->is_synthetic()) {
+ Reference ref(this, property, false);
+ ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
- // The reference's size is non-negative.
- frame_->PushElementAt(ref.size());
+ Reference ref(this, property, false);
+ ASSERT(ref.size() == 2);
+ Result key = frame_->Pop();
+ frame_->Dup(); // Duplicate the receiver.
+ frame_->Push(&key);
+ ref.GetValue();
+ // Top of frame contains function to call, with duplicate copy of
+ // receiver below it. Swap them.
+ Result function = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&function);
+ frame_->Push(&receiver);
}
// Call the function.
@@ -3012,6 +3047,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
} else {
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
Load(node->expression());
switch (op) {
case Token::NOT:
@@ -3021,9 +3059,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
@@ -3042,10 +3077,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Condition is_smi = masm_->CheckSmi(operand.reg());
smi_label.Branch(is_smi, &operand);
- frame_->Push(&operand); // undo popping of TOS
- Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
- CALL_FUNCTION, 1);
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ Result answer = frame_->CallStub(&stub, &operand);
continue_label.Jump(&answer);
+
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
@@ -3167,7 +3202,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
- { Reference target(this, node->expression());
+ // A constant reference is never written to, so it does not need to be
+ // treated as a compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -3622,6 +3659,22 @@ void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
}
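+// Tests whether the single argument is an undetectable object: false for
+// smis, otherwise checks the Map::kIsUndetectable bit of the object's map.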
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kBitFieldOffset));
+ __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
+ obj.Unuse();
+ destination()->Split(not_zero);
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -3926,7 +3979,8 @@ void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
- Result answer = frame_->CallRuntime(Runtime::kSubString, 3);
+ SubStringStub stub;
+ Result answer = frame_->CallStub(&stub, 3);
frame_->Push(&answer);
}
@@ -4239,14 +4293,19 @@ bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
//------------------------------------------------------------------------------
// CodeGenerator implementation of variables, lookups, and stores.
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
- : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- cgen_->UnloadReference(this);
+ ASSERT(is_unloaded() || is_illegal());
}
@@ -4296,6 +4355,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
+ ref->set_unloaded();
}
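Together with the GetValue()/SetValue() changes later in this file, the protocol is now that a Reference unloads itself and its destructor merely asserts that this has happened. A minimal sketch of the two usage patterns, assuming a CodeGenerator context (this) and an Expression* expr:

    { Reference ref(this, expr, false);    // do not persist after the read
      ref.GetValue();                      // pushes the value, then unloads the reference
    }                                      // ~Reference only asserts is_unloaded()

    { Reference target(this, expr, true);  // keep the reference for a later store
      target.GetValue();                   // value pushed, reference still loaded
      // ...compute the value to assign...
      target.SetValue(NOT_CONST_INIT);     // SetValue ends by unloading the reference
    }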
@@ -5014,31 +5074,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
return;
}
- // Set the flags based on the operation, type and loop nesting level.
- GenericBinaryFlags flags;
- switch (op) {
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- flags = (loop_nesting() > 0)
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
-
- default:
- // By default only inline the Smi check code for likely smis if this
- // operation is part of a loop.
- flags = ((loop_nesting() > 0) && type->IsLikelySmi())
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
- }
-
Result right = frame_->Pop();
Result left = frame_->Pop();
@@ -5072,7 +5107,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
- bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
if (left_is_smi && right_is_smi) {
// Compute the constant result at compile time, and leave it on the frame.
@@ -5081,34 +5115,35 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
if (FoldConstantSmis(op, left_int, right_int)) return;
}
+ Result answer;
if (left_is_non_smi || right_is_non_smi) {
- // Set flag so that we go straight to the slow case, with no smi code.
- generate_no_smi_code = true;
+ // Go straight to the slow case, with no smi code
+ frame_->Push(&left);
+ frame_->Push(&right);
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+ answer = frame_->CallStub(&stub, 2);
} else if (right_is_smi) {
- ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode);
- return;
+ answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
} else if (left_is_smi) {
- ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode);
- return;
- }
-
- if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
- LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
} else {
- frame_->Push(&left);
- frame_->Push(&right);
- // If we know the arguments aren't smis, use the binary operation stub
- // that does not check for the fast smi case.
- // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
- if (generate_no_smi_code) {
- flags = NO_SMI_CODE_IN_STUB;
+ // Choose between inline smi code and the generic stub, based on the
+ // operation, the operand types and the loop nesting level. Bit operations
+ // always assume they likely operate on smis, but still only get the inline
+ // smi check code inside a loop. All other operations only inline the smi
+ // check code for likely smis when the operation is part of a loop.
+ if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
+ frame_->Push(&left);
+ frame_->Push(&right);
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+ answer = frame_->CallStub(&stub, 2);
}
- GenericBinaryOpStub stub(op, overwrite_mode, flags);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
}
+ frame_->Push(&answer);
}
@@ -5189,12 +5224,12 @@ void DeferredInlineSmiOperation::Generate() {
}
-void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> value,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
@@ -5205,20 +5240,19 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
overwrite_mode);
}
- ASSERT(!operand->is_valid());
- return;
}
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
+ Result answer;
switch (op) {
case Token::ADD: {
operand->ToRegister();
@@ -5239,15 +5273,15 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
deferred->entry_label());
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
case Token::SUB: {
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
@@ -5261,7 +5295,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
deferred->entry_label());
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
}
break;
}
@@ -5269,8 +5303,8 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -5288,21 +5322,21 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
shift_value);
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
}
break;
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
operand->ToRegister();
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -5317,15 +5351,14 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
}
break;
case Token::SHL:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -5342,10 +5375,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
overwrite_mode);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
} else {
// Use a fresh temporary for nonzero shift values.
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -5360,7 +5393,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
}
}
break;
@@ -5395,7 +5427,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
@@ -5423,7 +5455,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Smi::FromInt(int_value - 1));
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break; // This break only applies if we generated code for MOD.
}
// Fall through if we did not find a power of 2 on the right hand side!
@@ -5432,22 +5464,24 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
default: {
Result constant_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
}
break;
}
}
- ASSERT(!operand->is_valid());
+ ASSERT(answer.is_valid());
+ return answer;
}
-void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ Result answer;
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
// We need rax as the quotient register, rdx as the remainder
@@ -5529,16 +5563,17 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&quotient);
+ answer = quotient;
} else {
ASSERT(op == Token::MOD);
__ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&remainder);
+ answer = remainder;
}
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Special handling of shift operations because they use fixed
@@ -5559,7 +5594,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
frame_->Spill(rcx);
// Use a fresh answer register to avoid spilling the left operand.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
@@ -5598,8 +5633,8 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Handle the other binary operations.
@@ -5608,7 +5643,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Perform the smi tag check.
@@ -5662,7 +5697,122 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
+ ASSERT(answer.is_valid());
+ return answer;
+}
+
+
+Result CodeGenerator::EmitKeyedLoad(bool is_global) {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+ if (loop_nesting() > 0) {
+ Comment cmnt(masm_, "[ Inlined load from keyed Property");
+
+ Result key = frame_->Pop();
+ Result receiver = frame_->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching. Do not use
+ // the root array to load null_value, since it must be patched with
+ // the expected receiver map.
+ masm_->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is a non-negative smi.
+ __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ movq(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ SmiToInteger32(index.reg(), key.reg());
+ __ cmpl(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // The index register holds the un-smi-tagged key. It has been
+ // zero-extended to 64-bits, so it can be used directly as index in the
+ // operand below.
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+ // one is rax, then we can reuse that one because the value
+ // coming from the deferred code will be in rax.
+ Result value = index;
+ __ movq(value.reg(),
+ Operand(elements.reg(),
+ index.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ frame_->Push(&receiver);
+ frame_->Push(&key);
+ return value;
+
+ } else {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = frame_->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ return answer;
+ }
}
@@ -5795,119 +5945,18 @@ void Reference::GetValue() {
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (cgen_->loop_nesting() > 0) {
- Comment cmnt(masm, "[ Inlined load from keyed Property");
-
- Result key = cgen_->frame()->Pop();
- Result receiver = cgen_->frame()->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = cgen_->allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- // Use a fresh temporary for the index and later the loaded
- // value.
- Result index = cgen_->allocator()->Allocate();
- ASSERT(index.is_valid());
-
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(index.reg(),
- receiver.reg(),
- key.reg(),
- is_global);
-
- // Check that the receiver is not a smi (only needed if this
- // is not a load from the global context) and that it has the
- // expected map.
- if (!is_global) {
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
- }
-
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm->movq(kScratchRegister, Factory::null_value(),
- RelocInfo::EMBEDDED_OBJECT);
- masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- // Check that the key is a non-negative smi.
- __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
-
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ movq(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Factory::fixed_array_map());
- deferred->Branch(not_equal);
-
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ SmiToInteger32(index.reg(), key.reg());
- __ cmpl(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // The index register holds the un-smi-tagged key. It has been
- // zero-extended to 64-bits, so it can be used directly as index in the
- // operand below.
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is rax, the we can reuse that one because the value
- // coming from the deferred code will be in rax.
- Result value = index;
- __ movq(value.reg(),
- Operand(elements.reg(),
- index.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
- index.Unuse();
- __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- // Restore the receiver and key to the frame and push the
- // result on top of it.
- cgen_->frame()->Push(&receiver);
- cgen_->frame()->Push(&key);
- cgen_->frame()->Push(&value);
-
- } else {
- Comment cmnt(masm, "[ Load from keyed Property");
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- cgen_->frame()->Push(&answer);
- }
+ Result value = cgen_->EmitKeyedLoad(is_global);
+ cgen_->frame()->Push(&value);
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ cgen_->UnloadReference(this);
+ }
}
@@ -5944,6 +5993,9 @@ void Reference::TakeValue() {
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
+
+ ASSERT(persist_after_get_);
+ // Do not unload the reference, because it is used in SetValue.
}
@@ -6072,6 +6124,7 @@ void Reference::SetValue(InitState init_state) {
default:
UNREACHABLE();
}
+ cgen_->UnloadReference(this);
}
@@ -6213,19 +6266,17 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
- // TODO(X64): This method is identical to the ia32 version.
- // Either find a reason to change it, or move it somewhere where it can be
- // shared. (Notice: It assumes that a Smi can fit in an int).
-
Object* answer_object = Heap::undefined_value();
switch (op) {
case Token::ADD:
- if (Smi::IsValid(left + right)) {
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
answer_object = Smi::FromInt(left + right);
}
break;
case Token::SUB:
- if (Smi::IsValid(left - right)) {
+ // Use intptr_t to detect overflow of 32-bit int.
+ if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
answer_object = Smi::FromInt(left - right);
}
break;
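The cast matters because on x64 a smi covers the full 32-bit signed range, so the plain int addition or subtraction in the old check could itself overflow before Smi::IsValid saw the result. A standalone sketch of the widened check (the function and variable names are illustrative):

    #include <cstdint>

    bool FoldedAddFitsInSmi(int left, int right) {
      // Widen before adding; the 64-bit sum of two 32-bit ints cannot overflow.
      int64_t wide = static_cast<int64_t>(left) + right;
      return wide >= INT32_MIN && wide <= INT32_MAX;  // the x64 smi range
    }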
@@ -6299,56 +6350,216 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
// End of CodeGenerator implementation.
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes rdi and rbx. Dest is rcx. Source cannot be rcx or one of the
+// trashed registers.
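+// For reference, the upper word inspected below has the standard IEEE 754
+// double layout that the HeapNumber constants encode: 1 sign bit, 11
+// exponent bits (bias 1023, kExponentShift == 20) and the top 20 mantissa
+// bits, so for example
+//   uint32_t biased = (upper_word >> 20) & 0x7FF;
+//   int exponent = static_cast<int>(biased) - 1023;
+// The non-SSE3 path handles exponents up to 31; the SSE3 path requires the
+// exponent to be below 63 so the value fits in a signed 64-bit integer.
+// Anything larger jumps to conversion_failure.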
+void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ bool use_sse3,
+ Label* conversion_failure) {
+ ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
+ Label done, right_exponent, normal_exponent;
+ Register scratch = rbx;
+ Register scratch2 = rdi;
+ // Get exponent word.
+ __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ movl(scratch2, scratch);
+ __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmpl(scratch2, Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ subq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(rsp, 0));
+ __ movl(rcx, Operand(rsp, 0)); // Load low word of answer into rcx.
+ __ addq(rsp, Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load rcx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(rcx, rcx);
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent that we are fastest at and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmpl(scratch2, Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
+
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmpl(scratch2, Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ movl(scratch2, scratch);
+ __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to use the full unsigned range so we subtract 1 bit from the
+ // shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, Immediate(big_shift_distance));
+ // Get the second half of the double.
+ __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 21 bits to get the most significant 11 bits of the low
+ // mantissa word.
+ __ shr(rcx, Immediate(32 - big_shift_distance));
+ __ or_(rcx, scratch2);
+ // We have the answer in rcx, but we may need to negate it.
+ __ testl(scratch, scratch);
+ __ j(positive, &done);
+ __ neg(rcx);
+ __ jmp(&done);
+ }
+
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in rcx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ subl(scratch2, Immediate(zero_exponent));
+ // rcx already has a Smi zero.
+ __ j(less, &done);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
+ __ movl(rcx, Immediate(30));
+ __ subl(rcx, scratch2);
+
+ __ bind(&right_exponent);
+ // Here rcx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, Immediate(shift_distance));
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the most significant 10 bits of the low
+ // mantissa word.
+ __ shr(scratch2, Immediate(32 - shift_distance));
+ __ or_(scratch2, scratch);
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to rcx and
+ // we may need to fix the sign.
+ Label negative;
+ __ xor_(rcx, rcx);
+ __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative);
+ __ movl(rcx, scratch2);
+ __ jmp(&done);
+ __ bind(&negative);
+ __ subl(rcx, scratch2);
+ __ bind(&done);
+ }
+}
+
+
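The non-SSE3 path above decodes the IEEE-754 double by hand: it reads the exponent field, bails out on values it cannot handle, reattaches the implicit leading 1 to the mantissa, and shifts the integer part into place. As a rough illustration (plain C++, not V8 code; the helper assumes a finite input with |value| < 2^31, i.e. only the "normal exponent" path), the same truncation can be written as:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Truncate a finite double with |value| < 2^31 to an int32 using only the
// exponent and mantissa fields, mirroring the normal-exponent path above.
static int32_t TruncateSmallDouble(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // Raw IEEE-754 bits.
  bool negative = (bits >> 63) != 0;
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // Remove the bias.
  if (exponent < 0) return 0;  // |value| < 1 truncates to zero.
  // Reattach the implicit 1 above the 52 stored mantissa bits.
  uint64_t mantissa = (bits & ((uint64_t(1) << 52) - 1)) | (uint64_t(1) << 52);
  // Bit 52 of the mantissa has weight 2^exponent, so shifting right by
  // (52 - exponent) leaves the integer part.
  int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
  return negative ? -magnitude : magnitude;
}

int main() {
  std::printf("%d %d\n", TruncateSmallDouble(-12345.9), TruncateSmallDouble(0.75));  // -12345 0
}
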
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- ASSERT(op_ == Token::SUB);
+ Label slow, done;
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ JumpIfNotSmi(rax, &try_float);
+
+ // Enter runtime system if the value of the smi is zero
+ // to make sure that we switch between 0 and -0.
+ // Also enter it if the value of the smi is Smi::kMinValue.
+ __ SmiNeg(rax, rax, &done);
+
+ // Either zero or Smi::kMinValue, neither of which become a smi when
+ // negated.
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(kScratchRegister, Immediate(0x01));
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (overwrite_) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &slow);
- Label slow;
- Label done;
- Label try_float;
- // Check whether the value is a smi.
- __ JumpIfNotSmi(rax, &try_float);
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
- // Enter runtime system if the value of the smi is zero
- // to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue.
- __ SmiNeg(rax, rax, &done);
+ // Do the bitwise operation; the int32 result always fits in a smi here.
+ __ not_(rcx);
+ // Tag the result as a smi and we're done.
+ ASSERT(kSmiTagSize == 1);
+ __ Integer32ToSmi(rax, rcx);
+ }
- // Either zero or Smi::kMinValue, neither of which become a smi when negated.
- __ SmiCompare(rax, Smi::FromInt(0));
- __ j(not_equal, &slow);
- __ Move(rax, Factory::minus_zero_value());
- __ jmp(&done);
+ // Return from the stub.
+ __ bind(&done);
+ __ StubReturn(1);
- // Enter runtime system.
+ // Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ pop(rcx); // pop return address
__ push(rax);
__ push(rcx); // push return address
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- __ jmp(&done);
-
- // Try floating point case.
- __ bind(&try_float);
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ Cmp(rdx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- // Operand is a float, negate its value by flipping sign bit.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(kScratchRegister, Immediate(0x01));
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- // rdx is value to store.
- if (overwrite_) {
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
- } else {
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // rcx: allocated 'empty' number
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
}
-
- __ bind(&done);
- __ StubReturn(1);
}
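
The Token::SUB float path negates without touching the FPU: flipping bit 63 of an IEEE-754 double flips its sign, including for -0. A standalone sketch of the same trick (plain C++, not V8 code):

#include <cstdint>
#include <cstring>
#include <cstdio>

static double NegateBySignBit(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits ^= uint64_t(1) << 63;  // Flip only the sign bit.
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}

int main() {
  std::printf("%g %g\n", NegateBySignBit(2.5), NegateBySignBit(-0.0));  // -2.5 0
}
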
@@ -7297,15 +7508,6 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
}
-void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
- const Operand& src,
- Register dst) {
- // TODO(X64): Convert number operands to int32 values.
- // Don't convert a Smi to a double first.
- UNIMPLEMENTED();
-}
-
-
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
Label load_smi_1, load_smi_2, done_load_1, done;
__ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
@@ -7335,6 +7537,61 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
}
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ JumpIfNotSmi(rdx, &arg1_is_object);
+ __ SmiToInteger32(rdx, rdx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rdx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &check_undefined_arg1);
+ // Get the untagged integer version of the rdx heap number in rcx.
+ IntegerConvert(masm, rdx, use_sse3, conversion_failure);
+ __ movl(rdx, rcx);
+
+ // Here rdx has the untagged integer, rax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ JumpIfNotSmi(rax, &arg2_is_object);
+ __ SmiToInteger32(rax, rax);
+ __ movl(rcx, rax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ movl(rcx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the rax heap number in rcx.
+ IntegerConvert(masm, rax, use_sse3, conversion_failure);
+ __ bind(&done);
+ __ movl(rax, rdx);
+}
+
+
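LoadAsIntegers applies the same conversion to each operand of a bitwise operator: smis are untagged directly, heap numbers go through IntegerConvert, undefined becomes 0 per ECMA-262, and anything else falls back to the runtime. A rough model of the per-operand dispatch (plain C++, not V8 types; IntegerConvert is stood in by a cast that assumes an in-range value):

#include <cstdint>
#include <cstdio>

enum Kind { SMI, HEAP_NUMBER, UNDEFINED, OTHER };

struct TaggedValue {
  Kind kind;
  int32_t smi_value;  // Valid when kind == SMI.
  double number;      // Valid when kind == HEAP_NUMBER.
};

// Returns false where the stub would jump to conversion_failure.
static bool LoadAsInteger(const TaggedValue& v, int32_t* out) {
  switch (v.kind) {
    case SMI:         *out = v.smi_value; return true;
    case HEAP_NUMBER: *out = static_cast<int32_t>(v.number); return true;  // IntegerConvert stand-in.
    case UNDEFINED:   *out = 0; return true;  // ECMA-262, section 9.5.
    default:          return false;           // Let the runtime handle it.
  }
}

int main() {
  TaggedValue v = { HEAP_NUMBER, 0, 7.9 };
  int32_t i;
  if (LoadAsInteger(v, &i)) std::printf("%d\n", i);  // prints 7
}
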
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs) {
@@ -7575,7 +7832,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::SHL:
case Token::SHR:
case Token::SAR:
- // Move the second operand into register ecx.
+ // Move the second operand into register rcx.
__ movq(rcx, rbx);
// Perform the operation.
switch (op_) {
@@ -7671,44 +7928,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
- // TODO(X64): Don't convert a Smi to float and then back to int32
- // afterwards.
- FloatingPointHelper::LoadFloatOperands(masm);
-
- Label skip_allocation, non_smi_result, operand_conversion_failure;
-
- // Reserve space for converted numbers.
- __ subq(rsp, Immediate(2 * kPointerSize));
-
- if (use_sse3_) {
- // Truncate the operands to 32-bit integers and check for
- // exceptions in doing so.
- CpuFeatures::Scope scope(SSE3);
- __ fisttp_s(Operand(rsp, 0 * kPointerSize));
- __ fisttp_s(Operand(rsp, 1 * kPointerSize));
- __ fnstsw_ax();
- __ testl(rax, Immediate(1));
- __ j(not_zero, &operand_conversion_failure);
- } else {
- // Check if right operand is int32.
- __ fist_s(Operand(rsp, 0 * kPointerSize));
- __ fild_s(Operand(rsp, 0 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
-
- // Check if left operand is int32.
- __ fist_s(Operand(rsp, 1 * kPointerSize));
- __ fild_s(Operand(rsp, 1 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
- }
-
- // Get int32 operands and perform bitop.
- __ pop(rcx);
- __ pop(rax);
+ Label skip_allocation, non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
@@ -7756,22 +7977,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
GenerateReturn(masm);
}
- // Clear the FPU exception flag and reset the stack before calling
- // the runtime system.
- __ bind(&operand_conversion_failure);
- __ addq(rsp, Immediate(2 * kPointerSize));
- if (use_sse3_) {
- // If we've used the SSE3 instructions for truncating the
- // floating point values to integers and it failed, we have a
- // pending #IA exception. Clear it.
- __ fnclex();
- } else {
- // The non-SSE3 variant does early bailout if the right
- // operand isn't a 32-bit integer, so we may have a single
- // value on the FPU stack we need to get rid of.
- __ ffree(0);
- }
-
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
@@ -7991,8 +8196,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Both strings are non-empty.
// rax: first string
// rbx: length of first string
- // ecx: length of second string
- // edx: second string
+ // rcx: length of second string
+ // rdx: second string
// r8: instance type of first string if string check was performed above
// r9: instance type of first string if string check was performed above
Label string_add_flat_result;
@@ -8148,11 +8353,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
-void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
+void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
@@ -8173,6 +8378,174 @@ void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
}
+void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
+ // Copy characters using rep movs of quadwords. The remaining bytes (fewer
+ // than eight) are copied one at a time after the rep movs.
+ ASSERT(dest.is(rdi)); // rep movs destination
+ ASSERT(src.is(rsi)); // rep movs source
+ ASSERT(count.is(rcx)); // rep movs count
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ testq(count, count);
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ ASSERT_EQ(2, sizeof(uc16)); // NOLINT
+ __ addq(count, count);
+ }
+
+ // Don't enter the rep movs if there are fewer than 8 bytes to copy.
+ Label last_bytes;
+ __ testq(count, Immediate(~7));
+ __ j(zero, &last_bytes);
+
+ // Copy from rsi to rdi using the rep movs instruction.
+ __ movq(kScratchRegister, count);
+ __ sar(count, Immediate(3)); // Number of quadwords to copy.
+ __ repmovsq();
+
+ // Find number of bytes left.
+ __ movq(count, kScratchRegister);
+ __ and_(count, Immediate(7));
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ testq(count, count);
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ movb(kScratchRegister, Operand(src, 0));
+ __ movb(Operand(dest, 0), kScratchRegister);
+ __ addq(src, Immediate(1));
+ __ addq(dest, Immediate(1));
+ __ subq(count, Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
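The REP variant trades the byte-at-a-time loop of GenerateCopyCharacters for bulk 8-byte moves plus a short tail, which is what rep movsq provides on x64. In ordinary C++ the same strategy looks like this (illustration only, not generated code; overlapping ranges are not supported, matching the stub):

#include <cstdint>
#include <cstring>
#include <cstdio>

static void CopyBytesBlockwise(uint8_t* dest, const uint8_t* src, size_t count) {
  size_t blocks = count >> 3;  // Whole 8-byte blocks (the rep movsq part).
  for (size_t i = 0; i < blocks; i++) {
    std::memcpy(dest + 8 * i, src + 8 * i, 8);
  }
  for (size_t i = 8 * blocks; i < count; i++) {  // Remaining 0-7 bytes (the tail loop).
    dest[i] = src[i];
  }
}

int main() {
  const char* text = "substring copy";
  uint8_t out[15];
  CopyBytesBlockwise(out, reinterpret_cast<const uint8_t*>(text), 15);  // Includes the NUL.
  std::printf("%s\n", reinterpret_cast<char*>(out));
}
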
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: to
+ // rsp[16]: from
+ // rsp[24]: string
+
+ const int kToOffset = 1 * kPointerSize;
+ const int kFromOffset = kToOffset + kPointerSize;
+ const int kStringOffset = kFromOffset + kPointerSize;
+ const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
+
+ // Make sure first argument is a string.
+ __ movq(rax, Operand(rsp, kStringOffset));
+ ASSERT_EQ(0, kSmiTag);
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // rax: string
+ // rbx: instance type
+ // Calculate length of sub string using the smi values.
+ __ movq(rcx, Operand(rsp, kToOffset));
+ __ movq(rdx, Operand(rsp, kFromOffset));
+ __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
+
+ __ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
+ __ j(negative, &runtime);
+ // Handle sub-strings of length 2 and less in the runtime system.
+ __ SmiToInteger32(rcx, rcx);
+ __ cmpl(rcx, Immediate(2));
+ __ j(below_equal, &runtime);
+
+ // rax: string
+ // rbx: instance type
+ // rcx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ and_(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ cmpb(rbx, Immediate(kSeqStringTag | kAsciiStringTag));
+ __ j(not_equal, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi used by following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
+ __ movq(rsi, rdx); // Restore rsi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ __ bind(&non_ascii_flat);
+ // rax: string
+ // rbx: instance type & (kStringRepresentationMask | kStringEncodingMask)
+ // rcx: result string length
+ // Check for sequential two byte string
+ __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+ // rax: result string
+ // rcx: result string length
+ __ movq(rdx, rsi); // rsi used by following code.
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ // Load string argument and locate character of sub string start.
+ __ movq(rsi, Operand(rsp, kStringOffset));
+ __ movq(rbx, Operand(rsp, kFromOffset));
+ {
+ SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
+ __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ }
+
+ // rax: result string
+ // rcx: result length
+ // rdx: original value of rsi
+ // rdi: first character of result
+ // rsi: character of sub string start
+ GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
+ __ movq(rsi, rdx); // Restore rsi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(kArgumentsSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+}
+
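After the checks, the stub's job reduces to pointer arithmetic: point at the first character of the source substring (string data plus the untagged from index, scaled by the character size via times_1 or times_2 and offset by the sequential string header), then copy length characters into the freshly allocated result. Strings of length 2 or less and non-flat strings go to Runtime::kSubString. In plain terms (C++ sketch, not V8 code; the first-character pointers stand for what the lea instructions above compute):

#include <cstdint>
#include <cstring>
#include <cstdio>

static void CopySubStringChars(uint8_t* result_chars,       // First character of the result.
                               const uint8_t* string_chars,  // First character of the source.
                               int from,                      // Untagged start index.
                               int length,                    // to - from, already checked > 2.
                               int char_size) {               // 1 for ASCII, 2 for two-byte.
  const uint8_t* start = string_chars + static_cast<size_t>(from) * char_size;
  std::memcpy(result_chars, start, static_cast<size_t>(length) * char_size);
}

int main() {
  const char* s = "full-codegen";
  char out[4] = {0};
  CopySubStringChars(reinterpret_cast<uint8_t*>(out),
                     reinterpret_cast<const uint8_t*>(s), 5, 3, 1);
  std::printf("%s\n", out);  // cod
}
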
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
@@ -8241,7 +8614,6 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
- __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
Label result_greater;
@@ -8251,13 +8623,11 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Result is LESS.
__ Move(rax, Smi::FromInt(LESS));
- __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
// Result is GREATER.
__ bind(&result_greater);
__ Move(rax, Smi::FromInt(GREATER));
- __ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
}
@@ -8287,6 +8657,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
// Inline comparison of ascii strings.
+ __ IncrementCounter(&Counters::string_compare_native, 1);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 50bb0231..72c84162 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,57 +43,70 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame. The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
- enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen, Expression* expression);
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
+ ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
// The size the reference takes up on the stack.
- int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is left in place with its value above it.
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
- // being read from again. Thae value of the reference may be invalidated,
+ // being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The stored value is left in place (with the
- // reference intact below it) to support chained assignments.
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
+ bool persist_after_get_;
};
@@ -422,6 +435,11 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Load a property of an object, returning it in a Result.
+ // The object and the property name are passed on the stack, and
+ // not changed.
+ Result EmitKeyedLoad(bool is_global);
+
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -446,20 +464,20 @@ class CodeGenerator: public AstVisitor {
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
- void ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> constant_operand,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode);
+ Result ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> constant_operand,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
- void LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
+ Result LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode);
void Comparison(Condition cc,
bool strict,
@@ -478,10 +496,10 @@ class CodeGenerator: public AstVisitor {
CallFunctionFlags flags,
int position);
- // Use an optimized version of Function.prototype.apply that avoid
- // allocating the arguments object and just copies the arguments
- // from the stack.
- void CallApplyLazy(Property* apply,
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments). We call x the applicand and y the receiver.
+ // The optimization avoids allocating an arguments object if possible.
+ void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
@@ -514,6 +532,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -610,8 +629,8 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class Result;
- friend class FastCodeGenerator;
- friend class CodeGenSelector;
+ friend class FullCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -712,6 +731,29 @@ class GenericBinaryOpStub: public CodeStub {
};
+class StringStubBase: public CodeStub {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
+ // not supported.
+ void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be rdi.
+ Register src, // Must be rsi.
+ Register count, // Must be rcx.
+ bool ascii);
+};
+
+
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
@@ -719,7 +761,7 @@ enum StringAddFlags {
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public StringStubBase {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -731,17 +773,23 @@ class StringAddStub: public CodeStub {
void Generate(MacroAssembler* masm);
- void GenerateCopyCharacters(MacroAssembler* masm,
- Register desc,
- Register src,
- Register count,
- bool ascii);
-
// Should the stub check whether arguments are strings?
bool string_check_;
};
+class SubStringStub: public StringStubBase {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
class StringCompareStub: public CodeStub {
public:
explicit StringCompareStub() {}
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 0b43e766..ce3aae8a 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -114,6 +114,10 @@ static ByteMnemonic zero_operands_instr[] = {
{ 0x9E, UNSET_OP_ORDER, "sahf" },
{ 0x99, UNSET_OP_ORDER, "cdq" },
{ 0x9B, UNSET_OP_ORDER, "fwait" },
+ { 0xA4, UNSET_OP_ORDER, "movs" },
+ { 0xA5, UNSET_OP_ORDER, "movs" },
+ { 0xA6, UNSET_OP_ORDER, "cmps" },
+ { 0xA7, UNSET_OP_ORDER, "cmps" },
{ -1, UNSET_OP_ORDER, "" }
};
@@ -157,6 +161,16 @@ enum InstructionType {
};
+enum Prefixes {
+ ESCAPE_PREFIX = 0x0F,
+ OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
+ ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
+ REPNE_PREFIX = 0xF2,
+ REP_PREFIX = 0xF3,
+ REPEQ_PREFIX = REP_PREFIX
+};
+
+
struct InstructionDesc {
const char* mnem;
InstructionType type;
@@ -1128,12 +1142,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
// Scan for prefixes.
while (true) {
current = *data;
- if (current == 0x66) { // Group 3 prefix.
+ if (current == OPERAND_SIZE_OVERRIDE_PREFIX) { // Group 3 prefix.
operand_size_ = current;
} else if ((current & 0xF0) == 0x40) { // REX prefix.
setRex(current);
if (rex_w()) AppendToBuffer("REX.W ");
- } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix.
+ } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix (0xF2 or 0xF3).
group_1_prefix_ = current;
} else { // Not a prefix - an opcode.
break;
@@ -1145,7 +1159,17 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
byte_size_operand_ = idesc.byte_size_operation;
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
- AppendToBuffer(idesc.mnem);
+ if (current >= 0xA4 && current <= 0xA7) {
+ // String move or compare operations.
+ if (group_1_prefix_ == REP_PREFIX) {
+ // REP.
+ AppendToBuffer("rep ");
+ }
+ if (rex_w()) AppendToBuffer("REX.W ");
+ AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
+ } else {
+ AppendToBuffer("%s", idesc.mnem);
+ }
data++;
break;
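
With the new table entries for 0xA4-0xA7 and the prefix handling above, the string instructions now disassemble with their rep prefix and an operand-size suffix instead of a bare mnemonic. A toy model of that formatting decision (plain C++, not the disassembler's real interface; the suffix character is an assumption based on operand_size_code()):

#include <cstdio>
#include <string>

static std::string FormatStringOp(unsigned char opcode, bool rep_prefix_seen,
                                  char operand_size_suffix) {
  std::string out;
  if (rep_prefix_seen) out += "rep ";  // 0xF3 group 1 prefix was scanned.
  out += (opcode == 0xA4 || opcode == 0xA5) ? "movs" : "cmps";
  out += operand_size_suffix;          // e.g. 'q' for a 64-bit operation.
  return out;
}

int main() {
  std::printf("%s\n", FormatStringOp(0xA5, true, 'q').c_str());  // rep movsq
}
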
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 0f284332..a5085cac 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -30,7 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
#include "parser.h"
namespace v8 {
@@ -51,7 +51,7 @@ namespace internal {
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-x64.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
SetFunctionPosition(fun);
@@ -161,7 +161,7 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) {
}
-void FastCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence(int position) {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -200,7 +200,7 @@ void FastCodeGenerator::EmitReturnSequence(int position) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -243,7 +243,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -285,7 +285,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
}
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -324,7 +324,7 @@ void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
}
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -364,7 +364,7 @@ void FastCodeGenerator::ApplyTOS(Expression::Context context) {
}
-void FastCodeGenerator::DropAndApply(int count,
+void FullCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
ASSERT(count > 0);
@@ -415,7 +415,7 @@ void FastCodeGenerator::DropAndApply(int count,
}
-void FastCodeGenerator::Apply(Expression::Context context,
+void FullCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
switch (context) {
@@ -480,7 +480,7 @@ void FastCodeGenerator::Apply(Expression::Context context,
}
-void FastCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::DoTest(Expression::Context context) {
// The value to test is in the accumulator. If the value might be needed
// on the stack (value/test and test/value contexts with a stack location
// desired), then the value is already duplicated on the stack.
@@ -614,7 +614,7 @@ void FastCodeGenerator::DoTest(Expression::Context context) {
}
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -633,13 +633,13 @@ MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
}
-void FastCodeGenerator::Move(Register destination, Slot* source) {
+void FullCodeGenerator::Move(Register destination, Slot* source) {
MemOperand location = EmitSlotSearch(source, destination);
__ movq(destination, location);
}
-void FastCodeGenerator::Move(Slot* dst,
+void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
@@ -655,7 +655,7 @@ void FastCodeGenerator::Move(Slot* dst,
}
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
@@ -754,7 +754,7 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
}
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(rsi); // The context is the first argument.
__ Push(pairs);
@@ -764,7 +764,7 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -782,17 +782,21 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), context_);
}
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
- Expression* rewrite = var->rewrite();
- if (rewrite == NULL) {
- ASSERT(var->is_global());
+ // Four cases: non-this global variables, lookup slots, all other
+ // types of slots, and parameters that rewrite to explicit property
+ // accesses on the arguments object.
+ Slot* slot = var->slot();
+ Property* property = var->AsProperty();
+
+ if (var->is_global() && !var->is_this()) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
@@ -805,34 +809,24 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// is no test rax instruction here.
__ nop();
DropAndApply(1, context, rax);
- } else if (rewrite->AsSlot() != NULL) {
- Slot* slot = rewrite->AsSlot();
- if (FLAG_debug_code) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL: {
- Comment cmnt(masm_, "Stack slot");
- break;
- }
- case Slot::CONTEXT: {
- Comment cmnt(masm_, "Context slot");
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- break;
- }
- }
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ Comment cmnt(masm_, "Lookup slot");
+ __ push(rsi); // Context.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ Apply(context, rax);
+
+ } else if (slot != NULL) {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
Apply(context, slot);
+
} else {
- Comment cmnt(masm_, "Variable rewritten to property");
- // A variable has been rewritten into an explicit access to an object
- // property.
- Property* property = rewrite->AsProperty();
+ Comment cmnt(masm_, "Rewritten parameter");
ASSERT_NOT_NULL(property);
-
- // The only property expressions that can occur are of the form
- // "slot[literal]".
+ // Rewritten parameter accesses are of the form "slot[literal]".
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -864,7 +858,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
}
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
@@ -890,7 +884,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -960,7 +954,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
@@ -1010,7 +1004,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
@@ -1020,7 +1014,7 @@ void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
}
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1028,7 +1022,7 @@ void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
-void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
__ push(result_register());
GenericBinaryOpStub stub(op,
@@ -1039,11 +1033,16 @@ void FastCodeGenerator::EmitBinaryOp(Token::Value op,
}
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
+ // Three main cases: non-this global variables, lookup slots, and
+ // all other types of slots. Left-hand-side parameters that rewrite
+ // to explicit property accesses do not reach here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
+ Slot* slot = var->slot();
if (var->is_global()) {
+ ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in rax, variable name in
// rcx, and the global object on the stack.
@@ -1054,8 +1053,14 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
// Overwrite the global object on the stack with the result if needed.
DropAndApply(1, context, rax);
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ __ push(result_register()); // Value.
+ __ push(rsi); // Context.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ Apply(context, rax);
+
} else if (var->slot() != NULL) {
- Slot* slot = var->slot();
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER:
@@ -1078,6 +1083,7 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
break;
}
Apply(context, result_register());
+
} else {
// Variables rewritten as properties are not treated as variables in
// assignments.
@@ -1086,7 +1092,7 @@ void FastCodeGenerator::EmitVariableAssignment(Variable* var,
}
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
@@ -1121,7 +1127,7 @@ void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// If the assignment starts a block of assignments to the same object,
@@ -1157,7 +1163,7 @@ void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
-void FastCodeGenerator::VisitProperty(Property* expr) {
+void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
@@ -1177,7 +1183,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
@@ -1200,7 +1206,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
}
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1218,7 +1224,7 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
}
-void FastCodeGenerator::VisitCall(Call* expr) {
+void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1292,7 +1298,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
}
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -1327,7 +1333,7 @@ void FastCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1360,7 +1366,7 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1464,13 +1470,27 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForValue(expr->expression(), kAccumulator);
+ Label no_conversion;
+ Condition is_smi;
+ is_smi = masm_->CheckSmi(result_register());
+ __ j(is_smi, &no_conversion);
+ __ push(result_register());
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
+ Apply(context_, result_register());
+ break;
+ }
+
default:
UNREACHABLE();
}
}
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
// Expression can only be a property, a global or a (parameter or local)
@@ -1489,7 +1509,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
@@ -1505,11 +1525,16 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForValue(prop->key(), kStack);
EmitKeyedPropertyLoad(prop);
}
- __ push(rax);
}
- // Convert to number.
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ Condition is_smi;
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &no_conversion);
+ __ push(rax);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -1541,6 +1566,27 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ if (loop_depth() > 0) {
+ if (expr->op() == Token::INC) {
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ } else {
+ __ SmiSubConstant(rax, rax, Smi::FromInt(1));
+ }
+ __ j(overflow, &stub_call);
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ is_smi = masm_->CheckSmi(rax);
+ __ j(is_smi, &done);
+ __ bind(&stub_call);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ SmiSubConstant(rax, rax, Smi::FromInt(1));
+ } else {
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+ }
+ }
// Call stub for +1/-1.
__ push(rax);
__ Push(Smi::FromInt(1));
@@ -1548,6 +1594,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
+ __ bind(&done);
// Store the value returned in rax.
switch (assign_type) {
@@ -1601,7 +1648,7 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
@@ -1636,7 +1683,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
}
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
@@ -1748,25 +1795,25 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, rax);
}
-Register FastCodeGenerator::result_register() { return rax; }
+Register FullCodeGenerator::result_register() { return rax; }
-Register FastCodeGenerator::context_register() { return rsi; }
+Register FullCodeGenerator::context_register() { return rsi; }
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
__ movq(Operand(rbp, frame_offset), value);
}
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
__ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
}
@@ -1775,7 +1822,7 @@ void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
// Non-local control flow support.
-void FastCodeGenerator::EnterFinallyBlock() {
+void FullCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
@@ -1789,7 +1836,7 @@ void FastCodeGenerator::EnterFinallyBlock() {
}
-void FastCodeGenerator::ExitFinallyBlock() {
+void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore result register from stack.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 457ece58..e293247d 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -271,11 +271,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
__ j(below, &slow);
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks. The map is already in rdx.
+
+ // Check bit field.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
+ Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow);
// Check that the key is a smi.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 65a408b4..b06b8c8a 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -581,6 +581,20 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
}
+Condition MacroAssembler::CheckBothPositiveSmi(Register first,
+ Register second) {
+ if (first.is(second)) {
+ return CheckPositiveSmi(first);
+ }
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ rol(kScratchRegister, Immediate(1));
+ testl(kScratchRegister, Immediate(0x03));
+ return zero;
+}
+
+
+
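CheckBothPositiveSmi folds two properties of two registers into one test: OR the words so any offending bit survives, then rotate left by one so the sign bit lands next to the smi tag bit, letting a single test against 0x3 reject anything negative or untagged. The idea, shown on plain 32-bit integers rather than the actual x64 smi encoding (illustration only):

#include <cstdint>
#include <cstdio>

// Simplified layout: tag bit in bit 0 (0 means "smi"), sign in bit 31.
static bool BothPositiveTaggedInts(uint32_t a, uint32_t b) {
  uint32_t combined = a | b;                              // Any bad bit survives the OR.
  uint32_t rotated = (combined << 1) | (combined >> 31);  // Rotate left by 1.
  return (rotated & 0x3) == 0;                            // Sign and tag both clear.
}

int main() {
  std::printf("%d %d %d\n",
              BothPositiveTaggedInts(4u << 1, 7u << 1),          // 1: both positive smis
              BothPositiveTaggedInts(4u << 1, 0x80000000u),      // 0: second is negative
              BothPositiveTaggedInts((4u << 1) | 1u, 7u << 1));  // 0: first is not a smi
}
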
Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
@@ -660,7 +674,17 @@ void MacroAssembler::SmiSub(Register dst,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
+ if (on_not_smi_result == NULL) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ if (dst.is(src1)) {
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ }
+ Assert(no_overflow, "Smi subtraction overflow");
+ } else if (dst.is(src1)) {
subq(dst, src2);
Label smi_result;
j(no_overflow, &smi_result);
@@ -1292,6 +1316,14 @@ void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
}
+void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
+ Label* on_not_both_smi) {
+ Condition both_smi = CheckBothPositiveSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
Register second_object,
Register scratch1,
@@ -1311,8 +1343,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringBits =
- kNotStringTag | kSeqStringTag | kAsciiStringTag;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
@@ -1320,7 +1351,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
- Immediate(kFlatAsciiStringBits + (kFlatAsciiStringBits << 3)));
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail);
}
@@ -1518,6 +1549,17 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+Condition MacroAssembler::IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzxbq(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ testb(instance_type, Immediate(kIsNotStringMask));
+ return zero;
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Label* miss) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index ce2848c0..8d4a8f2e 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -207,6 +207,9 @@ class MacroAssembler: public Assembler {
// Are both values tagged smis.
Condition CheckBothSmi(Register first, Register second);
+ // Are both values positive tagged smis.
+ Condition CheckBothPositiveSmi(Register first, Register second);
+
// Are either value a tagged smi.
Condition CheckEitherSmi(Register first, Register second);
@@ -248,6 +251,10 @@ class MacroAssembler: public Assembler {
// Jump if either or both register are not smi values.
void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+ // Jump if either or both registers are not positive smi values.
+ void JumpIfNotBothPositiveSmi(Register src1, Register src2,
+ Label* on_not_both_smi);
+
// Operations on tagged smi values.
// Smis represent a subset of integers. The subset is always equivalent to
@@ -452,6 +459,15 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if the object in register heap_object is a string. Afterwards the
+ // register map contains the object map and the register instance_type
+ // contains the instance type. The registers map and instance_type can be
+ // the same, in which case the register contains the instance type
+ // afterwards. Either of them can also be the same as heap_object.
+ Condition IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type);
+
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 75bbf3e2..6142ce3c 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -71,8 +71,6 @@ namespace internal {
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
@@ -82,6 +80,8 @@ namespace internal {
* - backup of callee save registers (rbx, possibly rsi and rdi).
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
+ * - At start of string (if 1, we are starting at the start of the
+ * string, otherwise 0)
* - register 0 rbp[-n] (Only positions must be stored in the first
* - register 1 rbp[-n-8] num_saved_registers_ registers)
* - ...
@@ -661,7 +661,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ASSERT_EQ(kInputStart, -3 * kPointerSize);
ASSERT_EQ(kInputEnd, -4 * kPointerSize);
ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kAtStart, -6 * kPointerSize);
+ ASSERT_EQ(kStackHighEnd, -6 * kPointerSize);
__ push(rdi);
__ push(rsi);
__ push(rdx);
@@ -672,6 +672,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ push(rbx); // Callee-save
#endif
__ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -716,6 +717,15 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Store this value in a local variable, for use when clearing
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
+
+ // Determine whether the start index is zero, that is at the start of the
+ // string, and store that value in a local variable.
+ __ movq(rbx, Operand(rbp, kStartIndex));
+ __ xor_(rcx, rcx); // setcc only operates on cl (lower byte of rcx).
+ __ testq(rbx, rbx);
+ __ setcc(zero, rcx); // 1 if 0 (start of string), 0 if positive.
+ __ movq(Operand(rbp, kAtStart), rcx);
+
if (num_saved_registers_ > 0) {
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 694cba00..c17f2b87 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -138,9 +138,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- // AtStart is passed as 32 bit int (values 0 or 1).
- static const int kAtStart = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kPointerSize;
#else
@@ -152,9 +150,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex - kPointerSize;
static const int kInputEnd = kInputStart - kPointerSize;
static const int kRegisterOutput = kInputEnd - kPointerSize;
- static const int kAtStart = kRegisterOutput - kPointerSize;
- static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput - kPointerSize;
+ static const int kDirectCall = kFrameAlign;
#endif
#ifdef _WIN64
@@ -168,7 +165,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// AMD64 Calling Convention has only one callee-save register that
// we use. We push this after the frame pointer (and after the
// parameters).
- static const int kBackup_rbx = kAtStart - kPointerSize;
+ static const int kBackup_rbx = kStackHighEnd - kPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
@@ -176,9 +173,10 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// the frame in GetCode.
static const int kInputStartMinusOne =
kLastCalleeSaveRegister - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index 015ba131..a0fc3cbf 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -54,8 +54,8 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 7e67c00a..a00097b5 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -61,6 +61,27 @@ using ::v8::Extension;
namespace i = ::v8::internal;
+static void ExpectString(const char* code, const char* expected) {
+ Local<Value> result = CompileRun(code);
+ CHECK(result->IsString());
+ String::AsciiValue ascii(result);
+ CHECK_EQ(expected, *ascii);
+}
+
+
+static void ExpectBoolean(const char* code, bool expected) {
+ Local<Value> result = CompileRun(code);
+ CHECK(result->IsBoolean());
+ CHECK_EQ(expected, result->BooleanValue());
+}
+
+
+static void ExpectObject(const char* code, Local<Value> expected) {
+ Local<Value> result = CompileRun(code);
+ CHECK(result->Equals(expected));
+}
+
+
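Hypothetical usage of the relocated helpers inside a cctest body (assumes an active HandleScope and a LocalContext named env, as in the surrounding tests):

// Fragment of a THREADED_TEST body, for illustration only.
v8::HandleScope scope;
LocalContext env;
ExpectString("'foo' + 'bar'", "foobar");
ExpectBoolean("2 > 1", true);
ExpectObject("this", env->Global());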
static int signature_callback_count;
static v8::Handle<Value> IncrementingSignatureCallback(
const v8::Arguments& args) {
@@ -2381,6 +2402,36 @@ THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
}
+static v8::Handle<Value> IdentityIndexedPropertyGetter(
+ uint32_t index,
+ const AccessorInfo& info) {
+ return v8::Integer::New(index);
+}
+
+
+THREADED_TEST(IndexedInterceptorWithNoSetter) {
+ v8::HandleScope scope;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+
+ const char* code =
+ "try {"
+ " obj[0] = 239;"
+ " for (var i = 0; i < 100; i++) {"
+ " var v = obj[0];"
+ " if (v != 0) throw 'Wrong value ' + v + ' at iteration ' + i;"
+ " }"
+ " 'PASSED'"
+ "} catch(e) {"
+ " e"
+ "}";
+ ExpectString(code, "PASSED");
+}
+
+
THREADED_TEST(MultiContexts) {
v8::HandleScope scope;
v8::Handle<ObjectTemplate> templ = ObjectTemplate::New();
@@ -2467,27 +2518,6 @@ THREADED_TEST(Regress892105) {
}
-static void ExpectString(const char* code, const char* expected) {
- Local<Value> result = CompileRun(code);
- CHECK(result->IsString());
- String::AsciiValue ascii(result);
- CHECK_EQ(0, strcmp(*ascii, expected));
-}
-
-
-static void ExpectBoolean(const char* code, bool expected) {
- Local<Value> result = CompileRun(code);
- CHECK(result->IsBoolean());
- CHECK_EQ(expected, result->BooleanValue());
-}
-
-
-static void ExpectObject(const char* code, Local<Value> expected) {
- Local<Value> result = CompileRun(code);
- CHECK(result->Equals(expected));
-}
-
-
THREADED_TEST(UndetectableObject) {
v8::HandleScope scope;
LocalContext env;
@@ -2839,12 +2869,16 @@ THREADED_TEST(NativeFunctionConstructCall) {
static const char* exts[1] = { "functiontest" };
v8::ExtensionConfiguration config(1, exts);
LocalContext context(&config);
- CHECK_EQ(v8::Integer::New(8),
- Script::Compile(v8_str("(new A()).data"))->Run());
- CHECK_EQ(v8::Integer::New(7),
- Script::Compile(v8_str("(new B()).data"))->Run());
- CHECK_EQ(v8::Integer::New(6),
- Script::Compile(v8_str("(new C()).data"))->Run());
+ for (int i = 0; i < 10; i++) {
+ // Run a few times to ensure that allocation of objects doesn't
+ // change behavior of a constructor function.
+ CHECK_EQ(v8::Integer::New(8),
+ Script::Compile(v8_str("(new A()).data"))->Run());
+ CHECK_EQ(v8::Integer::New(7),
+ Script::Compile(v8_str("(new B()).data"))->Run());
+ CHECK_EQ(v8::Integer::New(6),
+ Script::Compile(v8_str("(new C()).data"))->Run());
+ }
}
@@ -6202,8 +6236,16 @@ THREADED_TEST(LockUnlockLock) {
}
-static int GetSurvivingGlobalObjectsCount() {
+static int GetGlobalObjectsCount() {
int count = 0;
+ v8::internal::HeapIterator it;
+ for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
+ if (object->IsJSGlobalObject()) count++;
+ return count;
+}
+
+
+static int GetSurvivingGlobalObjectsCount() {
// We need to collect all garbage twice to be sure that everything
// has been collected. This is because inline caches are cleared in
// the first garbage collection but some of the maps have already
@@ -6211,13 +6253,7 @@ static int GetSurvivingGlobalObjectsCount() {
// collected until the second garbage collection.
v8::internal::Heap::CollectAllGarbage(false);
v8::internal::Heap::CollectAllGarbage(false);
- v8::internal::HeapIterator it;
- while (it.has_next()) {
- v8::internal::HeapObject* object = it.next();
- if (object->IsJSGlobalObject()) {
- count++;
- }
- }
+ int count = GetGlobalObjectsCount();
#ifdef DEBUG
if (count > 0) v8::internal::Heap::TracePathToGlobal();
#endif
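This and the later hunks in test-debug.cc, test-heap-profiler.cc and test-heap.cc all move to the new HeapIterator protocol, where next() returns NULL at the end instead of being paired with has_next(); a minimal sketch of the idiom, assuming the same i:: namespace alias used in this file:

// Count heap objects that satisfy a predicate using the new iterator idiom.
static int CountHeapObjects(bool (*predicate)(i::HeapObject*)) {
  int count = 0;
  i::HeapIterator it;
  for (i::HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    if (predicate(obj)) count++;
  }
  return count;
}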
@@ -8587,17 +8623,6 @@ THREADED_TEST(SpaghettiStackReThrow) {
}
-static int GetGlobalObjectsCount() {
- int count = 0;
- v8::internal::HeapIterator it;
- while (it.has_next()) {
- v8::internal::HeapObject* object = it.next();
- if (object->IsJSGlobalObject()) count++;
- }
- return count;
-}
-
-
TEST(Regress528) {
v8::V8::Initialize();
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index f6e4d046..459b8624 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -224,4 +224,63 @@ TEST(3) {
}
+TEST(4) {
+ // Test the VFP floating point instructions.
+ InitializeVM();
+ v8::HandleScope scope;
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ } T;
+ T t;
+
+ // Create a function that accepts &t, and loads, manipulates, and stores
+ // the doubles t.a, t.b, and t.c.
+ Assembler assm(NULL, 0);
+ Label L, C;
+
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
+
+ __ mov(r4, Operand(r0));
+ __ vldr(d6, r4, OFFSET_OF(T, a));
+ __ vldr(d7, r4, OFFSET_OF(T, b));
+ __ vadd(d5, d6, d7);
+ __ vstr(d5, r4, OFFSET_OF(T, c));
+
+ __ vmov(r2, r3, d5);
+ __ vmov(d4, r2, r3);
+ __ vstr(d4, r4, OFFSET_OF(T, b));
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(desc,
+ NULL,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()));
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5;
+ t.b = 2.75;
+ t.c = 17.17;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(4.25, t.c);
+ CHECK_EQ(4.25, t.b);
+ CHECK_EQ(1.5, t.a);
+ }
+}
+
#undef __
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index 08037b37..05c29d71 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
+#include <wchar.h> // wint_t
#include "v8.h"
@@ -73,7 +74,7 @@ v8::Handle<v8::Value> PrintExtension::Print(const v8::Arguments& args) {
uint16_t* string = NewArray<uint16_t>(length + 1);
string_obj->Write(string);
for (int j = 0; j < length; j++)
- printf("%lc", string[j]);
+ printf("%lc", static_cast<wint_t>(string[j]));
DeleteArray(string);
}
printf("\n");
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index cd0da1b8..583a9c2c 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -413,9 +413,7 @@ void CheckDebuggerUnloaded(bool check_functions) {
// Iterate the head and check that there are no debugger related objects left.
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- CHECK(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CHECK(!obj->IsDebugInfo());
CHECK(!obj->IsBreakPointInfo());
@@ -2197,13 +2195,25 @@ int Utf16ToAscii(const uint16_t* input_buffer, int length,
// We match parts of the message to get evaluate result int value.
bool GetEvaluateStringResult(char *message, char* buffer, int buffer_size) {
- const char* value = "\"value\":";
- char* pos = strstr(message, value);
- if (pos == NULL) {
+ if (strstr(message, "\"command\":\"evaluate\"") == NULL) {
+ return false;
+ }
+ const char* prefix = "\"text\":\"";
+ char* pos1 = strstr(message, prefix);
+ if (pos1 == NULL) {
+ return false;
+ }
+ pos1 += strlen(prefix);
+ char* pos2 = strchr(pos1, '"');
+ if (pos2 == NULL) {
return false;
}
Vector<char> buf(buffer, buffer_size);
- OS::StrNCpy(buf, pos, buffer_size);
+ int len = static_cast<int>(pos2 - pos1);
+ if (len > buffer_size - 1) {
+ len = buffer_size - 1;
+ }
+ OS::StrNCpy(buf, pos1, len);
buffer[buffer_size - 1] = '\0';
return true;
}
@@ -2303,9 +2313,10 @@ TEST(DebugEvaluateWithoutStack) {
CHECK_EQ(3, process_debug_messages_data.counter);
- CHECK(strcmp("Pinguin", process_debug_messages_data.results[0].buffer));
- CHECK(strcmp("Captbara", process_debug_messages_data.results[1].buffer));
- CHECK(strcmp("805", process_debug_messages_data.results[2].buffer));
+ CHECK_EQ(strcmp("Pinguin", process_debug_messages_data.results[0].buffer), 0);
+ CHECK_EQ(strcmp("Capybara", process_debug_messages_data.results[1].buffer),
+ 0);
+ CHECK_EQ(strcmp("805", process_debug_messages_data.results[2].buffer), 0);
v8::Debug::SetMessageHandler(NULL);
v8::Debug::SetDebugEventListener(NULL);
@@ -3872,6 +3883,23 @@ int GetBreakpointIdFromBreakEventMessage(char *message) {
}
+// We match parts of the message to get the total number of frames.
+int GetTotalFramesInt(char *message) {
+ const char* prefix = "\"totalFrames\":";
+ char* pos = strstr(message, prefix);
+ if (pos == NULL) {
+ return -1;
+ }
+ pos += strlen(prefix);
+ char* pos_end = pos;
+ int res = static_cast<int>(strtol(pos, &pos_end, 10));
+ if (pos_end == pos) {
+ return -1;
+ }
+ return res;
+}
+
+
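A hypothetical call illustrating the parsing above; the JSON literal is a trimmed stand-in, not actual debugger output:

// Yields 3 for this message; GetTotalFramesInt returns -1 when the
// "totalFrames" field is absent or malformed.
char sample_response[] =
    "{\"seq\":1,\"type\":\"response\",\"command\":\"backtrace\","
    "\"body\":{\"totalFrames\":3}}";
int total_frames = GetTotalFramesInt(sample_response);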
/* Test MessageQueues */
/* Tests the message queues that hold debugger commands and
* response messages to the debugger. Fills queues and makes
@@ -4235,7 +4263,12 @@ class BreakpointsV8Thread : public v8::internal::Thread {
class BreakpointsDebuggerThread : public v8::internal::Thread {
public:
+ explicit BreakpointsDebuggerThread(bool global_evaluate)
+ : global_evaluate_(global_evaluate) {}
void Run();
+
+ private:
+ bool global_evaluate_;
};
@@ -4303,24 +4336,51 @@ void BreakpointsDebuggerThread::Run() {
"\"type\":\"request\","
"\"command\":\"setbreakpoint\","
"\"arguments\":{\"type\":\"function\",\"target\":\"dog\",\"line\":3}}";
- const char* command_3 = "{\"seq\":103,"
- "\"type\":\"request\","
- "\"command\":\"evaluate\","
- "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":false}}";
- const char* command_4 = "{\"seq\":104,"
- "\"type\":\"request\","
- "\"command\":\"evaluate\","
- "\"arguments\":{\"expression\":\"x + 1\",\"disable_break\":true}}";
+ const char* command_3;
+ if (this->global_evaluate_) {
+ command_3 = "{\"seq\":103,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":false,"
+ "\"global\":true}}";
+ } else {
+ command_3 = "{\"seq\":103,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":false}}";
+ }
+ const char* command_4;
+ if (this->global_evaluate_) {
+ command_4 = "{\"seq\":104,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"100 + 8\",\"disable_break\":true,"
+ "\"global\":true}}";
+ } else {
+ command_4 = "{\"seq\":104,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"x + 1\",\"disable_break\":true}}";
+ }
const char* command_5 = "{\"seq\":105,"
"\"type\":\"request\","
"\"command\":\"continue\"}";
const char* command_6 = "{\"seq\":106,"
"\"type\":\"request\","
"\"command\":\"continue\"}";
- const char* command_7 = "{\"seq\":107,"
- "\"type\":\"request\","
- "\"command\":\"evaluate\","
- "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":true}}";
+ const char* command_7;
+ if (this->global_evaluate_) {
+ command_7 = "{\"seq\":107,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":true,"
+ "\"global\":true}}";
+ } else {
+ command_7 = "{\"seq\":107,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":true}}";
+ }
const char* command_8 = "{\"seq\":108,"
"\"type\":\"request\","
"\"command\":\"continue\"}";
@@ -4377,12 +4437,12 @@ void BreakpointsDebuggerThread::Run() {
v8::Debug::SendCommand(buffer, AsciiToUtf16(command_8, buffer));
}
-BreakpointsDebuggerThread breakpoints_debugger_thread;
-BreakpointsV8Thread breakpoints_v8_thread;
-
-TEST(RecursiveBreakpoints) {
+void TestRecursiveBreakpointsGeneric(bool global_evaluate) {
i::FLAG_debugger_auto_break = true;
+ BreakpointsDebuggerThread breakpoints_debugger_thread(global_evaluate);
+ BreakpointsV8Thread breakpoints_v8_thread;
+
// Create a V8 environment
Barriers stack_allocated_breakpoints_barriers;
stack_allocated_breakpoints_barriers.Initialize();
@@ -4395,6 +4455,14 @@ TEST(RecursiveBreakpoints) {
breakpoints_debugger_thread.Join();
}
+TEST(RecursiveBreakpoints) {
+ TestRecursiveBreakpointsGeneric(false);
+}
+
+TEST(RecursiveBreakpointsGlobal) {
+ TestRecursiveBreakpointsGeneric(true);
+}
+
static void DummyDebugEventListener(v8::DebugEvent event,
v8::Handle<v8::Object> exec_state,
@@ -5827,6 +5895,58 @@ TEST(ProcessDebugMessages) {
}
+struct BracktraceData {
+ static int frame_counter;
+ static void MessageHandler(const v8::Debug::Message& message) {
+ char print_buffer[1000];
+ v8::String::Value json(message.GetJSON());
+ Utf16ToAscii(*json, json.length(), print_buffer, 1000);
+
+ if (strstr(print_buffer, "backtrace") == NULL) {
+ return;
+ }
+ frame_counter = GetTotalFramesInt(print_buffer);
+ }
+};
+
+int BracktraceData::frame_counter;
+
+
+// Test that backtrace requests return the expected frame counts, both from
+// ProcessDebugMessages and while a script is running.
+TEST(Backtrace) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ v8::Debug::SetMessageHandler2(BracktraceData::MessageHandler);
+
+ const int kBufferSize = 1000;
+ uint16_t buffer[kBufferSize];
+ const char* scripts_command =
+ "{\"seq\":0,"
+ "\"type\":\"request\","
+ "\"command\":\"backtrace\"}";
+
+ // Check backtrace from ProcessDebugMessages.
+ BracktraceData::frame_counter = -10;
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ v8::Debug::ProcessDebugMessages();
+ CHECK_EQ(BracktraceData::frame_counter, 0);
+
+ v8::Handle<v8::String> void0 = v8::String::New("void(0)");
+ v8::Handle<v8::Script> script = v8::Script::Compile(void0, void0);
+
+ // Check backtrace from "void(0)" script.
+ BracktraceData::frame_counter = -10;
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ script->Run();
+ CHECK_EQ(BracktraceData::frame_counter, 1);
+
+ // Get rid of the debug message handler.
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
TEST(GetMirror) {
v8::HandleScope scope;
DebugLocalContext env;
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index ba4eec26..7b0ad99e 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -101,6 +101,8 @@ TEST(DisasmIa320) {
__ cmp(Operand(ebp, ecx, times_4, 0), Immediate(1000));
Handle<FixedArray> foo2 = Factory::NewFixedArray(10, TENURED);
__ cmp(ebx, foo2);
+ __ cmpb(ebx, Operand(ebp, ecx, times_2, 0));
+ __ cmpb(Operand(ebp, ecx, times_2, 0), ebx);
__ or_(edx, 3);
__ xor_(edx, 3);
__ nop();
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 04e0037b..295b0ee0 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -64,10 +64,8 @@ TEST(ConstructorProfile) {
ConstructorHeapProfileTestHelper cons_profile;
i::AssertNoAllocation no_alloc;
i::HeapIterator iterator;
- while (iterator.has_next()) {
- i::HeapObject* obj = iterator.next();
+ for (i::HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
cons_profile.CollectStats(obj);
- }
CHECK_EQ(0, cons_profile.f_count());
cons_profile.PrintStats();
CHECK_EQ(2, cons_profile.f_count());
@@ -375,10 +373,8 @@ TEST(RetainerProfile) {
RetainerHeapProfile ret_profile;
i::AssertNoAllocation no_alloc;
i::HeapIterator iterator;
- while (iterator.has_next()) {
- i::HeapObject* obj = iterator.next();
+ for (i::HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
ret_profile.CollectStats(obj);
- }
RetainerProfilePrinter printer;
ret_profile.DebugPrintStats(&printer);
const char* retainers_of_a = printer.GetRetainers("A");
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 17bee5b0..d36286bb 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -746,16 +746,13 @@ static int ObjectsFoundInHeap(Handle<Object> objs[], int size) {
// Count the number of objects found in the heap.
int found_count = 0;
HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- CHECK(obj != NULL);
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
for (int i = 0; i < size; i++) {
if (*objs[i] == obj) {
found_count++;
}
}
}
- CHECK(!iterator.has_next());
return found_count;
}
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index 68cbc261..8ea9f790 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -47,10 +47,10 @@ static void InitTraceEnv(TickSample* sample) {
static void DoTrace(Address fp) {
- trace_env.sample->fp = reinterpret_cast<uintptr_t>(fp);
+ trace_env.sample->fp = fp;
// sp is only used to define stack high bound
trace_env.sample->sp =
- reinterpret_cast<uintptr_t>(trace_env.sample) - 10240;
+ reinterpret_cast<Address>(trace_env.sample) - 10240;
StackTracer::Trace(trace_env.sample);
}
@@ -315,6 +315,9 @@ TEST(PureJSStackTrace) {
" JSTrace();"
"};\n"
"OuterJSTrace();");
+ // The last JS function called.
+ CHECK_EQ(GetGlobalJSFunction("JSFuncDoTrace")->address(),
+ sample.function);
CHECK_GT(sample.frames_count, 1);
// Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
CheckRetAddrIsInJSFunction("JSTrace",
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 85ff331a..eca2c2b6 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -202,9 +202,9 @@ static int CheckThatProfilerWorks(int log_pos) {
// Force compiler to generate new code by parametrizing source.
EmbeddedVector<char, 100> script_src;
i::OS::SNPrintF(script_src,
- "for (var i = 0; i < 1000; ++i) { "
- "(function(x) { return %d * x; })(i); }",
- log_pos);
+ "function f%d(x) { return %d * x; }"
+ "for (var i = 0; i < 10000; ++i) { f%d(i); }",
+ log_pos, log_pos, log_pos);
// Run code for 200 msecs to get some ticks.
const double end_time = i::OS::TimeCurrentMillis() + 200;
while (i::OS::TimeCurrentMillis() < end_time) {
@@ -228,6 +228,7 @@ static int CheckThatProfilerWorks(int log_pos) {
log_pos += log_size;
// Check buffer contents.
buffer[log_size] = '\0';
+ printf("%s", buffer.start());
const char* tick = "\ntick,";
CHECK_NE(NULL, strstr(buffer.start(), code_creation));
const bool ticks_found = strstr(buffer.start(), tick) != NULL;
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index e56f0f47..5c7b57cf 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -207,6 +207,36 @@ TEST(MarkCompactCollector) {
}
+static Handle<Map> CreateMap() {
+ return Factory::NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+}
+
+
+TEST(MapCompact) {
+ FLAG_max_map_space_pages = 16;
+ InitializeVM();
+
+ {
+ v8::HandleScope sc;
+    // Keep allocating maps while pointers are still encodable and thus
+    // mark-compact is permitted.
+ Handle<JSObject> root = Factory::NewJSObjectFromMap(CreateMap());
+ do {
+ Handle<Map> map = CreateMap();
+ map->set_prototype(*root);
+ root = Factory::NewJSObjectFromMap(map);
+ } while (Heap::map_space()->MapPointersEncodable());
+ }
+  // Now that we don't have any handles to the just-allocated maps, we should
+  // be able to trigger map compaction.
+  // To give the test an additional chance to fail, try to force a compaction,
+  // which should be impossible right now.
+ Heap::CollectAllGarbage(true);
+ // And now map pointers should be encodable again.
+ CHECK(Heap::map_space()->MapPointersEncodable());
+}
+
+
static int gc_starts = 0;
static int gc_ends = 0;
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index c72c4d19..b1ca45aa 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -679,16 +679,14 @@ static ArchRegExpMacroAssembler::Result Execute(Code* code,
int start_offset,
const byte* input_start,
const byte* input_end,
- int* captures,
- bool at_start) {
+ int* captures) {
return NativeRegExpMacroAssembler::Execute(
code,
input,
start_offset,
input_start,
input_end,
- captures,
- at_start);
+ captures);
}
@@ -716,8 +714,7 @@ TEST(MacroAssemblerNativeSuccess) {
0,
start_adr,
start_adr + seq_input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(-1, captures[0]);
@@ -760,8 +757,7 @@ TEST(MacroAssemblerNativeSimple) {
0,
start_adr,
start_adr + input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, captures[0]);
@@ -778,8 +774,7 @@ TEST(MacroAssemblerNativeSimple) {
0,
start_adr,
start_adr + input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
@@ -820,8 +815,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
0,
start_adr,
start_adr + input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, captures[0]);
@@ -839,8 +833,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
0,
start_adr,
start_adr + input->length() * 2,
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
@@ -877,8 +870,7 @@ TEST(MacroAssemblerNativeBacktrack) {
0,
start_adr,
start_adr + input->length(),
- NULL,
- true);
+ NULL);
CHECK_EQ(NativeRegExpMacroAssembler::FAILURE, result);
}
@@ -920,8 +912,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
0,
start_adr,
start_adr + input->length(),
- output,
- true);
+ output);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -969,8 +960,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
0,
start_adr,
start_adr + input->length() * 2,
- output,
- true);
+ output);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -1022,8 +1012,7 @@ TEST(MacroAssemblernativeAtStart) {
0,
start_adr,
start_adr + input->length(),
- NULL,
- true);
+ NULL);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
@@ -1032,8 +1021,7 @@ TEST(MacroAssemblernativeAtStart) {
3,
start_adr + 3,
start_adr + input->length(),
- NULL,
- false);
+ NULL);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
}
@@ -1084,8 +1072,7 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
0,
start_adr,
start_adr + input->length(),
- output,
- true);
+ output);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -1184,8 +1171,7 @@ TEST(MacroAssemblerNativeRegisters) {
0,
start_adr,
start_adr + input->length(),
- output,
- true);
+ output);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -1225,8 +1211,7 @@ TEST(MacroAssemblerStackOverflow) {
0,
start_adr,
start_adr + input->length(),
- NULL,
- true);
+ NULL);
CHECK_EQ(NativeRegExpMacroAssembler::EXCEPTION, result);
CHECK(Top::has_pending_exception());
@@ -1271,8 +1256,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
0,
start_adr,
start_adr + input->length(),
- captures,
- true);
+ captures);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, captures[0]);
@@ -1650,6 +1634,163 @@ TEST(CanonicalizeCharacterSets) {
ASSERT_EQ(30, list->at(0).to());
}
+// Checks whether a character is in the set represented by a list of ranges.
+static bool CharacterInSet(ZoneList<CharacterRange>* set, uc16 value) {
+ for (int i = 0; i < set->length(); i++) {
+ CharacterRange range = set->at(i);
+ if (range.from() <= value && value <= range.to()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+TEST(CharacterRangeMerge) {
+ ZoneScope zone_scope(DELETE_ON_EXIT);
+ ZoneList<CharacterRange> l1(4);
+ ZoneList<CharacterRange> l2(4);
+  // Create all combinations of intersections of ranges, both singletons and
+  // longer ranges.
+
+ int offset = 0;
+
+ // The five kinds of singleton intersections:
+ // X
+ // Y - outside before
+ // Y - outside touching start
+ // Y - overlap
+ // Y - outside touching end
+ // Y - outside after
+
+ for (int i = 0; i < 5; i++) {
+ l1.Add(CharacterRange::Singleton(offset + 2));
+ l2.Add(CharacterRange::Singleton(offset + i));
+ offset += 6;
+ }
+
+ // The seven kinds of singleton/non-singleton intersections:
+ // XXX
+ // Y - outside before
+ // Y - outside touching start
+ // Y - inside touching start
+ // Y - entirely inside
+ // Y - inside touching end
+ // Y - outside touching end
+ // Y - disjoint after
+
+ for (int i = 0; i < 7; i++) {
+ l1.Add(CharacterRange::Range(offset + 2, offset + 4));
+ l2.Add(CharacterRange::Singleton(offset + i));
+ offset += 8;
+ }
+
+ // The eleven kinds of non-singleton intersections:
+ //
+ // XXXXXXXX
+ // YYYY - outside before.
+ // YYYY - outside touching start.
+ // YYYY - overlapping start
+ // YYYY - inside touching start
+ // YYYY - entirely inside
+ // YYYY - inside touching end
+ // YYYY - overlapping end
+ // YYYY - outside touching end
+ // YYYY - outside after
+ // YYYYYYYY - identical
+ // YYYYYYYYYYYY - containing entirely.
+
+ for (int i = 0; i < 9; i++) {
+ l1.Add(CharacterRange::Range(offset + 6, offset + 15)); // Length 8.
+ l2.Add(CharacterRange::Range(offset + 2 * i, offset + 2 * i + 3));
+ offset += 22;
+ }
+ l1.Add(CharacterRange::Range(offset + 6, offset + 15));
+ l2.Add(CharacterRange::Range(offset + 6, offset + 15));
+ offset += 22;
+ l1.Add(CharacterRange::Range(offset + 6, offset + 15));
+ l2.Add(CharacterRange::Range(offset + 4, offset + 17));
+ offset += 22;
+
+ // Different kinds of multi-range overlap:
+ // XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX
+ // YYYY Y YYYY Y YYYY Y YYYY Y YYYY Y YYYY Y
+
+ l1.Add(CharacterRange::Range(offset, offset + 21));
+ l1.Add(CharacterRange::Range(offset + 31, offset + 52));
+ for (int i = 0; i < 6; i++) {
+ l2.Add(CharacterRange::Range(offset + 2, offset + 5));
+ l2.Add(CharacterRange::Singleton(offset + 8));
+ offset += 9;
+ }
+
+ ASSERT(CharacterRange::IsCanonical(&l1));
+ ASSERT(CharacterRange::IsCanonical(&l2));
+
+ ZoneList<CharacterRange> first_only(4);
+ ZoneList<CharacterRange> second_only(4);
+ ZoneList<CharacterRange> both(4);
+
+ // Merge one direction.
+ CharacterRange::Merge(&l1, &l2, &first_only, &second_only, &both);
+
+ CHECK(CharacterRange::IsCanonical(&first_only));
+ CHECK(CharacterRange::IsCanonical(&second_only));
+ CHECK(CharacterRange::IsCanonical(&both));
+
+ for (uc16 i = 0; i < offset; i++) {
+ bool in_first = CharacterInSet(&l1, i);
+ bool in_second = CharacterInSet(&l2, i);
+ CHECK((in_first && !in_second) == CharacterInSet(&first_only, i));
+ CHECK((!in_first && in_second) == CharacterInSet(&second_only, i));
+ CHECK((in_first && in_second) == CharacterInSet(&both, i));
+ }
+
+ first_only.Clear();
+ second_only.Clear();
+ both.Clear();
+
+ // Merge other direction.
+ CharacterRange::Merge(&l2, &l1, &second_only, &first_only, &both);
+
+ CHECK(CharacterRange::IsCanonical(&first_only));
+ CHECK(CharacterRange::IsCanonical(&second_only));
+ CHECK(CharacterRange::IsCanonical(&both));
+
+ for (uc16 i = 0; i < offset; i++) {
+ bool in_first = CharacterInSet(&l1, i);
+ bool in_second = CharacterInSet(&l2, i);
+ CHECK((in_first && !in_second) == CharacterInSet(&first_only, i));
+ CHECK((!in_first && in_second) == CharacterInSet(&second_only, i));
+ CHECK((in_first && in_second) == CharacterInSet(&both, i));
+ }
+
+ first_only.Clear();
+ second_only.Clear();
+ both.Clear();
+
+ // Merge but don't record all combinations.
+ CharacterRange::Merge(&l1, &l2, NULL, NULL, &both);
+
+ CHECK(CharacterRange::IsCanonical(&both));
+
+ for (uc16 i = 0; i < offset; i++) {
+ bool in_first = CharacterInSet(&l1, i);
+ bool in_second = CharacterInSet(&l2, i);
+ CHECK((in_first && in_second) == CharacterInSet(&both, i));
+ }
+
+ // Merge into same set.
+ ZoneList<CharacterRange> all(4);
+ CharacterRange::Merge(&l1, &l2, &all, &all, &all);
+
+ CHECK(CharacterRange::IsCanonical(&all));
+
+ for (uc16 i = 0; i < offset; i++) {
+ bool in_first = CharacterInSet(&l1, i);
+ bool in_second = CharacterInSet(&l2, i);
+ CHECK((in_first || in_second) == CharacterInSet(&all, i));
+ }
+}
TEST(Graph) {
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 6d074266..81b6b713 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -39,6 +39,8 @@
#include "cctest.h"
#include "spaces.h"
#include "objects.h"
+#include "natives.h"
+#include "bootstrapper.h"
using namespace v8::internal;
@@ -169,6 +171,75 @@ TEST(ExternalReferenceDecoder) {
}
+class FileByteSink : public SnapshotByteSink {
+ public:
+ explicit FileByteSink(const char* snapshot_file) {
+ fp_ = OS::FOpen(snapshot_file, "wb");
+ file_name_ = snapshot_file;
+ if (fp_ == NULL) {
+ PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ }
+ virtual ~FileByteSink() {
+ if (fp_ != NULL) {
+ fclose(fp_);
+ }
+ }
+ virtual void Put(int byte, const char* description) {
+ if (fp_ != NULL) {
+ fputc(byte, fp_);
+ }
+ }
+ virtual int Position() {
+ return ftell(fp_);
+ }
+ void WriteSpaceUsed(
+ int new_space_used,
+ int pointer_space_used,
+ int data_space_used,
+ int code_space_used,
+ int map_space_used,
+ int cell_space_used,
+ int large_space_used);
+
+ private:
+ FILE* fp_;
+ const char* file_name_;
+};
+
+
+void FileByteSink::WriteSpaceUsed(
+ int new_space_used,
+ int pointer_space_used,
+ int data_space_used,
+ int code_space_used,
+ int map_space_used,
+ int cell_space_used,
+ int large_space_used) {
+ int file_name_length = strlen(file_name_) + 10;
+ Vector<char> name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(name, "%s.size", file_name_);
+ FILE* fp = OS::FOpen(name.start(), "w");
+ fprintf(fp, "new %d\n", new_space_used);
+ fprintf(fp, "pointer %d\n", pointer_space_used);
+ fprintf(fp, "data %d\n", data_space_used);
+ fprintf(fp, "code %d\n", code_space_used);
+ fprintf(fp, "map %d\n", map_space_used);
+ fprintf(fp, "cell %d\n", cell_space_used);
+ fprintf(fp, "large %d\n", large_space_used);
+ fclose(fp);
+}
+
+
+static bool WriteToFile(const char* snapshot_file) {
+ FileByteSink file(snapshot_file);
+ StartupSerializer ser(&file);
+ ser.Serialize();
+ return true;
+}
+
+
static void Serialize() {
// We have to create one context. One reason for this is so that the builtins
// can be loaded from v8natives.js and their addresses can be processed. This
@@ -176,7 +247,7 @@ static void Serialize() {
// that would confuse the serialization/deserialization process.
v8::Persistent<v8::Context> env = v8::Context::New();
env.Dispose();
- Snapshot::WriteToFile(FLAG_testing_serialization_file);
+ WriteToFile(FLAG_testing_serialization_file);
}
@@ -273,95 +344,63 @@ DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
}
-class FileByteSink : public SnapshotByteSink {
- public:
- explicit FileByteSink(const char* snapshot_file) {
- fp_ = OS::FOpen(snapshot_file, "wb");
- file_name_ = snapshot_file;
- if (fp_ == NULL) {
- PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
- exit(1);
- }
- }
- virtual ~FileByteSink() {
- if (fp_ != NULL) {
- fclose(fp_);
- }
- }
- virtual void Put(int byte, const char* description) {
- if (fp_ != NULL) {
- fputc(byte, fp_);
+TEST(PartialSerialization) {
+ Serializer::Enable();
+ v8::V8::Initialize();
+
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ ASSERT(!env.IsEmpty());
+ env->Enter();
+ // Make sure all builtin scripts are cached.
+ { HandleScope scope;
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Bootstrapper::NativesSourceLookup(i);
}
}
- virtual int Position() {
- return ftell(fp_);
+ Heap::CollectAllGarbage(true);
+ Heap::CollectAllGarbage(true);
+
+ Object* raw_foo;
+ {
+ v8::HandleScope handle_scope;
+ v8::Local<v8::String> foo = v8::String::New("foo");
+ ASSERT(!foo.IsEmpty());
+ raw_foo = *(v8::Utils::OpenHandle(*foo));
}
- void WriteSpaceUsed(
- int new_space_used,
- int pointer_space_used,
- int data_space_used,
- int code_space_used,
- int map_space_used,
- int cell_space_used,
- int large_space_used);
- private:
- FILE* fp_;
- const char* file_name_;
-};
+ int file_name_length = strlen(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ env->Exit();
+ env.Dispose();
-void FileByteSink::WriteSpaceUsed(
- int new_space_used,
- int pointer_space_used,
- int data_space_used,
- int code_space_used,
- int map_space_used,
- int cell_space_used,
- int large_space_used) {
- int file_name_length = strlen(file_name_) + 10;
- Vector<char> name = Vector<char>::New(file_name_length + 1);
- OS::SNPrintF(name, "%s.size", file_name_);
- FILE* fp = OS::FOpen(name.start(), "w");
- fprintf(fp, "new %d\n", new_space_used);
- fprintf(fp, "pointer %d\n", pointer_space_used);
- fprintf(fp, "data %d\n", data_space_used);
- fprintf(fp, "code %d\n", code_space_used);
- fprintf(fp, "map %d\n", map_space_used);
- fprintf(fp, "cell %d\n", cell_space_used);
- fprintf(fp, "large %d\n", large_space_used);
- fclose(fp);
+ FileByteSink startup_sink(startup_name.start());
+ StartupSerializer startup_serializer(&startup_sink);
+ startup_serializer.SerializeStrongReferences();
+
+ FileByteSink partial_sink(FLAG_testing_serialization_file);
+ PartialSerializer p_ser(&startup_serializer, &partial_sink);
+ p_ser.Serialize(&raw_foo);
+ startup_serializer.SerializeWeakReferences();
+ partial_sink.WriteSpaceUsed(p_ser.CurrentAllocationAddress(NEW_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+ p_ser.CurrentAllocationAddress(CODE_SPACE),
+ p_ser.CurrentAllocationAddress(MAP_SPACE),
+ p_ser.CurrentAllocationAddress(CELL_SPACE),
+ p_ser.CurrentAllocationAddress(LO_SPACE));
}
-TEST(PartialSerialization) {
- Serializer::Enable();
- v8::V8::Initialize();
- v8::Persistent<v8::Context> env = v8::Context::New();
- env->Enter();
-
- v8::HandleScope handle_scope;
- v8::Local<v8::String> foo = v8::String::New("foo");
-
- FileByteSink file(FLAG_testing_serialization_file);
- Serializer ser(&file);
- i::Handle<i::String> internal_foo = v8::Utils::OpenHandle(*foo);
- Object* raw_foo = *internal_foo;
- ser.SerializePartial(&raw_foo);
- file.WriteSpaceUsed(ser.CurrentAllocationAddress(NEW_SPACE),
- ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
- ser.CurrentAllocationAddress(OLD_DATA_SPACE),
- ser.CurrentAllocationAddress(CODE_SPACE),
- ser.CurrentAllocationAddress(MAP_SPACE),
- ser.CurrentAllocationAddress(CELL_SPACE),
- ser.CurrentAllocationAddress(LO_SPACE));
-}
+DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
+ int file_name_length = strlen(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ CHECK(Snapshot::Initialize(startup_name.start()));
-DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
- v8::V8::Initialize();
const char* file_name = FLAG_testing_serialization_file;
- int file_name_length = strlen(file_name) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
OS::SNPrintF(name, "%s.size", file_name);
FILE* fp = OS::FOpen(name.start(), "r");
@@ -392,11 +431,25 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
large_size);
int snapshot_size = 0;
byte* snapshot = ReadBytes(file_name, &snapshot_size);
- SnapshotByteSource source(snapshot, snapshot_size);
- Deserializer deserializer(&source);
+
Object* root;
- deserializer.DeserializePartial(&root);
- CHECK(root->IsString());
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ deserializer.DeserializePartial(&root);
+ CHECK(root->IsString());
+ }
+ v8::HandleScope handle_scope;
+  Handle<Object> root_handle(root);
+
+ Object* root2;
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ deserializer.DeserializePartial(&root2);
+ CHECK(root2->IsString());
+ CHECK(*root_handle == root2);
+ }
}
diff --git a/test/message/bugs/.svn/entries b/test/message/bugs/.svn/entries
index 30c6935f..f3f3e96c 100644
--- a/test/message/bugs/.svn/entries
+++ b/test/message/bugs/.svn/entries
@@ -1,9 +1,9 @@
8
dir
-3649
-http://v8.googlecode.com/svn/branches/bleeding_edge/test/message/bugs
-http://v8.googlecode.com/svn
+3720
+https://v8.googlecode.com/svn/branches/bleeding_edge/test/message/bugs
+https://v8.googlecode.com/svn
diff --git a/test/mjsunit/compiler/unary-add.js b/test/mjsunit/compiler/unary-add.js
new file mode 100644
index 00000000..b1fc0c2c
--- /dev/null
+++ b/test/mjsunit/compiler/unary-add.js
@@ -0,0 +1,67 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test unary addition in various contexts.
+
+// Test value context.
+assertEquals(1, +'1');
+assertEquals(1, +1);
+assertEquals(1.12, +1.12);
+assertEquals(NaN, +undefined);
+assertEquals(NaN, +{});
+
+// Test effect context.
+assertEquals(1, eval("+'1'; 1"));
+assertEquals(1, eval("+1; 1"));
+assertEquals(1, eval("+1.12; 1"));
+assertEquals(1, eval("+undefined; 1"));
+assertEquals(1, eval("+{}; 1"));
+
+// Test test context.
+assertEquals(1, (+'1') ? 1 : 2);
+assertEquals(1, (+1) ? 1 : 2);
+assertEquals(1, (+'0') ? 2 : 1);
+assertEquals(1, (+0) ? 2 : 1);
+assertEquals(1, (+1.12) ? 1 : 2);
+assertEquals(1, (+undefined) ? 2 : 1);
+assertEquals(1, (+{}) ? 2 : 1);
+
+// Test value/test context.
+assertEquals(1, +'1' || 2);
+assertEquals(1, +1 || 2);
+assertEquals(1.12, +1.12 || 2);
+assertEquals(2, +undefined || 2);
+assertEquals(2, +{} || 2);
+
+// Test test/value context.
+assertEquals(2, +'1' && 2);
+assertEquals(2, +1 && 2);
+assertEquals(0, +'0' && 2);
+assertEquals(0, +0 && 2);
+assertEquals(2, +1.12 && 2);
+assertEquals(NaN, +undefined && 2);
+assertEquals(NaN, +{} && 2);
diff --git a/test/mjsunit/debug-step.js b/test/mjsunit/debug-step.js
index 45342186..a887514a 100644
--- a/test/mjsunit/debug-step.js
+++ b/test/mjsunit/debug-step.js
@@ -79,4 +79,4 @@ f();
assertEquals(0, result);
// Get rid of the debug event listener.
-Debug.setListener(null); \ No newline at end of file
+Debug.setListener(null);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 41388a37..f1752b9f 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -45,8 +45,8 @@ array-constructor: PASS || TIMEOUT
# Very slow on ARM, contains no architecture dependent code.
unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm)
-# Skip long running test in debug.
-regress/regress-524: PASS, SKIP if $mode == debug
+# Skip long-running test in debug and allow it to time out in release mode.
+regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
[ $arch == arm ]
diff --git a/test/mjsunit/bugs/bug-223.js b/test/mjsunit/regress/regress-580.js
index 04b296b9..c6b3db7a 100644
--- a/test/mjsunit/bugs/bug-223.js
+++ b/test/mjsunit/regress/regress-580.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,15 +25,31 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// When calling user-defined functions on strings, booleans or
-// numbers, we should create a wrapper object.
+// Test constant folding of smi operations that overflow a 32-bit int
+// See http://code.google.com/p/v8/issues/detail?id=580
-function TypeOfThis() { return typeof this; }
+function num_ops() {
+ var x;
+ var tmp = 0;
+ x = (tmp = 1578221999, tmp)+(tmp = 572285336, tmp);
+ assertEquals(2150507335, x);
+ x = 1578221999 + 572285336;
+ assertEquals(2150507335, x);
-String.prototype.TypeOfThis = TypeOfThis;
-Boolean.prototype.TypeOfThis = TypeOfThis;
-Number.prototype.TypeOfThis = TypeOfThis;
+ x = (tmp = -1500000000, tmp)+(tmp = -2000000000, tmp);
+ assertEquals(-3500000000, x);
+ x = -1500000000 + -2000000000;
+ assertEquals(-3500000000, x);
-assertEquals('object', 'xxx'.TypeOfThis());
-assertEquals('object', true.TypeOfThis());
-assertEquals('object', (42).TypeOfThis());
+ x = (tmp = 1578221999, tmp)-(tmp = -572285336, tmp);
+ assertEquals(2150507335, x);
+ x = 1578221999 - -572285336;
+ assertEquals(2150507335, x);
+
+ x = (tmp = -1500000000, tmp)-(tmp = 2000000000, tmp);
+ assertEquals(-3500000000, x);
+ x = -1500000000 - 2000000000;
+ assertEquals(-3500000000, x);
+}
+
+num_ops();
diff --git a/test/mjsunit/tools/logreader.js b/test/mjsunit/tools/logreader.js
index 8ed5ffd2..8b747895 100644
--- a/test/mjsunit/tools/logreader.js
+++ b/test/mjsunit/tools/logreader.js
@@ -67,7 +67,7 @@
var reader = new devtools.profiler.LogReader({});
assertEquals([0x10000000, 0x10001000, 0xffff000, 0x10000000],
- reader.processStack(0x10000000, ['overflow',
+ reader.processStack(0x10000000, 0, ['overflow',
'+1000', '-2000', '+1000']));
})();
diff --git a/test/mjsunit/tools/tickprocessor-test.func-info b/test/mjsunit/tools/tickprocessor-test.func-info
new file mode 100644
index 00000000..a66b90f4
--- /dev/null
+++ b/test/mjsunit/tools/tickprocessor-test.func-info
@@ -0,0 +1,29 @@
+Statistical profiling result from v8.log, (3 ticks, 0 unaccounted, 0 excluded).
+
+ [Shared libraries]:
+ ticks total nonlib name
+
+ [JavaScript]:
+ ticks total nonlib name
+ 2 66.7% 66.7% Stub: CompareStub_GE
+ 1 33.3% 33.3% LazyCompile: DrawLine 3d-cube.js:17
+
+ [C++]:
+ ticks total nonlib name
+
+ [GC]:
+ ticks total nonlib name
+ 0 0.0%
+
+ [Bottom up (heavy) profile]:
+ Note: percentage shows a share of a particular caller in the total
+ amount of its parent calls.
+ Callers occupying less than 2.0% are not shown.
+
+ ticks parent name
+ 2 66.7% Stub: CompareStub_GE
+ 2 100.0% LazyCompile: DrawLine 3d-cube.js:17
+ 2 100.0% LazyCompile: DrawQube 3d-cube.js:188
+
+ 1 33.3% LazyCompile: DrawLine 3d-cube.js:17
+ 1 100.0% LazyCompile: DrawQube 3d-cube.js:188
diff --git a/test/mjsunit/tools/tickprocessor-test.log b/test/mjsunit/tools/tickprocessor-test.log
index 75daad6b..80e7ec1a 100644
--- a/test/mjsunit/tools/tickprocessor-test.log
+++ b/test/mjsunit/tools/tickprocessor-test.log
@@ -6,19 +6,20 @@ code-creation,Stub,0xf540a100,474,"CEntryStub"
code-creation,Script,0xf541cd80,736,"exp.js"
code-creation,Stub,0xf541d0e0,47,"RuntimeStub_Math_exp"
code-creation,LazyCompile,0xf541d120,145,"exp native math.js:41"
+function-creation,0xf441d280,0xf541d120
code-creation,LoadIC,0xf541d280,117,"j"
code-creation,LoadIC,0xf541d360,63,"i"
-tick,0x80f82d1,0xffdfe880,0,0xf541ce5c
-tick,0x80f89a1,0xffdfecf0,0,0xf541ce5c
-tick,0x8123b5c,0xffdff1a0,0,0xf541d1a1,0xf541ceea
-tick,0x8123b65,0xffdff1a0,0,0xf541d1a1,0xf541ceea
-tick,0xf541d2be,0xffdff1e4,0
-tick,0xf541d320,0xffdff1dc,0
-tick,0xf541d384,0xffdff1d8,0
-tick,0xf7db94da,0xffdff0ec,0,0xf541d1a1,0xf541ceea
-tick,0xf7db951c,0xffdff0f0,0,0xf541d1a1,0xf541ceea
-tick,0xf7dbc508,0xffdff14c,0,0xf541d1a1,0xf541ceea
-tick,0xf7dbff21,0xffdff198,0,0xf541d1a1,0xf541ceea
-tick,0xf7edec90,0xffdff0ec,0,0xf541d1a1,0xf541ceea
-tick,0xffffe402,0xffdff488,0
+tick,0x80f82d1,0xffdfe880,0,0,0xf541ce5c
+tick,0x80f89a1,0xffdfecf0,0,0,0xf541ce5c
+tick,0x8123b5c,0xffdff1a0,0,0,0xf541d1a1,0xf541ceea
+tick,0x8123b65,0xffdff1a0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf541d2be,0xffdff1e4,0,0
+tick,0xf541d320,0xffdff1dc,0,0
+tick,0xf541d384,0xffdff1d8,0,0
+tick,0xf7db94da,0xffdff0ec,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7db951c,0xffdff0f0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7dbc508,0xffdff14c,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7dbff21,0xffdff198,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7edec90,0xffdff0ec,0,0,0xf541d1a1,0xf541ceea
+tick,0xffffe402,0xffdff488,0,0
profiler,"end"
diff --git a/test/mjsunit/tools/tickprocessor.js b/test/mjsunit/tools/tickprocessor.js
index 83bdac8a..abcde897 100644
--- a/test/mjsunit/tools/tickprocessor.js
+++ b/test/mjsunit/tools/tickprocessor.js
@@ -334,7 +334,7 @@ function PrintMonitor(outputOrFileName) {
print = function(str) {
var strSplit = str.split('\n');
for (var i = 0; i < strSplit.length; ++i) {
- s = strSplit[i];
+ var s = strSplit[i];
realOut.push(s);
if (outputPos < expectedOut.length) {
if (expectedOut[outputPos] != s) {
@@ -400,7 +400,10 @@ function driveTickProcessorTest(
'tickprocessor-test.log', 'tickprocessor-test.ignore-unknown'],
'GcState': [
false, false, TickProcessor.VmStates.GC,
- 'tickprocessor-test.log', 'tickprocessor-test.gc-state']
+ 'tickprocessor-test.log', 'tickprocessor-test.gc-state'],
+ 'FunctionInfo': [
+ false, false, null,
+ 'tickprocessor-test-func-info.log', 'tickprocessor-test.func-info']
};
for (var testName in testData) {
print('=== testProcessing-' + testName + ' ===');
diff --git a/test/mjsunit/value-wrapper.js b/test/mjsunit/value-wrapper.js
index 33ef013c..88330b44 100644
--- a/test/mjsunit/value-wrapper.js
+++ b/test/mjsunit/value-wrapper.js
@@ -28,6 +28,9 @@
// When calling user-defined functions on strings, booleans or
// numbers, we should create a wrapper object.
+// When running the tests, use loops to ensure that the call site moves through
+// the different IC states and that both the runtime system and the generated
+// IC code are tested.
function RunTests() {
for (var i = 0; i < 10; i++) {
assertEquals('object', 'xxx'.TypeOfThis());
@@ -77,6 +80,22 @@ function RunTests() {
assertEquals('object', (42)[7]());
assertEquals('object', (3.14)[7]());
}
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', typeof 'xxx'.ObjectValueOf());
+ assertEquals('object', typeof true.ObjectValueOf());
+ assertEquals('object', typeof false.ObjectValueOf());
+ assertEquals('object', typeof (42).ObjectValueOf());
+ assertEquals('object', typeof (3.14).ObjectValueOf());
+ }
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals('[object String]', 'xxx'.ObjectToString());
+ assertEquals('[object Boolean]', true.ObjectToString());
+ assertEquals('[object Boolean]', false.ObjectToString());
+ assertEquals('[object Number]', (42).ObjectToString());
+ assertEquals('[object Number]', (3.14).ObjectToString());
+ }
}
function TypeOfThis() { return typeof this; }
@@ -87,7 +106,14 @@ Boolean.prototype.TypeOfThis = TypeOfThis;
Number.prototype.TypeOfThis = TypeOfThis;
Boolean.prototype[7] = TypeOfThis;
Number.prototype[7] = TypeOfThis;
-
+
+String.prototype.ObjectValueOf = Object.prototype.valueOf;
+Boolean.prototype.ObjectValueOf = Object.prototype.valueOf;
+Number.prototype.ObjectValueOf = Object.prototype.valueOf;
+
+String.prototype.ObjectToString = Object.prototype.toString;
+Boolean.prototype.ObjectToString = Object.prototype.toString;
+Number.prototype.ObjectToString = Object.prototype.toString;
RunTests();
diff --git a/tools/codemap.js b/tools/codemap.js
index af511f64..8eb2acbc 100644
--- a/tools/codemap.js
+++ b/tools/codemap.js
@@ -196,6 +196,18 @@ devtools.profiler.CodeMap.prototype.findEntry = function(addr) {
/**
+ * Returns a dynamic code entry using its starting address.
+ *
+ * @param {number} addr Address.
+ */
+devtools.profiler.CodeMap.prototype.findDynamicEntryByStartAddress =
+ function(addr) {
+ var node = this.dynamics_.find(addr);
+ return node ? node.value : null;
+};
+
+
+/**
* Returns an array of all dynamic code entries.
*/
devtools.profiler.CodeMap.prototype.getAllDynamicEntries = function() {
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 4368eb81..5c879b3e 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -267,8 +267,6 @@
'../../src/execution.h',
'../../src/factory.cc',
'../../src/factory.h',
- '../../src/fast-codegen.cc',
- '../../src/fast-codegen.h',
'../../src/flag-definitions.h',
'../../src/flags.cc',
'../../src/flags.h',
@@ -277,6 +275,8 @@
'../../src/frames-inl.h',
'../../src/frames.cc',
'../../src/frames.h',
+ '../../src/full-codegen.cc',
+ '../../src/full-codegen.h',
'../../src/func-name-inferrer.cc',
'../../src/func-name-inferrer.h',
'../../src/global-handles.cc',
@@ -408,9 +408,9 @@
'../../src/arm/cpu-arm.cc',
'../../src/arm/debug-arm.cc',
'../../src/arm/disasm-arm.cc',
- '../../src/arm/fast-codegen-arm.cc',
'../../src/arm/frames-arm.cc',
'../../src/arm/frames-arm.h',
+ '../../src/arm/full-codegen-arm.cc',
'../../src/arm/ic-arm.cc',
'../../src/arm/jump-target-arm.cc',
'../../src/arm/macro-assembler-arm.cc',
@@ -446,9 +446,9 @@
'../../src/ia32/cpu-ia32.cc',
'../../src/ia32/debug-ia32.cc',
'../../src/ia32/disasm-ia32.cc',
- '../../src/ia32/fast-codegen-ia32.cc',
'../../src/ia32/frames-ia32.cc',
'../../src/ia32/frames-ia32.h',
+ '../../src/ia32/full-codegen-ia32.cc',
'../../src/ia32/ic-ia32.cc',
'../../src/ia32/jump-target-ia32.cc',
'../../src/ia32/macro-assembler-ia32.cc',
@@ -475,9 +475,9 @@
'../../src/x64/cpu-x64.cc',
'../../src/x64/debug-x64.cc',
'../../src/x64/disasm-x64.cc',
- '../../src/x64/fast-codegen-x64.cc',
'../../src/x64/frames-x64.cc',
'../../src/x64/frames-x64.h',
+ '../../src/x64/full-codegen-x64.cc',
'../../src/x64/ic-x64.cc',
'../../src/x64/jump-target-x64.cc',
'../../src/x64/macro-assembler-x64.cc',
diff --git a/tools/logreader.js b/tools/logreader.js
index 88ab9077..20a1f544 100644
--- a/tools/logreader.js
+++ b/tools/logreader.js
@@ -139,11 +139,12 @@ devtools.profiler.LogReader.prototype.processLogChunk = function(chunk) {
* Processes stack record.
*
* @param {number} pc Program counter.
+ * @param {number} func JS Function.
* @param {Array.<string>} stack String representation of a stack.
* @return {Array.<number>} Processed stack.
*/
-devtools.profiler.LogReader.prototype.processStack = function(pc, stack) {
- var fullStack = [pc];
+devtools.profiler.LogReader.prototype.processStack = function(pc, func, stack) {
+ var fullStack = func ? [pc, func] : [pc];
var prevFrame = pc;
for (var i = 0, n = stack.length; i < n; ++i) {
var frame = stack[i];
diff --git a/tools/profile.js b/tools/profile.js
index d41f5cd1..b2de6490 100644
--- a/tools/profile.js
+++ b/tools/profile.js
@@ -43,6 +43,11 @@ devtools.profiler.Profile = function() {
this.bottomUpTree_ = new devtools.profiler.CallTree();
};
+/**
+ * Version of profiler log.
+ */
+devtools.profiler.Profile.VERSION = 2;
+
/**
* Returns whether a function with the specified name must be skipped.
@@ -134,6 +139,21 @@ devtools.profiler.Profile.prototype.addCode = function(
/**
+ * Creates an alias entry for a code entry.
+ *
+ * @param {number} aliasAddr Alias address.
+ * @param {number} addr Code entry address.
+ */
+devtools.profiler.Profile.prototype.addCodeAlias = function(
+ aliasAddr, addr) {
+ var entry = this.codeMap_.findDynamicEntryByStartAddress(addr);
+ if (entry) {
+ this.codeMap_.addCode(aliasAddr, entry);
+ }
+};
+
+
+/**
* Reports about moving of a dynamic code entry.
*
* @param {number} from Current code entry address.
@@ -163,6 +183,31 @@ devtools.profiler.Profile.prototype.deleteCode = function(start) {
/**
+ * Reports about moving of a dynamic code entry.
+ *
+ * @param {number} from Current code entry address.
+ * @param {number} to New code entry address.
+ */
+devtools.profiler.Profile.prototype.safeMoveDynamicCode = function(from, to) {
+ if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
+ this.codeMap_.moveCode(from, to);
+ }
+};
+
+
+/**
+ * Reports about deletion of a dynamic code entry.
+ *
+ * @param {number} start Starting address.
+ */
+devtools.profiler.Profile.prototype.safeDeleteDynamicCode = function(start) {
+ if (this.codeMap_.findDynamicEntryByStartAddress(start)) {
+ this.codeMap_.deleteCode(start);
+ }
+};
+
+
+/**
* Retrieves a code entry by an address.
*
* @param {number} addr Entry address.
@@ -362,6 +407,13 @@ devtools.profiler.Profile.DynamicCodeEntry.prototype.getRawName = function() {
};
+devtools.profiler.Profile.DynamicCodeEntry.prototype.isJSFunction = function() {
+ return this.type == "Function" ||
+ this.type == "LazyCompile" ||
+ this.type == "Script";
+};
+
+
/**
* Constructs a call graph.
*
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index c566c22a..35422e2e 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -137,10 +137,19 @@ function TickProcessor(
processor: this.processCodeMove, backrefs: true },
'code-delete': { parsers: [this.createAddressParser('code')],
processor: this.processCodeDelete, backrefs: true },
+ 'function-creation': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('function-obj')],
+ processor: this.processFunctionCreation, backrefs: true },
+ 'function-move': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('code-move-to')],
+ processor: this.processFunctionMove, backrefs: true },
+ 'function-delete': { parsers: [this.createAddressParser('code')],
+ processor: this.processFunctionDelete, backrefs: true },
'snapshot-pos': { parsers: [this.createAddressParser('code'), parseInt],
processor: this.processSnapshotPosition, backrefs: true },
'tick': { parsers: [this.createAddressParser('code'),
- this.createAddressParser('stack'), parseInt, 'var-args'],
+ this.createAddressParser('stack'),
+ this.createAddressParser('func'), parseInt, 'var-args'],
processor: this.processTick, backrefs: true },
'heap-sample-begin': { parsers: [null, null, parseInt],
processor: this.processHeapSampleBegin },
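
With these entries the processor understands three new record types, and the tick record is widened: the JS function address now sits between the stack pointer and the VM state, so its fields are pc, sp, func, state, stack frames. A schematic Python dispatcher under those assumptions (the sample lines are made up, and back-reference compression handled by the real processor is ignored):

    import csv, io

    def handle_tick(pc, sp, func, state, *stack):
        print('tick pc=%s sp=%s func=%s state=%s frames=%d'
              % (pc, sp, func, state, len(stack)))

    def handle_function_creation(addr_a, addr_b):
        # Two addresses: the code object and the JSFunction object, in the
        # order given by the parser list above.
        print('function-creation %s %s' % (addr_a, addr_b))

    DISPATCH = {
        'tick': handle_tick,
        'function-creation': handle_function_creation,
    }

    log = ('tick,0x1000,0xff00,0x2000,0,0x3000\n'
           'function-creation,0x1000,0x2000\n')
    for row in csv.reader(io.StringIO(log)):
        handler = DISPATCH.get(row[0])
        if handler:
            handler(*row[1:])
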
@@ -287,6 +296,22 @@ TickProcessor.prototype.processCodeDelete = function(start) {
};
+TickProcessor.prototype.processFunctionCreation = function(
+ functionAddr, codeAddr) {
+ this.profile_.addCodeAlias(functionAddr, codeAddr);
+};
+
+
+TickProcessor.prototype.processFunctionMove = function(from, to) {
+ this.profile_.safeMoveDynamicCode(from, to);
+};
+
+
+TickProcessor.prototype.processFunctionDelete = function(start) {
+ this.profile_.safeDeleteDynamicCode(start);
+};
+
+
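
Taken together, the three handlers track the lifetime of a JSFunction object alongside its code. Roughly, with a toy dict-based map again (the real path goes through Profile and CodeMap):

    entries = {}

    def on_code_creation(addr, name):                # 'code-creation'
        entries[addr] = name

    def on_function_creation(func_addr, code_addr):  # 'function-creation'
        if code_addr in entries:
            entries[func_addr] = entries[code_addr]  # alias the code entry

    def on_function_move(src, dst):                  # 'function-move'
        if src in entries:                           # e.g. after a moving GC
            entries[dst] = entries.pop(src)

    def on_function_delete(addr):                    # 'function-delete'
        entries.pop(addr, None)

    on_code_creation(0x1000, 'LazyCompile: foo')
    on_function_creation(0x2000, 0x1000)  # ticks on 0x2000 now resolve to foo
    on_function_move(0x2000, 0x2800)
    on_function_delete(0x2800)
    print(entries)  # only the code entry at 0x1000 remains
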
TickProcessor.prototype.processSnapshotPosition = function(addr, pos) {
if (this.snapshotLogProcessor_) {
this.deserializedEntriesNames_[addr] =
@@ -300,7 +325,7 @@ TickProcessor.prototype.includeTick = function(vmState) {
};
-TickProcessor.prototype.processTick = function(pc, sp, vmState, stack) {
+TickProcessor.prototype.processTick = function(pc, sp, func, vmState, stack) {
this.ticks_.total++;
if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
if (!this.includeTick(vmState)) {
@@ -308,7 +333,19 @@ TickProcessor.prototype.processTick = function(pc, sp, vmState, stack) {
return;
}
- this.profile_.recordTick(this.processStack(pc, stack));
+ if (func) {
+ var funcEntry = this.profile_.findEntry(func);
+ if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
+ func = 0;
+ } else {
+ var currEntry = this.profile_.findEntry(pc);
+ if (!currEntry || !currEntry.isJSFunction || currEntry.isJSFunction()) {
+ func = 0;
+ }
+ }
+ }
+
+ this.profile_.recordTick(this.processStack(pc, func, stack));
};
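
The extra checks keep the function address only when it adds information: it must resolve to a JS function entry, and the pc itself must not already resolve to one (i.e. the tick landed in a stub, IC, builtin or similar). A compact Python restatement of that filter (entry types are illustrative):

    class Entry:
        def __init__(self, kind):
            self.kind = kind
        def is_js_function(self):
            return self.kind in ('Function', 'LazyCompile', 'Script')

    def effective_func(entries, pc, func):
        # Drop func unless it names a JS function AND the pc entry is not
        # itself a JS function (otherwise the alias adds nothing).
        func_entry = entries.get(func)
        if not func or not func_entry or not func_entry.is_js_function():
            return 0
        pc_entry = entries.get(pc)
        if not pc_entry or pc_entry.is_js_function():
            return 0
        return func

    entries = {0x2000: Entry('LazyCompile'), 0x1000: Entry('LoadIC')}
    print(hex(effective_func(entries, 0x1000, 0x2000)))  # 0x2000: keep
    print(hex(effective_func(entries, 0x2000, 0x2000)))  # 0x0: pc already JS
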
@@ -341,7 +378,7 @@ TickProcessor.prototype.processJSProducer = function(constructor, stack) {
if (stack.length == 0) return;
var first = stack.shift();
var processedStack =
- this.profile_.resolveAndFilterFuncs_(this.processStack(first, stack));
+ this.profile_.resolveAndFilterFuncs_(this.processStack(first, 0, stack));
processedStack.unshift(constructor);
this.currentProducerProfile_.addPath(processedStack);
};
diff --git a/tools/tickprocessor.py b/tools/tickprocessor.py
index cc540d3d..c932e3fc 100644
--- a/tools/tickprocessor.py
+++ b/tools/tickprocessor.py
@@ -59,6 +59,8 @@ class CodeEntry(object):
def IsICEntry(self):
return False
+ def IsJSFunction(self):
+ return False
class SharedLibraryEntry(CodeEntry):
@@ -124,6 +126,8 @@ class JSCodeEntry(CodeEntry):
return self.type in ('CallIC', 'LoadIC', 'StoreIC') or \
(self.type == 'Builtin' and self.builtin_ic_re.match(self.name))
+ def IsJSFunction(self):
+ return self.type in ('Function', 'LazyCompile', 'Script')
class CodeRegion(object):
@@ -212,13 +216,19 @@ class TickProcessor(object):
for row in logreader:
row_num += 1
if row[0] == 'tick':
- self.ProcessTick(int(row[1], 16), int(row[2], 16), int(row[3]), self.PreprocessStack(row[4:]))
+ self.ProcessTick(int(row[1], 16), int(row[2], 16), int(row[3], 16), int(row[4]), self.PreprocessStack(row[5:]))
elif row[0] == 'code-creation':
self.ProcessCodeCreation(row[1], int(row[2], 16), int(row[3]), row[4])
elif row[0] == 'code-move':
self.ProcessCodeMove(int(row[1], 16), int(row[2], 16))
elif row[0] == 'code-delete':
self.ProcessCodeDelete(int(row[1], 16))
+ elif row[0] == 'function-creation':
+ self.ProcessFunctionCreation(int(row[1], 16), int(row[2], 16))
+ elif row[0] == 'function-move':
+ self.ProcessFunctionMove(int(row[1], 16), int(row[2], 16))
+ elif row[0] == 'function-delete':
+ self.ProcessFunctionDelete(int(row[1], 16))
elif row[0] == 'shared-library':
self.AddSharedLibraryEntry(row[1], int(row[2], 16), int(row[3], 16))
self.ParseVMSymbols(row[1], int(row[2], 16), int(row[3], 16))
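
Because the tick row gained a field, every index after the stack pointer shifts by one: the function address is row[3], the VM state row[4], and the raw stack starts at row[5]. A quick sanity check of that layout (the sample line is made up):

    row = 'tick,0x1000,0xff00,0x2000,0,0x3000,0x4000'.split(',')
    pc, sp, func = (int(row[i], 16) for i in (1, 2, 3))
    state = int(row[4])
    stack = row[5:]
    print(hex(pc), hex(sp), hex(func), state, stack)
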
@@ -275,6 +285,27 @@ class TickProcessor(object):
except splaytree.KeyNotFoundError:
print('Code delete event for unknown code: 0x%x' % from_addr)
+ def ProcessFunctionCreation(self, func_addr, code_addr):
+ js_entry_node = self.js_entries.Find(code_addr)
+ if js_entry_node:
+ js_entry = js_entry_node.value
+ self.js_entries.Insert(func_addr, JSCodeEntry(func_addr, js_entry.name, js_entry.type, 1, None))
+
+ def ProcessFunctionMove(self, from_addr, to_addr):
+ try:
+ removed_node = self.js_entries.Remove(from_addr)
+      removed_node.value.SetStartAddress(to_addr)
+ self.js_entries.Insert(to_addr, removed_node.value)
+ except splaytree.KeyNotFoundError:
+ return
+
+ def ProcessFunctionDelete(self, from_addr):
+ try:
+ removed_node = self.js_entries.Remove(from_addr)
+ self.deleted_code.append(removed_node.value)
+ except splaytree.KeyNotFoundError:
+ return
+
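
Note the difference from the JS tools: instead of aliasing the existing entry object, the Python processor inserts a fresh JSCodeEntry that copies the name and type of the entry found at the code address. A minimal stand-in (plain dict instead of the splay tree, simplified entry type):

    from collections import namedtuple

    FakeEntry = namedtuple('FakeEntry', 'name type')  # simplified JSCodeEntry

    js_entries = {0x1000: FakeEntry('foo', 'LazyCompile')}

    def process_function_creation(func_addr, code_addr):
        entry = js_entries.get(code_addr)
        if entry:
            # Copy, not alias: the function address gets its own entry with
            # the same name and type.
            js_entries[func_addr] = FakeEntry(entry.name, entry.type)

    process_function_creation(0x2000, 0x1000)
    print(js_entries[0x2000])  # FakeEntry(name='foo', type='LazyCompile')
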
def ProcessBeginCodeRegion(self, id, assm, start, name):
if not assm in self.pending_assemblers:
self.pending_assemblers[assm] = Assembler()
@@ -320,7 +351,7 @@ class TickProcessor(object):
result.append(entry.ToString())
return result
- def ProcessTick(self, pc, sp, state, stack):
+ def ProcessTick(self, pc, sp, func, state, stack):
if state == VMStates['GC']:
self.number_of_gc_ticks += 1
if not self.IncludeTick(pc, sp, state):
@@ -337,11 +368,16 @@ class TickProcessor(object):
if len(stack) > 0:
caller_pc = stack.pop(0)
self.total_number_of_ticks -= 1
- self.ProcessTick(caller_pc, sp, state, stack)
+ self.ProcessTick(caller_pc, sp, func, state, stack)
else:
self.unaccounted_number_of_ticks += 1
else:
- entry.Tick(pc, self.ProcessStack(stack))
+ processed_stack = self.ProcessStack(stack)
+ if not entry.IsSharedLibraryEntry() and not entry.IsJSFunction():
+ func_entry_node = self.js_entries.Find(func)
+ if func_entry_node and func_entry_node.value.IsJSFunction():
+ processed_stack.insert(0, func_entry_node.value.ToString())
+ entry.Tick(pc, processed_stack)
if self.call_graph_json:
self.AddToPackedStacks(pc, stack)
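
The effect of the new tail of ProcessTick: when a tick lands in code that is neither a shared library nor a JS function (stubs, ICs and the like) and the reported function address resolves to a known JS function, that function's name is pushed on top of the processed stack, so the sample is still charged to a named function. A boiled-down sketch (booleans stand in for the entry lookups):

    def attribute_tick(hit_is_shared_lib, hit_is_js_function, func_name, stack):
        # Mirrors the tail of TickProcessor.ProcessTick.
        if not hit_is_shared_lib and not hit_is_js_function and func_name:
            return [func_name] + stack
        return list(stack)

    print(attribute_tick(False, False, 'foo', ['bar', 'baz']))  # ['foo', 'bar', 'baz']
    print(attribute_tick(False, True, 'foo', ['bar']))          # ['bar']
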
diff --git a/tools/utils.py b/tools/utils.py
index 196bb055..435c12de 100644
--- a/tools/utils.py
+++ b/tools/utils.py
@@ -57,6 +57,8 @@ def GuessOS():
return 'freebsd'
elif id == 'OpenBSD':
return 'openbsd'
+ elif id == 'SunOS':
+ return 'solaris'
else:
return None
@@ -67,6 +69,8 @@ def GuessArchitecture():
return 'arm'
elif (not id) or (not re.match('(x|i[3-6])86', id) is None):
return 'ia32'
+ elif id == 'i86pc':
+ return 'ia32'
else:
return None
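
For context: Solaris reports 'SunOS' as the OS identifier and, on x86 hardware, 'i86pc' as the machine name, which is why both new branches are needed for the tools to run there. Roughly, assuming the identifiers come from Python's platform module (the hunk context does not show where id is read from):

    import platform

    OS_MAP = {'FreeBSD': 'freebsd', 'OpenBSD': 'openbsd', 'SunOS': 'solaris'}
    ARCH_MAP = {'i86pc': 'ia32'}  # Solaris/x86 machine name

    # Other systems and architectures handled by utils.py are elided here.
    print(OS_MAP.get(platform.system()))
    print(ARCH_MAP.get(platform.machine(), platform.machine()))
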
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 6b473597..f95f2279 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -388,18 +388,6 @@
RelativePath="..\..\src\factory.h"
>
</File>
- <File
- RelativePath="..\..\src\ia32\fast-codegen-ia32.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\fast-codegen.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\fast-codegen.h"
- >
- </File>
<File
RelativePath="..\..\src\flags.cc"
>
@@ -436,6 +424,18 @@
RelativePath="..\..\src\frames.h"
>
</File>
+ <File
+ RelativePath="..\..\src\ia32\full-codegen-ia32.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.h"
+ >
+ </File>
<File
RelativePath="..\..\src\func-name-inferrer.cc"
>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index afb4f74b..a8f17228 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -396,18 +396,6 @@
RelativePath="..\..\src\factory.h"
>
</File>
- <File
- RelativePath="..\..\src\arm\fast-codegen-arm.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\fast-codegen.cc"
- >
- </File>
- <File
- RelativePath="..\..\src\fast-codegen.h"
- >
- </File>
<File
RelativePath="..\..\src\flags.cc"
>
@@ -444,6 +432,18 @@
RelativePath="..\..\src\frames.h"
>
</File>
+ <File
+ RelativePath="..\..\src\arm\full-codegen-arm.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.cc"
+ >
+ </File>
+ <File
+ RelativePath="..\..\src\full-codegen.h"
+ >
+ </File>
<File
RelativePath="..\..\src\func-name-inferrer.cc"
>